S: Orphan
F: drivers/platform/x86/wmi.c
+F: include/uapi/linux/wmi.h
AD1889 ALSA SOUND DRIVER
S: Supported
F: drivers/input/misc/adxl34x.c
-AEDSP16 DRIVER
-S: Maintained
-F: sound/oss/aedsp16.c
-
AF9013 MEDIA DRIVER
F: include/linux/altera_jtaguart.h
AMAZON ETHERNET DRIVERS
-M: Netanel Belgazal <netanel@annapurnalabs.com>
-R: Saeed Bishara <saeed@annapurnalabs.com>
-R: Zorik Machulsky <zorik@annapurnalabs.com>
+M: Netanel Belgazal <netanel@amazon.com>
+R: Saeed Bishara <saeedb@amazon.com>
+R: Zorik Machulsky <zorik@amazon.com>
S: Supported
F: Documentation/networking/ena.txt
F: drivers/staging/android/
ANDROID GOLDFISH RTC DRIVER
-M: Miodrag Dinic <miodrag.dinic@imgtec.com>
+M: Miodrag Dinic <miodrag.dinic@mips.com>
S: Supported
F: Documentation/devicetree/bindings/rtc/google,goldfish-rtc.txt
F: drivers/rtc/rtc-goldfish.c
W: http://www.linux4sam.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
S: Supported
+N: at91
+N: atmel
F: arch/arm/mach-at91/
F: include/soc/at91/
F: arch/arm/boot/dts/at91*.dts
F: arch/arm/boot/dts/sama*.dtsi
F: arch/arm/include/debug/at91.S
F: drivers/memory/atmel*
+F: drivers/watchdog/sama5d4_wdt.c
+X: drivers/input/touchscreen/atmel_mxt_ts.c
+X: drivers/net/wireless/atmel/
ARM/CALXEDA HIGHBANK ARCHITECTURE
ARM/Mediatek RTC DRIVER
S: Maintained
+F: Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
F: drivers/rtc/rtc-mt6397.c
+F: drivers/rtc/rtc-mt7622.c
ARM/Mediatek SoC support
T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
S: Supported
F: arch/arm64/boot/dts/renesas/
+F: Documentation/devicetree/bindings/arm/shmobile.txt
F: drivers/soc/renesas/
F: include/linux/soc/renesas/
F: arch/arm/configs/shmobile_defconfig
F: arch/arm/include/debug/renesas-scif.S
F: arch/arm/mach-shmobile/
+F: Documentation/devicetree/bindings/arm/shmobile.txt
F: drivers/soc/renesas/
F: include/linux/soc/renesas/
S: Maintained
+ARM/TEGRA HDMI CEC SUBSYSTEM SUPPORT
+S: Maintained
+F: drivers/media/platform/tegra-cec/
+F: Documentation/devicetree/bindings/media/tegra-cec.txt
+
ARM/TETON BGA MACHINE SUPPORT
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-uniphier.git
S: Maintained
+F: Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
F: arch/arm/boot/dts/uniphier*
F: arch/arm/include/asm/hardware/cache-uniphier.h
F: arch/arm/mach-uniphier/
F: arch/arm64/boot/dts/socionext/
F: drivers/bus/uniphier-system-bus.c
F: drivers/clk/uniphier/
+F: drivers/gpio/gpio-uniphier.c
F: drivers/i2c/busses/i2c-uniphier*
F: drivers/irqchip/irq-uniphier-aidet.c
F: drivers/pinctrl/uniphier/
F: drivers/i2c/busses/i2c-zx2967.c
F: drivers/mmc/host/dw_mmc-zx.*
F: drivers/pinctrl/zte/
-F: drivers/reset/reset-zx2967.c
F: drivers/soc/zte/
F: drivers/thermal/zx2967_thermal.c
F: drivers/watchdog/zx2967_wdt.c
ARM/ZYNQ ARCHITECTURE
W: http://wiki.xilinx.com
T: git https://github.com/Xilinx/linux-xlnx.git
F: include/linux/async_tx.h
AT24 EEPROM DRIVER
S: Maintained
F: drivers/misc/eeprom/at24.c
F: drivers/net/hamradio/baycom*
BCACHE (BLOCK LAYER CACHE)
W: http://bcache.evilpiepirate.org
-S: Orphan
+C: irc://irc.oftc.net/bcache
+S: Maintained
F: drivers/md/bcache/
BDISP ST MEDIA DRIVER
S: Supported
F: arch/x86/net/bpf_jit*
F: Documentation/networking/filter.txt
+F: Documentation/bpf/
F: include/linux/bpf*
F: include/linux/filter.h
F: include/uapi/linux/bpf*
F: net/sched/act_bpf.c
F: net/sched/cls_bpf.c
F: samples/bpf/
-F: tools/net/bpf*
+F: tools/bpf/
F: tools/testing/selftests/bpf/
BROADCOM B44 10/100 ETHERNET DRIVER
F: drivers/gpio/gpio-brcmstb.c
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
+S: Maintained
+F: drivers/phy/broadcom/phy-brcm-usb*
+
BROADCOM GENET ETHERNET DRIVER
S: Supported
N: bcm585*
N: bcm586*
N: bcm88312
+N: hr2
F: arch/arm64/boot/dts/broadcom/ns2*
F: drivers/clk/bcm/clk-ns*
F: drivers/pinctrl/bcm/pinctrl-ns*
F: Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
F: drivers/cpufreq/brcmstb*
+BROADCOM STB AVS TMON DRIVER
+S: Maintained
+F: Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
+F: drivers/thermal/broadcom/brcmstb*
+
BROADCOM STB NAND FLASH DRIVER
S: Maintained
F: drivers/mtd/nand/brcmnand/
+BROADCOM STB DPFE DRIVER
+S: Maintained
+F: Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.txt
+F: drivers/memory/brcmstb_dpfe.c
+
BROADCOM SYSTEMPORT ETHERNET DRIVER
CA8210 IEEE-802.15.4 RADIO DRIVER
W: https://github.com/Cascoda/ca8210-linux.git
S: Maintained
F: include/uapi/linux/cec-funcs.h
F: Documentation/devicetree/bindings/media/cec.txt
+CEC GPIO DRIVER
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Supported
+F: drivers/media/platform/cec-gpio/
+F: Documentation/devicetree/bindings/media/cec-gpio.txt
+
CELL BROADBAND ENGINE ARCHITECTURE
F: drivers/auxdisplay/cfag12864bfb.c
F: include/linux/cfag12864b.h
-CFG80211 and NL80211
+802.11 (including CFG80211/NL80211)
W: http://wireless.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
S: Maintained
+F: net/wireless/
F: include/uapi/linux/nl80211.h
+F: include/linux/ieee80211.h
+F: include/net/wext.h
F: include/net/cfg80211.h
-F: net/wireless/*
-X: net/wireless/wext*
+F: include/net/iw_handler.h
+F: include/net/ieee80211_radiotap.h
+F: Documentation/driver-api/80211/cfg80211.rst
+F: Documentation/networking/regulatory.txt
CHAR and MISC DRIVERS
CISCO VIC ETHERNET NIC DRIVER
-M: Neel Patel <neepatel@cisco.com>
+M: Parvi Kaustubhi <pkaustub@cisco.com>
S: Supported
F: drivers/net/ethernet/cisco/enic/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
-F: drivers/clocksource
+F: drivers/clocksource/
+F: Documentation/devicetree/bindings/timer/
CMPC ACPI DRIVER
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
W: http://coccinelle.lip6.fr/
S: Maintained
F: Documentation/cgroup-v1/cpusets.txt
F: include/linux/cpuset.h
-F: kernel/cpuset.c
+F: kernel/cgroup/cpuset.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
CPU POWER MONITORING SUBSYSTEM
S: Maintained
F: tools/power/cpupower/
F: include/linux/cpuidle.h
CRAMFS FILESYSTEM
-W: http://sourceforge.net/projects/cramfs/
-S: Orphan / Obsolete
+S: Maintained
F: Documentation/filesystems/cramfs.txt
F: fs/cramfs/
S: Maintained
F: drivers/net/fddi/defxx.*
+DELL SMBIOS DRIVER
+S: Maintained
+F: drivers/platform/x86/dell-smbios.*
+
+DELL SMBIOS SMM DRIVER
+S: Maintained
+F: drivers/platform/x86/dell-smbios-smm.c
+
+DELL SMBIOS WMI DRIVER
+S: Maintained
+F: drivers/platform/x86/dell-smbios-wmi.c
+F: tools/wmi/dell-smbios-example.c
+
DELL LAPTOP DRIVER
F: Documentation/dcdbas.txt
F: drivers/firmware/dcdbas.*
-DELL WMI EXTRAS DRIVER
+DELL WMI NOTIFICATIONS DRIVER
S: Maintained
F: drivers/platform/x86/dell-wmi.c
+DELL WMI DESCRIPTOR DRIVER
+S: Maintained
+F: drivers/platform/x86/dell-wmi-descriptor.c
+
DELTA ST MEDIA DRIVER
T: quilt http://people.redhat.com/agk/patches/linux/editing/
S: Maintained
F: Documentation/device-mapper/
+F: drivers/md/Makefile
+F: drivers/md/Kconfig
F: drivers/md/dm*
F: drivers/md/persistent-data/
F: include/linux/device-mapper.h
F: Documentation/devicetree/bindings/input/da90??-onkey.txt
F: Documentation/devicetree/bindings/thermal/da90??-thermal.txt
F: Documentation/devicetree/bindings/regulator/da92*.txt
-F: Documentation/devicetree/bindings/watchdog/da92??-wdt.txt
+F: Documentation/devicetree/bindings/watchdog/da90??-wdt.txt
F: Documentation/devicetree/bindings/sound/da[79]*.txt
F: drivers/gpio/gpio-da90??.c
F: drivers/hwmon/da90??-hwmon.c
S: Maintained
F: drivers/i2c/busses/i2c-diolan-u2c.c
-DIRECT ACCESS (DAX)
+FILESYSTEM DIRECT ACCESS (DAX)
F: include/linux/dax.h
F: include/trace/events/fs_dax.h
+DEVICE DIRECT ACCESS (DAX)
+S: Supported
+F: drivers/dax/
+
DIRECTORY NOTIFICATION (DNOTIFY)
F: drivers/dma/
F: include/linux/dmaengine.h
F: Documentation/devicetree/bindings/dma/
-F: Documentation/dmaengine/
+F: Documentation/driver-api/dmaengine/
T: git git://git.infradead.org/users/vkoul/slave-dma.git
DMA MAPPING HELPERS
DRM DRIVERS AND MISC GPU PATCHES
W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
S: Maintained
F: Documentation/devicetree/bindings/display/renesas,du.txt
DRM DRIVERS FOR ROCKCHIP
S: Maintained
F: drivers/gpu/drm/rockchip/
S: Maintained
F: drivers/edac/highbank*
-EDAC-CAVIUM
+EDAC-CAVIUM OCTEON
S: Supported
F: drivers/edac/octeon_edac*
+
+EDAC-CAVIUM THUNDERX
+S: Supported
F: drivers/edac/thunderx_edac*
EDAC-CORE
F: include/video/s1d13xxxfb.h
ERRSEQ ERROR TRACKING INFRASTRUCTURE
-M: Jeff Layton <jlayton@poochiereds.net>
+M: Jeff Layton <jlayton@kernel.org>
S: Maintained
F: lib/errseq.c
F: include/linux/errseq.h
Extended Verification Module (EVM)
S: Supported
F: security/integrity/evm/
F: include/uapi/scsi/fc/
FILE LOCKING (flock() and fcntl()/lockf())
-M: Jeff Layton <jlayton@poochiereds.net>
+M: Jeff Layton <jlayton@kernel.org>
S: Maintained
FPGA MANAGER FRAMEWORK
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
-M: Dan Douglass <dan.douglass@nxp.com>
+M: Aymen Sghaier <aymen.sghaier@nxp.com>
S: Maintained
F: drivers/crypto/caam/
S: Supported
F: fs/crypto/
F: include/linux/fscrypt*.h
+F: Documentation/filesystems/fscrypt.rst
FUJITSU FR-V (FRV) PORT
S: Orphan
W: http://hwmon.wiki.kernel.org/
-T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
F: Documentation/hwmon/
F: drivers/net/ethernet/hisilicon/
F: Documentation/devicetree/bindings/net/hisilicon*.txt
+HISILICON PMU DRIVER
+W: http://www.hisilicon.com
+S: Supported
+F: drivers/perf/hisilicon
+F: Documentation/perf/hisi-pmu.txt
+
HISILICON ROCE DRIVER
F: Documentation/networking/ieee802154.txt
IFE PROTOCOL
-M: Yotam Gigi <yotamg@mellanox.com>
+M: Yotam Gigi <yotam.gi@gmail.com>
F: net/ife
F: include/net/ife.h
F: drivers/usb/atm/ueagle-atm.c
IMGTEC ASCII LCD DRIVER
-M: Paul Burton <paul.burton@imgtec.com>
+M: Paul Burton <paul.burton@mips.com>
S: Maintained
F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
F: drivers/auxdisplay/img-ascii-lcd.c
INFINIBAND SUBSYSTEM
W: http://www.openfabrics.org/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
S: Supported
F: security/integrity/ima/
W: https://01.org/igvt-g
-T: git https://github.com/01org/gvt-linux.git
+T: git https://github.com/intel/gvt-linux.git
S: Supported
F: drivers/gpu/drm/i915/gvt/
F: drivers/net/wimax/i2400m/
F: include/uapi/linux/wimax/i2400m.h
+INTEL WMI THUNDERBOLT FORCE POWER DRIVER
+S: Maintained
+F: drivers/platform/x86/intel-wmi-thunderbolt.c
+
INTEL(R) TRACE HUB
S: Supported
W: http://jfs.sourceforge.net/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
+T: git git://github.com/kleikamp/linux-shaggy.git
S: Maintained
F: Documentation/filesystems/jfs.txt
F: fs/jfs/
F: scripts/Makefile.kasan
KCONFIG
-T: git git://gitorious.org/linux-kconfig/linux-kconfig
-S: Maintained
+S: Orphan
F: Documentation/kbuild/kconfig-language.txt
F: scripts/kconfig/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
S: Maintained
KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
-M: Jeff Layton <jlayton@poochiereds.net>
+M: Jeff Layton <jlayton@kernel.org>
W: http://nfs.sourceforge.net/
T: git git://linux-nfs.org/~bfields/linux.git
F: arch/x86/kvm/
F: arch/x86/include/uapi/asm/kvm*
F: arch/x86/include/asm/kvm*
+F: arch/x86/include/asm/pvclock-abi.h
F: arch/x86/kernel/kvm.c
F: arch/x86/kernel/kvmclock.c
KEYS-ENCRYPTED
S: Supported
F: Documentation/security/keys/trusted-encrypted.rst
F: security/keys/encrypted-keys/
KEYS-TRUSTED
-L: linux-security-module@vger.kernel.org
+L: linux-integrity@vger.kernel.org
S: Supported
F: Documentation/security/keys/trusted-encrypted.rst
F: include/linux/kgdb.h
F: kernel/debug/
-KMEMCHECK
-S: Maintained
-F: Documentation/dev-tools/kmemcheck.rst
-F: arch/x86/include/asm/kmemcheck.h
-F: arch/x86/mm/kmemcheck/
-F: include/linux/kmemcheck.h
-F: mm/kmemcheck.c
-
KMEMLEAK
S: Maintained
F: Documentation/scsi/53c700.txt
F: drivers/scsi/53c700*
+LEAKING_ADDRESSES
+S: Maintained
+F: scripts/leaking_addresses.pl
+
LED SUBSYSTEM
F: include/net/mac80211.h
F: net/mac80211/
F: drivers/net/wireless/mac80211_hwsim.[ch]
+F: Documentation/networking/mac80211_hwsim/README
MAILBOX API
F: drivers/net/ethernet/mellanox/mlxsw/
MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
S: Supported
W: http://www.mellanox.com
F: arch/mips/
MIPS BOSTON DEVELOPMENT BOARD
-M: Paul Burton <paul.burton@imgtec.com>
+M: Paul Burton <paul.burton@mips.com>
S: Maintained
F: Documentation/devicetree/bindings/clock/img,boston-clock.txt
F: include/dt-bindings/clock/boston-clock.h
MIPS GENERIC PLATFORM
-M: Paul Burton <paul.burton@imgtec.com>
+M: Paul Burton <paul.burton@mips.com>
S: Supported
F: arch/mips/generic/
F: drivers/*/*/*loongson1*
MIPS RINT INSTRUCTION EMULATION
-M: Aleksandar Markovic <aleksandar.markovic@imgtec.com>
+M: Aleksandar Markovic <aleksandar.markovic@mips.com>
S: Supported
F: arch/mips/math-emu/sp_rint.c
F: include/linux/mux/
F: drivers/mux/
-MULTISOUND SOUND DRIVER
-S: Maintained
-F: Documentation/sound/oss/MultiSound
-F: sound/oss/msnd*
-
MULTITECH MULTIPORT CARD (ISICOM)
S: Orphan
F: drivers/tty/isicom.c
F: Documentation/devicetree/bindings/display/mxsfb-drm.txt
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Hyong-Youb Kim <hykim@myri.com>
+M: Chris Lee <christopher.lee@cspi.com>
-W: https://www.myricom.com/support/downloads/myri10ge.html
+W: https://www.cspi.com/ethernet-products/support/downloads/
S: Supported
F: drivers/net/ethernet/myricom/myri10ge/
S: Maintained
F: net/dsa/
F: include/net/dsa.h
+F: include/linux/dsa/
F: drivers/net/dsa/
NETWORKING [GENERAL]
F: include/uapi/linux/net.h
F: include/uapi/linux/netdevice.h
F: include/uapi/linux/net_namespace.h
-F: tools/net/
F: tools/testing/selftests/net/
+F: lib/net_utils.c
F: lib/random32.c
NETWORKING [IPSEC]
F: drivers/ntb/hw/idt/
NTB INTEL DRIVER
S: Supported
-W: https://github.com/jonmason/ntb/wiki
-T: git git://github.com/jonmason/ntb.git
+W: https://github.com/davejiang/linux/wiki
+T: git https://github.com/davejiang/linux.git
F: drivers/ntb/hw/intel/
NTFS FILESYSTEM
W: http://openrisc.io
S: Maintained
+F: Documentation/devicetree/bindings/openrisc/
+F: Documentation/openrisc/
F: arch/openrisc/
+F: drivers/irqchip/irq-ompic.c
+F: drivers/irqchip/irq-or1k-*
OPENVSWITCH
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
-F: drivers/base/power/opp/
+F: drivers/opp/
F: include/linux/pm_opp.h
F: Documentation/power/opp.txt
F: Documentation/devicetree/bindings/opp/
PCI DRIVER FOR MICROSEMI SWITCHTEC
S: Maintained
F: Documentation/ABI/testing/sysfs-class-switchtec
F: drivers/pci/switch/switchtec*
F: include/uapi/linux/switchtec_ioctl.h
+F: include/linux/switchtec.h
+F: drivers/ntb/hw/mscc/
PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
PCI ENDPOINT SUBSYSTEM
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/pci-endpoint.git
S: Supported
F: arch/x86/pci/
F: arch/x86/kernel/quirks.c
+PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
+Q: http://patchwork.ozlabs.org/project/linux-pci/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/
+S: Supported
+F: drivers/pci/host/
+F: drivers/pci/dwc/
+
PCIE DRIVER FOR AXIS ARTPEC
PCIE DRIVER FOR HISILICON
S: Maintained
F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
F: Documentation/devicetree/bindings/pci/pcie-kirin.txt
F: drivers/pci/dwc/pcie-kirin.c
+PCIE DRIVER FOR HISILICON STB
+S: Maintained
+F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
+F: drivers/pci/dwc/pcie-histb.c
+
PCIE DRIVER FOR MEDIATEK
F: Documentation/devicetree/bindings/pci/rockchip-pcie.txt
F: drivers/pci/host/pcie-rockchip.c
+PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC
+S: Maintained
+F: Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
+F: drivers/pci/host/pci-v3-semi.c
+
PCIE DRIVER FOR ST SPEAR13XX
F: crypto/pcrypt.c
F: include/crypto/pcrypt.h
+PEAQ WMI HOTKEYS DRIVER
+S: Maintained
+F: drivers/platform/x86/peaq-wmi.c
+
PER-CPU MEMORY ALLOCATOR
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git sh-pfc
S: Maintained
F: drivers/pinctrl/sh-pfc/
F: drivers/pinctrl/spear/
PISTACHIO SOC SUPPORT
-S: Maintained
+S: Odd Fixes
F: arch/mips/pistachio/
F: arch/mips/include/asm/mach-pistachio/
F: arch/mips/boot/dts/img/pistachio*
F: drivers/block/ps3vram.c
PSAMPLE PACKET SAMPLING SUPPORT:
-M: Yotam Gigi <yotamg@mellanox.com>
+M: Yotam Gigi <yotam.gi@gmail.com>
S: Maintained
F: net/psample
F: include/net/psample.h
QAT DRIVER
S: Supported
F: drivers/crypto/qat/
QLOGIC QL4xxx RDMA DRIVER
S: Supported
RENESAS CLOCK DRIVERS
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git clk-renesas
S: Supported
F: drivers/clk/renesas/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
S: Maintained
F: Documentation/rfkill.txt
+F: Documentation/ABI/stable/sysfs-class-rfkill
F: net/rfkill/
RHASHTABLE
F: drivers/mtd/nand/r852.c
F: drivers/mtd/nand/r852.h
+RISC-V ARCHITECTURE
+T: git https://github.com/riscv/riscv-linux
+S: Supported
+F: arch/riscv/
+K: riscv
+N: riscv
+
ROCCAT DRIVERS
W: http://sourceforge.net/projects/roccat/
F: include/linux/hid-roccat*
F: Documentation/ABI/*/sysfs-driver-hid-roccat*
+ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER
+S: Maintained
+F: drivers/media/platform/rockchip/rga/
+F: Documentation/devicetree/bindings/media/rockchip-rga.txt
+
ROCKER DRIVER
S: Maintained
F: drivers/crypto/exynos-rng.c
-F: Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt
+F: Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
SAMSUNG FRAMEBUFFER DRIVER
S: Maintained
F: drivers/mmc/host/sdhci-spear.c
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) TI OMAP DRIVER
+S: Maintained
+F: drivers/mmc/host/sdhci-omap.c
+
SECURE ENCRYPTING DEVICE (SED) OPAL DRIVER
S: Supported
F: block/sed*
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git
S: Supported
-F: drivers/md/
+F: drivers/md/Makefile
+F: drivers/md/Kconfig
+F: drivers/md/md*
+F: drivers/md/raid*
F: include/linux/raid/
F: include/uapi/linux/raid/
S: Supported
F: Documentation/process/stable-kernel-rules.rst
+STAGING - ATOMISP DRIVER
+S: Maintained
+F: drivers/staging/media/atomisp/
+
STAGING - COMEDI
F: arch/arc/boot/dts/ax*
F: Documentation/devicetree/bindings/arc/axs10*
+SYNOPSYS AXS10x RESET CONTROLLER DRIVER
+S: Supported
+F: drivers/reset/reset-axs10x.c
+F: Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt
+
+SYNOPSYS DESIGNWARE APB GPIO DRIVER
+S: Maintained
+F: drivers/gpio/gpio-dwapb.c
+F: Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
+
SYNOPSYS DESIGNWARE DMAC DRIVER
S: Maintained
F: include/linux/dma/dw.h
F: include/linux/platform_data/dma-dw.h
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
S: Maintained
F: drivers/thunderbolt/
+F: include/linux/thunderbolt.h
+
+THUNDERBOLT NETWORK DRIVER
+S: Maintained
+F: drivers/net/thunderbolt.c
THUNDERX GPIO DRIVER
TPM DEVICE DRIVER
-W: http://tpmdd.sourceforge.net
-Q: https://patchwork.kernel.org/project/tpmdd-devel/list/
+Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.infradead.org/users/jjs/linux-tpmdd.git
S: Maintained
F: drivers/char/tpm/
-TPM IBM_VTPM DEVICE DRIVER
-W: http://tpmdd.sourceforge.net
-S: Maintained
-F: drivers/char/tpm/tpm_ibmvtpm*
-
TRACING
S: Maintained
-F: drivers/hid/hid-udraw.c
+F: drivers/hid/hid-udraw-ps3.c
UFS FILESYSTEM
F: include/linux/virtio_vsock.h
F: include/uapi/linux/virtio_vsock.h
F: include/uapi/linux/vsockmon.h
+F: include/uapi/linux/vm_sockets_diag.h
+F: net/vmw_vsock/diag.c
F: net/vmw_vsock/af_vsock_tap.c
F: net/vmw_vsock/virtio_transport_common.c
F: net/vmw_vsock/virtio_transport.c
F: drivers/net/vsockmon.c
F: drivers/vhost/vsock.c
F: drivers/vhost/vsock.h
+F: tools/testing/vsock/
VIRTIO CONSOLE DRIVER
S: Supported
F: drivers/s390/virtio/
+F: arch/s390/include/uapi/asm/virtio-ccw.h
VIRTIO GPU DRIVER
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
F: Documentation/driver-api/vme.rst
F: drivers/staging/vme/
F: drivers/vme/
S: Supported
W: http://wireless.kernel.org/en/users/Drivers/wil6210
F: drivers/net/wireless/ath/wil6210/
-F: include/uapi/linux/wil6210_uapi.h
WIMAX STACK
F: Documentation/devicetree/bindings/regulator/arizona-regulator.txt
F: Documentation/devicetree/bindings/mfd/arizona.txt
F: Documentation/devicetree/bindings/mfd/wm831x.txt
+F: Documentation/devicetree/bindings/sound/wlf,arizona.txt
F: arch/arm/mach-s3c64xx/mach-crag6410*
F: drivers/clk/clk-wm83*.c
F: drivers/extcon/extcon-arizona.c
F: drivers/*/xen-*front.c
F: drivers/xen/
F: arch/x86/include/asm/xen/
+F: arch/x86/include/asm/pvclock-abi.h
F: include/xen/
F: include/uapi/xen/
F: Documentation/ABI/stable/sysfs-hypervisor-xen
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel OnChip System Fabric MailBox access support
*/
*/
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb);
+ /**
+ * iosf_mbi_unregister_pmic_bus_access_notifier_unlocked - Unregister PMIC bus
+ * notifier, unlocked
+ *
+ * Like iosf_mbi_unregister_pmic_bus_access_notifier(), but for use when the
+ * caller has already called iosf_mbi_punit_acquire() itself.
+ *
+ * @nb: notifier_block to unregister
+ */
+ int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
+ struct notifier_block *nb);
+
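/*
 * A minimal usage sketch for the unlocked variant above: a caller that
 * already owns the P-Unit unregisters its notifier without re-acquiring it.
 * The "pmic_nb" notifier_block and the teardown helper are assumed names
 * used only for illustration; the iosf_mbi_*() calls are existing helpers
 * declared in this header.
 */
static struct notifier_block pmic_nb;	/* assumed registered earlier */

static void pmic_nb_teardown(void)
{
	iosf_mbi_punit_acquire();
	/* P-Unit is already held, so the unlocked unregister is required. */
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(&pmic_nb);
	iosf_mbi_punit_release();
}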
/**
* iosf_mbi_call_pmic_bus_access_notifier_chain - Call PMIC bus notifier chain
*
*/
int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v);
+ /**
+ * iosf_mbi_assert_punit_acquired - Assert that the P-Unit has been acquired.
+ */
+ void iosf_mbi_assert_punit_acquired(void);
+
#else /* CONFIG_IOSF_MBI is not enabled */
static inline
bool iosf_mbi_available(void)
return 0;
}
+ static inline int
+ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb)
+ {
+ return 0;
+ }
+
static inline
int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
{
return 0;
}
+ static inline void iosf_mbi_assert_punit_acquired(void) {}
+
#endif /* CONFIG_IOSF_MBI */
#endif /* IOSF_MBI_SYMS_H */
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
- subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+ # Add a set of useful warning flags and enable -Werror for CI to prevent
+ # trivial mistakes from creeping in. We have to do this piecemeal as we reject
+ # any patch that isn't warning clean, so when turning on -Wall -Wextra (or W=1)
+ # we need to filter out dubious warnings. Still, it is in our interest
+ # to keep running locally with W=1 C=1 until we are completely clean.
+ #
+ # Note that the danger in using -Wall -Wextra is that when CI updates gcc we
+ # will most likely get a sudden build breakage... Hopefully we will fix
+ # new warnings before CI updates!
+ subdir-ccflags-y := -Wall -Wextra
+ subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
+ subdir-ccflags-y += $(call cc-disable-warning, type-limits)
+ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
+ subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
+ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
+
+ # Fine-grained warning disables
+ CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
+ CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init)
+
subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
intel_uc_fw.o \
intel_guc.o \
intel_guc_ct.o \
- intel_guc_log.o \
intel_guc_fw.o \
- intel_huc.o \
- i915_guc_submission.o
+ intel_guc_log.o \
+ intel_guc_submission.o \
+ intel_huc.o
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
i915_oa_kblgt2.o \
i915_oa_kblgt3.o \
i915_oa_glk.o \
- i915_oa_cflgt2.o
+ i915_oa_cflgt2.o \
+ i915_oa_cflgt3.o \
+ i915_oa_cnl.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
+# SPDX-License-Identifier: GPL-2.0
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
- execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
+ execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
u64 h_addr;
int ret;
- ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
+ ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
&h_addr);
if (ret)
return ret;
- *h_index = h_addr >> GTT_PAGE_SHIFT;
+ *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
return 0;
}
u64 g_addr;
int ret;
- ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
+ ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
&g_addr);
if (ret)
return ret;
- *g_index = g_addr >> GTT_PAGE_SHIFT;
+ *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
return 0;
}
struct gtt_type_table_entry {
int entry_type;
+ int pt_type;
int next_pt_type;
int pse_entry_type;
};
- #define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
+ #define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
[type] = { \
.entry_type = e_type, \
+ .pt_type = cpt_type, \
.next_pt_type = npt_type, \
.pse_entry_type = pse_type, \
}
static struct gtt_type_table_entry gtt_type_table[] = {
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+ GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
GTT_TYPE_GGTT_PTE,
GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
};
return gtt_type_table[type].next_pt_type;
}
+ static inline int get_pt_type(int type)
+ {
+ return gtt_type_table[type].pt_type;
+ }
+
static inline int get_entry_type(int type)
{
return gtt_type_table[type].entry_type;
#define GTT_HAW 46
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
return false;
e->type = get_entry_type(e->type);
- if (!(e->val64 & (1 << 7)))
+ if (!(e->val64 & BIT(7)))
return false;
e->type = get_pse_type(e->type);
|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
return (e->val64 != 0);
else
- return (e->val64 & (1 << 0));
+ return (e->val64 & BIT(0));
}
static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
- e->val64 &= ~(1 << 0);
+ e->val64 &= ~BIT(0);
+ }
+
+ static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
+ {
+ e->val64 |= BIT(0);
}
/*
*/
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
- unsigned long x = (gma >> GTT_PAGE_SHIFT);
+ unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
trace_gma_index(__func__, gma, x);
return x;
.get_entry = gtt_get_entry64,
.set_entry = gtt_set_entry64,
.clear_present = gtt_entry_clear_present,
+ .set_present = gtt_entry_set_present,
.test_present = gen8_gtt_test_present,
.test_pse = gen8_gtt_test_pse,
.get_pfn = gen8_gtt_get_pfn,
return -EINVAL;
ret = ops->get_entry(page_table, e, index, guest,
- spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
spt->vgpu);
if (ret)
return ret;
return -EINVAL;
return ops->set_entry(page_table, e, index, guest,
- spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
spt->vgpu);
}
spt->shadow_page.type, e, index, false)
/**
- * intel_vgpu_init_guest_page - init a guest page data structure
+ * intel_vgpu_init_page_track - init a page track data structure
* @vgpu: a vGPU
- * @p: a guest page data structure
+ * @t: a page track data structure
* @gfn: guest memory page frame number
- * @handler: function will be called when target guest memory page has
+ * @handler: the function that will be called when the target guest memory
* page has been modified.
*
- * This function is called when user wants to track a guest memory page.
+ * This function is called when a user wants to prepare a page track data
+ * structure to track a guest memory page.
*
* Returns:
* Zero on success, negative error code if failed.
*/
- int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p,
+ int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
+ struct intel_vgpu_page_track *t,
unsigned long gfn,
int (*handler)(void *, u64, void *, int),
void *data)
{
- INIT_HLIST_NODE(&p->node);
+ INIT_HLIST_NODE(&t->node);
- p->writeprotection = false;
- p->gfn = gfn;
- p->handler = handler;
- p->data = data;
- p->oos_page = NULL;
- p->write_cnt = 0;
+ t->tracked = false;
+ t->gfn = gfn;
+ t->handler = handler;
+ t->data = data;
- hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
+ hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
return 0;
}
- static int detach_oos_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_oos_page *oos_page);
-
/**
- * intel_vgpu_clean_guest_page - release the resource owned by guest page data
- * structure
+ * intel_vgpu_clean_page_track - release a page track data structure
* @vgpu: a vGPU
- * @p: a tracked guest page
+ * @t: a page track data structure
*
- * This function is called when user tries to stop tracking a guest memory
- * page.
+ * This function is called before a user frees a page track data structure.
*/
- void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p)
+ void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
+ struct intel_vgpu_page_track *t)
{
- if (!hlist_unhashed(&p->node))
- hash_del(&p->node);
-
- if (p->oos_page)
- detach_oos_page(vgpu, p->oos_page);
+ if (!hlist_unhashed(&t->node))
+ hash_del(&t->node);
- if (p->writeprotection)
- intel_gvt_hypervisor_unset_wp_page(vgpu, p);
+ if (t->tracked)
+ intel_gvt_hypervisor_disable_page_track(vgpu, t);
}
/**
- * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
+ * intel_vgpu_find_tracked_page - find a tracked guest page
* @vgpu: a vGPU
* @gfn: guest memory page frame number
*
- * This function is called when emulation logic wants to know if a trapped GFN
- * is a tracked guest page.
+ * This function is called when the emulation layer wants to figure out if a
+ * trapped GFN is a tracked guest page.
*
* Returns:
- * Pointer to guest page data structure, NULL if failed.
+ * Pointer to page track data structure, NULL if not found.
*/
- struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+ struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
struct intel_vgpu *vgpu, unsigned long gfn)
{
- struct intel_vgpu_guest_page *p;
+ struct intel_vgpu_page_track *t;
- hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
- p, node, gfn) {
- if (p->gfn == gfn)
- return p;
+ hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
+ t, node, gfn) {
+ if (t->gfn == gfn)
+ return t;
}
return NULL;
}
+ static int init_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p,
+ unsigned long gfn,
+ int (*handler)(void *, u64, void *, int),
+ void *data)
+ {
+ p->oos_page = NULL;
+ p->write_cnt = 0;
+
+ return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
+ }
+
+ static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page);
+
+ static void clean_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+ {
+ if (p->oos_page)
+ detach_oos_page(vgpu, p->oos_page);
+
+ intel_vgpu_clean_page_track(vgpu, &p->track);
+ }
+
static inline int init_shadow_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_shadow_page *p, int type)
+ struct intel_vgpu_shadow_page *p, int type, bool hash)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
dma_addr_t daddr;
INIT_HLIST_NODE(&p->node);
- p->mfn = daddr >> GTT_PAGE_SHIFT;
- hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
+ p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
+ if (hash)
+ hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
return 0;
}
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+ dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
PCI_DMA_BIDIRECTIONAL);
if (!hlist_unhashed(&p->node))
return NULL;
}
+ #define page_track_to_guest_page(ptr) \
+ container_of(ptr, struct intel_vgpu_guest_page, track)
+
#define guest_page_to_ppgtt_spt(ptr) \
container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
clean_shadow_page(spt->vgpu, &spt->shadow_page);
- intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
+ clean_guest_page(spt->vgpu, &spt->guest_page);
list_del_init(&spt->post_shadow_list);
free_spt(spt);
ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}
- static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+ static int ppgtt_handle_guest_write_page_table_bytes(
+ struct intel_vgpu_guest_page *gpt,
u64 pa, void *p_data, int bytes);
- static int ppgtt_write_protection_handler(void *gp, u64 pa,
+ static int ppgtt_write_protection_handler(void *data, u64 pa,
void *p_data, int bytes)
{
- struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+ struct intel_vgpu_page_track *t = data;
+ struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
int ret;
if (bytes != 4 && bytes != 8)
return -EINVAL;
- if (!gpt->writeprotection)
+ if (!t->tracked)
return -EINVAL;
- ret = ppgtt_handle_guest_write_page_table_bytes(gp,
+ ret = ppgtt_handle_guest_write_page_table_bytes(p,
pa, p_data, bytes);
if (ret)
return ret;
* TODO: guest page type may be different from shadow page type,
* when we support PSE pages in the future.
*/
- ret = init_shadow_page(vgpu, &spt->shadow_page, type);
+ ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
if (ret) {
gvt_vgpu_err("fail to initialize shadow page for spt\n");
goto err;
}
- ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
+ ret = init_guest_page(vgpu, &spt->guest_page,
gfn, ppgtt_write_protection_handler, NULL);
if (ret) {
gvt_vgpu_err("fail to initialize guest page for spt\n");
((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
#define pt_entries(spt) \
- (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
+ (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
#define for_each_present_guest_entry(spt, e, i) \
for (i = 0; i < pt_entries(spt); i++) \
int v = atomic_read(&spt->refcount);
trace_spt_change(spt->vgpu->id, "die", spt,
- spt->guest_page.gfn, spt->shadow_page.type);
+ spt->guest_page.track.gfn, spt->shadow_page.type);
trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
}
release:
trace_spt_change(spt->vgpu->id, "release", spt,
- spt->guest_page.gfn, spt->shadow_page.type);
+ spt->guest_page.track.gfn, spt->shadow_page.type);
ppgtt_free_shadow_page(spt);
return 0;
fail:
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s = NULL;
struct intel_vgpu_guest_page *g;
+ struct intel_vgpu_page_track *t;
int ret;
if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
goto fail;
}
- g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
- if (g) {
+ t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
+ if (t) {
+ g = page_track_to_guest_page(t);
s = guest_page_to_ppgtt_spt(g);
ppgtt_get_shadow_page(s);
} else {
goto fail;
}
- ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
+ ret = intel_gvt_hypervisor_enable_page_track(vgpu,
+ &s->guest_page.track);
if (ret)
goto fail;
if (ret)
goto fail;
- trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
+ trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
s->shadow_page.type);
}
return s;
int ret;
trace_spt_change(spt->vgpu->id, "born", spt,
- spt->guest_page.gfn, spt->shadow_page.type);
+ spt->guest_page.track.gfn, spt->shadow_page.type);
if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
for_each_present_guest_entry(spt, &ge, i) {
old.type = new.type = get_entry_type(spt->guest_page_type);
old.val64 = new.val64 = 0;
- for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
- index++) {
+ for (index = 0; index < (I915_GTT_PAGE_SIZE >>
+ info->gtt_entry_size_shift); index++) {
ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
ops->get_entry(NULL, &new, index, true,
- oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
+ oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);
if (old.val64 == new.val64
&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
struct intel_gvt *gvt = vgpu->gvt;
int ret;
- ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
- oos_page->mem, GTT_PAGE_SIZE);
+ ret = intel_gvt_hypervisor_read_gpa(vgpu,
+ gpt->track.gfn << I915_GTT_PAGE_SHIFT,
+ oos_page->mem, I915_GTT_PAGE_SIZE);
if (ret)
return ret;
{
int ret;
- ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
+ ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
if (ret)
return ret;
gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
- return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
+ return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
}
/**
return 0;
}
- static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+ static int ppgtt_handle_guest_write_page_table_bytes(
+ struct intel_vgpu_guest_page *gpt,
u64 pa, void *p_data, int bytes)
{
- struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
mm->shadow_page_table = mem + mm->page_table_entry_size;
} else if (mm->type == INTEL_GVT_MM_GGTT) {
mm->page_table_entry_cnt =
- (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
+ (gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
mm->page_table_entry_size = mm->page_table_entry_cnt *
info->gtt_entry_size;
mem = vzalloc(mm->page_table_entry_size);
gma_ops->gma_to_ggtt_pte_index(gma));
if (ret)
goto err;
- gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
- + (gma & ~GTT_PAGE_MASK);
+ gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+ + (gma & ~I915_GTT_PAGE_MASK);
trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
return gpa;
}
}
- gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
- + (gma & ~GTT_PAGE_MASK);
+ gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+ + (gma & ~I915_GTT_PAGE_MASK);
trace_gma_translate(vgpu->id, "ppgtt", 0,
mm->page_table_level, gma, gpa);
if (bytes != 4 && bytes != 8)
return -EINVAL;
- gma = g_gtt_index << GTT_PAGE_SHIFT;
+ gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
/* the VM may configure the whole GM space when ballooning is used */
if (!vgpu_gmadr_is_valid(vgpu, gma))
* update the entry in this situation p2m will fail
* setting the shadow entry to point to a scratch page
*/
- ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
}
} else {
m = e;
- ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
}
ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- int page_entry_num = GTT_PAGE_SIZE >>
+ int page_entry_num = I915_GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
int i;
return -ENOMEM;
}
gtt->scratch_pt[type].page_mfn =
- (unsigned long)(daddr >> GTT_PAGE_SHIFT);
+ (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
vgpu->id, type, gtt->scratch_pt[type].page_mfn);
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
if (vgpu->gtt.scratch_pt[i].page != NULL) {
daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
- GTT_PAGE_SHIFT);
+ I915_GTT_PAGE_SHIFT);
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
__free_page(vgpu->gtt.scratch_pt[i].page);
vgpu->gtt.scratch_pt[i].page = NULL;
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_vgpu_mm *ggtt_mm;
- hash_init(gtt->guest_page_hash_table);
+ hash_init(gtt->tracked_guest_page_hash_table);
hash_init(gtt->shadow_page_hash_table);
INIT_LIST_HEAD(&gtt->mm_list_head);
__free_page(virt_to_page(page));
return -ENOMEM;
}
- gvt->gtt.scratch_ggtt_page = virt_to_page(page);
- gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
+
+ gvt->gtt.scratch_page = virt_to_page(page);
+ gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
if (enable_out_of_sync) {
ret = setup_spt_oos(gvt);
if (ret) {
gvt_err("fail to initialize SPT oos\n");
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
- __free_page(gvt->gtt.scratch_ggtt_page);
+ __free_page(gvt->gtt.scratch_page);
return ret;
}
}
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
struct device *dev = &gvt->dev_priv->drm.pdev->dev;
- dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
- GTT_PAGE_SHIFT);
+ dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
+ I915_GTT_PAGE_SHIFT);
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
- __free_page(gvt->gtt.scratch_ggtt_page);
+ __free_page(gvt->gtt.scratch_page);
if (enable_out_of_sync)
clean_spt_oos(gvt);
memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
e.type = GTT_TYPE_GGTT_PTE;
- ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+ ops->set_pfn(&e, gvt->gtt.scratch_mfn);
e.val64 |= _PAGE_PRESENT;
index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
*/
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
- int i;
-
ppgtt_free_all_shadow_page(vgpu);
/* Shadow pages are only created when there is no page
intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
intel_vgpu_reset_ggtt(vgpu);
-
- /* clear scratch page for security */
- for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
- if (vgpu->gtt.scratch_pt[i].page != NULL)
- memset(page_address(vgpu->gtt.scratch_pt[i].page),
- 0, PAGE_SIZE);
- }
}
return 0;
}
- static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+ /**
+ * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * Ring ID on success, negative error code if failed.
+ */
+ int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+ unsigned int offset)
{
enum intel_engine_id id;
struct intel_engine_cs *engine;
- reg &= ~GENMASK(11, 0);
+ offset &= ~GENMASK(11, 0);
for_each_engine(engine, gvt->dev_priv, id) {
- if (engine->mmio_base == reg)
+ if (engine->mmio_base == offset)
return id;
}
- return -1;
+ return -ENODEV;
}
#define offset_to_fence_num(offset) \
(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
- static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+ void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
switch (reason) {
case GVT_FAILSAFE_UNSUPPORTED_GUEST:
break;
case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
pr_err("Graphics resource is not enough for the guest\n");
+ case GVT_FAILSAFE_GUEST_ERR:
+ pr_err("GVT Internal error for the guest\n");
default:
break;
}
return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
+ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+ {
+ u32 value = *(u32 *)p_data;
+ int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+
+ if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
+ gvt_vgpu_err("VM(%d) write invalid HWSP address, reg:0x%x, value:0x%x\n",
+ vgpu->id, offset, value);
+ return -EINVAL;
+ }
+ /*
+ * Need to emulate all the HWSP register writes to ensure the host can
+ * update the VM CSB status correctly. The registers listed here can
+ * support BDW, SKL or other platforms with the same HWSP registers.
+ */
+ if (unlikely(ring_id < 0 || ring_id > I915_NUM_ENGINES)) {
+ gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n",
+ vgpu->id, offset);
+ return -EINVAL;
+ }
+ vgpu->hws_pga[ring_id] = value;
+ gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
+ vgpu->id, value, offset);
+
+ return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
+ }
+
static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
}
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 v = *(u32 *)p_data;
-
- if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
- return intel_vgpu_default_mmio_write(vgpu,
- offset, p_data, bytes);
-
- switch (offset) {
- case 0x4ddc:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
- break;
- case 0x42080:
- /* bypass WaCompressedResourceDisplayNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
- break;
- case 0xe194:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
- break;
- case 0x7014:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ring_id;
+ u32 ring_base;
+
+ ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
+ /*
+ * Read the HW reg in the following cases:
+ * a. the offset isn't a ring mmio
+ * b. the offset's ring is running on hw
+ * c. the offset is a ring timestamp mmio
+ */
+ if (ring_id >= 0)
+ ring_base = dev_priv->engine[ring_id]->mmio_base;
+
+ if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
+ mmio_hw_access_pre(dev_priv);
+ vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+ mmio_hw_access_post(dev_priv);
+ }
- mmio_hw_access_pre(dev_priv);
- vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
- mmio_hw_access_post(dev_priv);
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
int ret = 0;
if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
return -EINVAL;
- execlist = &vgpu->execlist[ring_id];
+ execlist = &vgpu->submission.execlist[ring_id];
- execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
+ execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct intel_vgpu_submission *s = &vgpu->submission;
u32 data = *(u32 *)p_data;
- int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
bool enable_execlist;
+ int ret;
write_vreg(vgpu, offset, p_data, bytes);
(enable_execlist ? "enabling" : "disabling"),
ring_id);
- if (enable_execlist)
- intel_vgpu_start_schedule(vgpu);
+ if (!enable_execlist)
+ return 0;
+
+ if (s->active)
+ return 0;
+
+ ret = intel_vgpu_select_submission_ops(vgpu,
+ INTEL_VGPU_EXECLIST_SUBMISSION);
+ if (ret)
+ return ret;
+
+ intel_vgpu_start_schedule(vgpu);
}
return 0;
}
default:
return -EINVAL;
}
- set_bit(id, (void *)vgpu->tlb_handle_pending);
+ set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
return 0;
}
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
- MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x6e570, D_BDW_PLUS);
MMIO_D(0x65f10, D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
- MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
MMIO_D(0x45504, D_SKL_PLUS);
MMIO_D(0x45520, D_SKL_PLUS);
MMIO_D(0x46000, D_SKL_PLUS);
gvt->mmio.mmio_block = mmio_blocks;
gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
- gvt_dbg_mmio("traced %u virtual mmio registers\n",
- gvt->mmio.num_tracked_mmio);
return 0;
err:
intel_gvt_clean_mmio_info(gvt);
return ret;
}
+ /**
+ * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio
+ * @gvt: a GVT device
+ * @handler: the handler
+ * @data: private data given to handler
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
+ int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
+ void *data)
+ {
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ struct intel_gvt_mmio_info *e;
+ int i, j, ret;
+
+ hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
+ ret = handler(gvt, e->offset, data);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+ for (j = 0; j < block->size; j += 4) {
+ ret = handler(gvt,
+ INTEL_GVT_MMIO_OFFSET(block->offset) + j,
+ data);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+ }
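/*
 * An illustrative handler sketch for the iterator above: it simply counts
 * every tracked offset. The "count_tracked_mmio" name and its caller are
 * hypothetical; only the handler signature comes from
 * intel_gvt_for_each_tracked_mmio().
 */
static int count_tracked_mmio(struct intel_gvt *gvt, u32 offset, void *data)
{
	(*(unsigned int *)data)++;
	return 0;
}

/* usage sketch: intel_gvt_for_each_tracked_mmio(gvt, count_tracked_mmio, &num) */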
/**
* intel_vgpu_default_mmio_read - default MMIO read handler
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
- GTT_PAGE_SHIFT));
+ I915_GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("Invalid guest context descriptor\n");
- return -EINVAL;
+ return -EFAULT;
}
page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
- GTT_PAGE_SIZE);
+ I915_GTT_PAGE_SIZE);
kunmap(page);
i++;
}
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
- GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+ I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap(page);
return 0;
return i915_gem_context_force_single_submission(req->ctx);
}
+ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+ {
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+ i915_reg_t reg;
+
+ reg = RING_INSTDONE(ring_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ reg = RING_ACTHD(ring_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ reg = RING_ACTHD_UDW(ring_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ }
+
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
enum intel_engine_id ring_id = req->engine->id;
struct intel_vgpu_workload *workload;
+ unsigned long flags;
if (!is_gvt_request(req)) {
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
NULL, ring_id);
scheduler->engine_owner[ring_id] = NULL;
}
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
return NOTIFY_OK;
}
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
ring_id, workload->vgpu->id);
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+ save_ring_hw_state(workload->vgpu, ring_id);
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
*/
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct intel_vgpu *vgpu = workload->vgpu;
struct intel_ring *ring;
int ret;
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
+ if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
shadow_context_descriptor_update(shadow_ctx,
dev_priv->engine[ring_id]);
return ret;
}
- int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
int ring_id = workload->ring_id;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
- struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ret;
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
return ret;
}
+ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
+
+ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+ {
+ struct intel_gvt *gvt = workload->vgpu->gvt;
+ const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
+ struct intel_vgpu_shadow_bb *bb;
+ int ret;
+
+ list_for_each_entry(bb, &workload->shadow_bb, list) {
+ bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
+ if (IS_ERR(bb->vma)) {
+ ret = PTR_ERR(bb->vma);
+ goto err;
+ }
+
+ /* relocate shadow batch buffer */
+ bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
+ if (gmadr_bytes == 8)
+ bb->bb_start_cmd_va[2] = 0;
+
+ /* No one is going to touch shadow bb from now on. */
+ if (bb->clflush & CLFLUSH_AFTER) {
+ drm_clflush_virt_range(bb->va, bb->obj->base.size);
+ bb->clflush &= ~CLFLUSH_AFTER;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
+ if (ret)
+ goto err;
+
+ i915_gem_obj_finish_shmem_access(bb->obj);
+ bb->accessing = false;
+
+ i915_vma_move_to_active(bb->vma, workload->req, 0);
+ }
+ return 0;
+ err:
+ release_shadow_batch_buffer(workload);
+ return ret;
+ }
+
+ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+ {
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ int ring_id = workload->ring_id;
+ struct intel_vgpu_submission *s = &workload->vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap_atomic(page);
+
+ shadow_ring_context->bb_per_ctx_ptr.val =
+ (shadow_ring_context->bb_per_ctx_ptr.val &
+ (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
+ shadow_ring_context->rcs_indirect_ctx.val =
+ (shadow_ring_context->rcs_indirect_ctx.val &
+ (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
+
+ kunmap_atomic(shadow_ring_context);
+ return 0;
+ }
+
+ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+ {
+ struct i915_vma *vma;
+ unsigned char *per_ctx_va =
+ (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+ wa_ctx->indirect_ctx.size;
+
+ if (wa_ctx->indirect_ctx.size == 0)
+ return 0;
+
+ vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
+ 0, CACHELINE_BYTES, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ /* FIXME: we are not tracking our pinned VMA, leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+ wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
+
+ wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
+ memset(per_ctx_va, 0, CACHELINE_BYTES);
+
+ update_wa_ctx_2_shadow_ctx(wa_ctx);
+ return 0;
+ }
+
+ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+ {
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_shadow_bb *bb, *pos;
+
+ if (list_empty(&workload->shadow_bb))
+ return;
+
+ bb = list_first_entry(&workload->shadow_bb,
+ struct intel_vgpu_shadow_bb, list);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
+ if (bb->obj) {
+ if (bb->accessing)
+ i915_gem_obj_finish_shmem_access(bb->obj);
+
+ if (bb->va && !IS_ERR(bb->va))
+ i915_gem_object_unpin_map(bb->obj);
+
+ if (bb->vma && !IS_ERR(bb->vma)) {
+ i915_vma_unpin(bb->vma);
+ i915_vma_close(bb->vma);
+ }
+ __i915_gem_object_release_unless_active(bb->obj);
+ }
+ list_del(&bb->list);
+ kfree(bb);
+ }
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ }
+
+ static int prepare_workload(struct intel_vgpu_workload *workload)
+ {
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int ret = 0;
+
+ ret = intel_vgpu_pin_mm(workload->shadow_mm);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu pin mm\n");
+ return ret;
+ }
+
+ ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu sync oos pages\n");
+ goto err_unpin_mm;
+ }
+
+ ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to flush post shadow\n");
+ goto err_unpin_mm;
+ }
+
+ ret = intel_gvt_generate_request(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to generate request\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_batch_buffer(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
+ goto err_shadow_batch;
+ }
+
+ if (workload->prepare) {
+ ret = workload->prepare(workload);
+ if (ret)
+ goto err_shadow_wa_ctx;
+ }
+
+ return 0;
+ err_shadow_wa_ctx:
+ release_shadow_wa_ctx(&workload->wa_ctx);
+ err_shadow_batch:
+ release_shadow_batch_buffer(workload);
+ err_unpin_mm:
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+ return ret;
+ }
+
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
int ret = 0;
if (ret)
goto out;
- if (workload->prepare) {
- ret = workload->prepare(workload);
- if (ret) {
- engine->context_unpin(engine, shadow_ctx);
- goto out;
- }
+ ret = prepare_workload(workload);
+ if (ret) {
+ engine->context_unpin(engine, shadow_ctx);
+ goto out;
}
out:
gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
- atomic_inc(&workload->vgpu->running_workload_num);
+ atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
mutex_unlock(&gvt->lock);
return workload;
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
- GTT_PAGE_SHIFT));
+ I915_GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid guest context descriptor\n");
return;
page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
- GTT_PAGE_SIZE);
+ I915_GTT_PAGE_SIZE);
kunmap(page);
i++;
}
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
- GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+ I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap(page);
}
+ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+ {
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine;
+ struct intel_vgpu_workload *pos, *n;
+ unsigned int tmp;
+
+ /* free the unsubmitted workloads in the queues. */
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ list_for_each_entry_safe(pos, n,
+ &s->workload_q_head[engine->id], list) {
+ list_del_init(&pos->list);
+ intel_vgpu_destroy_workload(pos);
+ }
+ clear_bit(engine->id, s->shadow_ctx_desc_updated);
+ }
+ }
+
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- struct intel_vgpu_workload *workload;
- struct intel_vgpu *vgpu;
+ struct intel_vgpu_workload *workload =
+ scheduler->current_workload[ring_id];
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
int event;
mutex_lock(&gvt->lock);
- workload = scheduler->current_workload[ring_id];
- vgpu = workload->vgpu;
-
/* For a workload with a request, wait for the context
* switch to make sure the request is completed.
* For a workload without a request, complete the workload directly.
}
mutex_lock(&dev_priv->drm.struct_mutex);
/* unpin shadow ctx as the shadow_ctx update is done */
- engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+ engine->context_unpin(engine, s->shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
scheduler->current_workload[ring_id] = NULL;
list_del_init(&workload->list);
+
+ if (!workload->status) {
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+ }
+
+ if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ /* If workload->status is not successful, the HW GPU hit a
+ * hang or something went wrong in i915/GVT, and GVT won't
+ * inject a context switch interrupt to the guest. To the
+ * guest this error is effectively a vGPU hang, so we should
+ * emulate one: if there are pending workloads already
+ * submitted by the guest, clean them up the way real HW
+ * would.
+ *
+ * If we are in the middle of an engine reset, the pending
+ * workloads won't be submitted to the HW GPU and will be
+ * cleaned up later during the reset, so doing the workload
+ * cleanup here has no impact.
+ */
+ clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ }
+
workload->complete(workload);
- atomic_dec(&vgpu->running_workload_num);
+ atomic_dec(&s->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
if (gvt->scheduler.need_reschedule)
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
+ if (ret && (vgpu_is_vm_unhealthy(ret)))
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
return 0;
}
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
+ struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- if (atomic_read(&vgpu->running_workload_num)) {
+ if (atomic_read(&s->running_workload_num)) {
gvt_dbg_sched("wait vgpu idle\n");
wait_event(scheduler->workload_complete_wq,
- !atomic_read(&vgpu->running_workload_num));
+ !atomic_read(&s->running_workload_num));
}
}
return ret;
}
- void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
+ /**
+ * intel_vgpu_clean_submission - free submission-related resource for vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called when a vGPU is being destroyed.
+ *
+ */
+ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
+ {
+ struct intel_vgpu_submission *s = &vgpu->submission;
+
+ intel_vgpu_select_submission_ops(vgpu, 0);
+ i915_gem_context_put(s->shadow_ctx);
+ kmem_cache_destroy(s->workloads);
+ }
+
+ /**
+ * intel_vgpu_reset_submission - reset submission-related resource for vGPU
+ * @vgpu: a vGPU
+ * @engine_mask: engines expected to be reset
+ *
+ * This function is called when a vGPU is being reset.
+ *
+ */
+ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
{
- i915_gem_context_put(vgpu->shadow_ctx);
+ struct intel_vgpu_submission *s = &vgpu->submission;
+
+ if (!s->active)
+ return;
+
+ clean_workloads(vgpu, engine_mask);
+ s->ops->reset(vgpu, engine_mask);
}
- int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
+ /**
+ * intel_vgpu_setup_submission - setup submission-related resource for vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called when a vGPU is being created.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
- atomic_set(&vgpu->running_workload_num, 0);
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+ int ret;
- vgpu->shadow_ctx = i915_gem_context_create_gvt(
+ s->shadow_ctx = i915_gem_context_create_gvt(
&vgpu->gvt->dev_priv->drm);
- if (IS_ERR(vgpu->shadow_ctx))
- return PTR_ERR(vgpu->shadow_ctx);
+ if (IS_ERR(s->shadow_ctx))
+ return PTR_ERR(s->shadow_ctx);
+
+ bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+
+ s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
+ sizeof(struct intel_vgpu_workload), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ if (!s->workloads) {
+ ret = -ENOMEM;
+ goto out_shadow_ctx;
+ }
+
+ for_each_engine(engine, vgpu->gvt->dev_priv, i)
+ INIT_LIST_HEAD(&s->workload_q_head[i]);
+
+ atomic_set(&s->running_workload_num, 0);
+ bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
+
+ return 0;
+
+ out_shadow_ctx:
+ i915_gem_context_put(s->shadow_ctx);
+ return ret;
+ }
+
+ /**
+ * intel_vgpu_select_submission_ops - select virtual submission interface
+ * @vgpu: a vGPU
+ * @interface: expected vGPU virtual submission interface
+ *
+ * This function is called when the guest configures the submission interface.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+ unsigned int interface)
+ {
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ const struct intel_vgpu_submission_ops *ops[] = {
+ [INTEL_VGPU_EXECLIST_SUBMISSION] =
+ &intel_vgpu_execlist_submission_ops,
+ };
+ int ret;
+
+ if (WARN_ON(interface >= ARRAY_SIZE(ops)))
+ return -EINVAL;
+
+ if (s->active) {
+ s->ops->clean(vgpu);
+ s->active = false;
+ gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
+ vgpu->id, s->ops->name);
+ }
+
+ if (interface == 0) {
+ s->ops = NULL;
+ s->virtual_submission_interface = 0;
+ gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+ return 0;
+ }
+
+ ret = ops[interface]->init(vgpu);
+ if (ret)
+ return ret;
+
+ s->ops = ops[interface];
+ s->virtual_submission_interface = interface;
+ s->active = true;
+
+ gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
+ vgpu->id, s->ops->name);
+
+ return 0;
+ }
+
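/*
 * A minimal user-space sketch of the submission-ops selection done by
 * intel_vgpu_select_submission_ops() above: the interface number indexes a
 * table of ops, the previously active ops are cleaned up first, and
 * interface 0 simply disables submission.  The struct and function names
 * below are hypothetical, not the GVT-g API.
 */
#include <stddef.h>
#include <stdio.h>

struct submission_ops {
	const char *name;
	int (*init)(void);
	void (*clean)(void);
};

static int execlist_init(void)   { puts("execlist init");  return 0; }
static void execlist_clean(void) { puts("execlist clean"); }

static const struct submission_ops execlist_ops = {
	.name  = "execlist",
	.init  = execlist_init,
	.clean = execlist_clean,
};

static const struct submission_ops *ops_table[] = {
	[0] = NULL,		/* 0 means "no submission interface" */
	[1] = &execlist_ops,
};

static const struct submission_ops *active_ops;

static int select_ops(unsigned int interface)
{
	if (interface >= sizeof(ops_table) / sizeof(ops_table[0]))
		return -1;

	if (active_ops) {			/* de-select current ops */
		active_ops->clean();
		active_ops = NULL;
	}

	if (interface == 0)			/* disable submission */
		return 0;

	if (ops_table[interface]->init())	/* activate requested ops */
		return -1;

	active_ops = ops_table[interface];
	return 0;
}

int main(void)
{
	select_ops(1);	/* guest picks the execlist-style interface */
	select_ops(0);	/* guest tears it down again */
	return 0;
}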
+ /**
+ * intel_vgpu_destroy_workload - destroy a vGPU workload
+ * @workload: the workload to destroy
+ *
+ * This function is called when destroying a vGPU workload.
+ *
+ */
+ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
+ {
+ struct intel_vgpu_submission *s = &workload->vgpu->submission;
+
+ if (workload->shadow_mm)
+ intel_gvt_mm_unreference(workload->shadow_mm);
+
+ kmem_cache_free(s->workloads, workload);
+ }
+
+ static struct intel_vgpu_workload *
+ alloc_workload(struct intel_vgpu *vgpu)
+ {
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_workload *workload;
- vgpu->shadow_ctx->engine[RCS].initialised = true;
+ workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
+ if (!workload)
+ return ERR_PTR(-ENOMEM);
- bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+ INIT_LIST_HEAD(&workload->list);
+ INIT_LIST_HEAD(&workload->shadow_bb);
+ init_waitqueue_head(&workload->shadow_ctx_status_wq);
+ atomic_set(&workload->shadow_ctx_active, 0);
+
+ workload->status = -EINPROGRESS;
+ workload->shadowed = false;
+ workload->vgpu = vgpu;
+
+ return workload;
+ }
+
+ #define RING_CTX_OFF(x) \
+ offsetof(struct execlist_ring_context, x)
+
+ static void read_guest_pdps(struct intel_vgpu *vgpu,
+ u64 ring_context_gpa, u32 pdp[8])
+ {
+ u64 gpa;
+ int i;
+
+ gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+
+ for (i = 0; i < 8; i++)
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ gpa + i * 8, &pdp[7 - i], 4);
+ }
+
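/*
 * A minimal user-space sketch of the RING_CTX_OFF()/read_guest_pdps()
 * pattern above: offsetof() locates a field inside a guest context image,
 * and the eight page-directory pointers are copied out in reverse so
 * pdp[0] ends up holding the highest-numbered entry.  The context layout
 * and stride here are hypothetical stand-ins for the execlist ring
 * context, and read_gpa() stands in for intel_gvt_hypervisor_read_gpa().
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_ring_context {
	uint32_t header[16];
	uint32_t pdp_entries[8];
};

#define CTX_OFF(field) offsetof(struct fake_ring_context, field)

/* copy @len bytes at "guest physical address" @gpa out of a local image */
static void read_gpa(const void *image, size_t gpa, void *dst, size_t len)
{
	memcpy(dst, (const char *)image + gpa, len);
}

static void read_pdps(const void *ctx_image, uint32_t pdp[8])
{
	size_t gpa = CTX_OFF(pdp_entries);

	for (int i = 0; i < 8; i++)
		read_gpa(ctx_image, gpa + i * 4, &pdp[7 - i], 4);
}

int main(void)
{
	struct fake_ring_context ctx = {
		.pdp_entries = { 0, 1, 2, 3, 4, 5, 6, 7 },
	};
	uint32_t pdp[8];

	read_pdps(&ctx, pdp);
	printf("pdp[0]=%u pdp[7]=%u\n", pdp[0], pdp[7]);	/* prints 7 and 0 */
	return 0;
}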
+ static int prepare_mm(struct intel_vgpu_workload *workload)
+ {
+ struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
+ struct intel_vgpu_mm *mm;
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int page_table_level;
+ u32 pdp[8];
+
+ if (desc->addressing_mode == 1) { /* legacy 32-bit */
+ page_table_level = 3;
+ } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
+ page_table_level = 4;
+ } else {
+ gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
+ return -EINVAL;
+ }
+
+ read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+
+ mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
+ if (mm) {
+ intel_gvt_mm_reference(mm);
+ } else {
+ mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
+ pdp, page_table_level, 0);
+ if (IS_ERR(mm)) {
+ gvt_vgpu_err("fail to create mm object.\n");
+ return PTR_ERR(mm);
+ }
+ }
+ workload->shadow_mm = mm;
return 0;
}
+
+ #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
+ ((a)->lrca == (b)->lrca))
+
+ #define get_last_workload(q) \
+ (list_empty(q) ? NULL : container_of(q->prev, \
+ struct intel_vgpu_workload, list))
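/*
 * A minimal user-space sketch of the get_last_workload() macro above: with
 * an intrusive doubly-linked list, the queue head's ->prev pointer is the
 * most recently queued node, and container_of() recovers the enclosing
 * workload from the embedded list node.  struct list_node is a small
 * stand-in for the kernel's struct list_head.
 */
#include <stddef.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct workload {
	int id;
	struct list_node list;
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct list_node *node, struct list_node *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

/* NULL for an empty queue, otherwise the workload queued last */
static struct workload *last_workload(struct list_node *q)
{
	return q->next == q ? NULL :
		container_of(q->prev, struct workload, list);
}

int main(void)
{
	struct list_node q;
	struct workload a = { .id = 1 }, b = { .id = 2 };

	list_init(&q);
	list_add_tail(&a.list, &q);
	list_add_tail(&b.list, &q);
	printf("last workload id = %d\n", last_workload(&q)->id);	/* 2 */
	return 0;
}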
+ /**
+ * intel_vgpu_create_workload - create a vGPU workload
+ * @vgpu: a vGPU
+ * @ring_id: ring id on which the workload is queued
+ * @desc: a guest context descriptor
+ *
+ * This function is called when creating a vGPU workload.
+ *
+ * Returns:
+ * pointer to the new workload on success, or an ERR_PTR-encoded negative
+ * error code on failure.
+ *
+ */
+ struct intel_vgpu_workload *
+ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+ struct execlist_ctx_descriptor_format *desc)
+ {
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct intel_vgpu_workload *last_workload = get_last_workload(q);
+ struct intel_vgpu_workload *workload = NULL;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u64 ring_context_gpa;
+ u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ int ret;
+
+ ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
+ if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
+ return ERR_PTR(-EINVAL);
+ }
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_header.val), &head, 4);
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_tail.val), &tail, 4);
+
+ head &= RB_HEAD_OFF_MASK;
+ tail &= RB_TAIL_OFF_MASK;
+
+ if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
+ gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
+ gvt_dbg_el("ctx head %x real head %lx\n", head,
+ last_workload->rb_tail);
+ /*
+ * cannot use guest context head pointer here,
+ * as it might not be updated at this time
+ */
+ head = last_workload->rb_tail;
+ }
+
+ gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+
+ /* record some ring buffer register values for scan and shadow */
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_start.val), &start, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+
+ workload = alloc_workload(vgpu);
+ if (IS_ERR(workload))
+ return workload;
+
+ workload->ring_id = ring_id;
+ workload->ctx_desc = *desc;
+ workload->ring_context_gpa = ring_context_gpa;
+ workload->rb_head = head;
+ workload->rb_tail = tail;
+ workload->rb_start = start;
+ workload->rb_ctl = ctl;
+
+ if (ring_id == RCS) {
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
+
+ workload->wa_ctx.indirect_ctx.guest_gma =
+ indirect_ctx & INDIRECT_CTX_ADDR_MASK;
+ workload->wa_ctx.indirect_ctx.size =
+ (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
+ CACHELINE_BYTES;
+ workload->wa_ctx.per_ctx.guest_gma =
+ per_ctx & PER_CTX_ADDR_MASK;
+ workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+ }
+
+ gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
+ workload, ring_id, head, tail, start, ctl);
+
+ ret = prepare_mm(workload);
+ if (ret) {
+ kmem_cache_free(s->workloads, workload);
+ return ERR_PTR(ret);
+ }
+
+ /* Only scan and shadow the first workload in the queue
+ * as there is only one pre-allocated buf-obj for shadow.
+ */
+ if (list_empty(workload_q_head(vgpu, ring_id))) {
+ intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ ret = intel_gvt_scan_and_shadow_workload(workload);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ }
+
+ if (ret && (vgpu_is_vm_unhealthy(ret))) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+ intel_vgpu_destroy_workload(workload);
+ return ERR_PTR(ret);
+ }
+
+ return workload;
+ }
value |= I915_SCHEDULER_CAP_ENABLED;
value |= I915_SCHEDULER_CAP_PRIORITY;
- if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
- i915_modparams.enable_execlists &&
- !i915_modparams.enable_guc_submission)
+ if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
+ i915_modparams.enable_execlists)
value |= I915_SCHEDULER_CAP_PREEMPTION;
}
break;
*/
value = 1;
break;
+ case I915_PARAM_HAS_CONTEXT_ISOLATION:
+ value = intel_engines_has_context_isolation(dev_priv);
+ break;
case I915_PARAM_SLICE_MASK:
value = INTEL_INFO(dev_priv)->sseu.slice_mask;
if (!value)
if (!value)
return -ENODEV;
break;
+ case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
+ value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
if (ret)
goto cleanup_uc;
- intel_modeset_gem_init(dev);
+ intel_setup_overlay(dev_priv);
if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
* We don't keep the workarounds for pre-production hardware, so we expect our
* driver to fail on these machines in one way or another. A little warning on
* dmesg may help both the user and the bug triagers.
+ *
+ * Our policy for removing pre-production workarounds is to keep the
+ * current gen workarounds as a guide to the bring-up of the next gen
+ * (workarounds have a habit of persisting!). Anything older than that
+ * should be removed along with the complications they introduce.
*/
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
- spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
* because the HDA driver may require us to enable the audio power
* domain during system suspend.
*/
- pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
ret = i915_driver_init_early(dev_priv, ent);
if (ret < 0)
intel_csr_ucode_resume(dev_priv);
- i915_gem_resume(dev_priv);
-
i915_restore_state(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
intel_opregion_setup(dev_priv);
drm_mode_config_reset(dev);
- mutex_lock(&dev->struct_mutex);
- if (i915_gem_init_hw(dev_priv)) {
- DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
- i915_gem_set_wedged(dev_priv);
- }
- mutex_unlock(&dev->struct_mutex);
-
- intel_guc_resume(dev_priv);
+ i915_gem_resume(dev_priv);
intel_modeset_init_hw(dev);
+ intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
- intel_autoenable_gt_powersave(dev_priv);
-
enable_rpm_wakeref_asserts(dev_priv);
return 0;
goto finish;
}
+ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+ {
+ return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
+ }
+
/**
* i915_reset_engine - reset GPU engine to recover from a hang
* @engine: engine to reset
goto out;
}
- ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
+ if (!engine->i915->guc.execbuf_client)
+ ret = intel_gt_reset_engine(engine->i915, engine);
+ else
+ ret = intel_guc_reset_engine(&engine->i915->guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
- DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
+ DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
+ engine->i915->guc.execbuf_client ? "GuC " : "",
engine->name, ret);
goto out;
}
intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_uncore_suspend(dev_priv);
+
ret = 0;
if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_uninit(dev_priv);
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+ intel_uncore_runtime_resume(dev_priv);
+
intel_runtime_pm_enable_interrupts(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
- intel_uncore_suspend(dev_priv);
-
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
+ /*
+ * On !llc, we need to change the pages back to WB. We only do so
+ * in bulk, so we rarely need to change the page attributes here,
+ * but doing so requires a stop_machine() from deep inside arch/x86/mm.
+ * To make detection of the possible sleep more likely, use an
+ * unconditional might_sleep() for everybody.
+ */
+ might_sleep();
if (!pagevec_add(&vm->free_pages, page))
vm_free_pages_release(vm, false);
}
INIT_LIST_HEAD(&vm->unbound_list);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
- pagevec_init(&vm->free_pages, false);
+ pagevec_init(&vm->free_pages);
}
static void i915_address_space_fini(struct i915_address_space *vm)
return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
- void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
+ static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
-
- if (INTEL_INFO(dev_priv)->gen < 6)
- return;
+ u32 fault;
for_each_engine(engine, dev_priv, id) {
- u32 fault_reg;
- fault_reg = I915_READ(RING_FAULT_REG(engine));
- if (fault_reg & RING_FAULT_VALID) {
+ fault = I915_READ(RING_FAULT_REG(engine));
+ if (fault & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\n"
"\tAddress space: %s\n"
"\tSource ID: %d\n"
"\tType: %d\n",
- fault_reg & PAGE_MASK,
- fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault_reg),
- RING_FAULT_FAULT_TYPE(fault_reg));
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
I915_WRITE(RING_FAULT_REG(engine),
- fault_reg & ~RING_FAULT_VALID);
+ fault & ~RING_FAULT_VALID);
}
}
- /* Engine specific init may not have been done till this point. */
- if (dev_priv->engine[RCS])
- POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+ POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+ }
+
+ static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
+ {
+ u32 fault = I915_READ(GEN8_RING_FAULT_REG);
+
+ if (fault & RING_FAULT_VALID) {
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ I915_WRITE(GEN8_RING_FAULT_REG,
+ fault & ~RING_FAULT_VALID);
+ }
+
+ POSTING_READ(GEN8_RING_FAULT_REG);
+ }
+
+ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
+ {
+ /* From GEN8 onwards we only have one 'All Engine Fault Register' */
+ if (INTEL_GEN(dev_priv) >= 8)
+ gen8_check_and_clear_faults(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ gen6_check_and_clear_faults(dev_priv);
+ else
+ return;
}
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
intel_ppat_get(struct drm_i915_private *i915, u8 value)
{
struct intel_ppat *ppat = &i915->ppat;
- struct intel_ppat_entry *entry;
+ struct intel_ppat_entry *entry = NULL;
unsigned int scanned, best_score;
int i;
}
if (scanned == ppat->max_entries) {
- if (!best_score)
+ if (!entry)
return ERR_PTR(-ENOSPC);
kref_get(&entry->ref);
ppat->match = bdw_private_pat_match;
ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
- /* XXX: spec is unclear if this is still needed for CNL+ */
- if (!USES_PPGTT(ppat->i915)) {
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
- return;
- }
-
__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
{
if (HAS_DDI(dev_priv))
return pipe_config->port_clock; /* SPLL */
- else if (IS_GEN5(dev_priv))
- return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
else
- return 270000;
+ return dev_priv->fdi_pll_freq;
}
static const struct intel_limit intel_limits_i8xx_dac = {
u32 port_mask;
i915_reg_t dpll_reg;
- switch (dport->port) {
+ switch (dport->base.port) {
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
dpll_reg = DPLL(0);
dpll_reg, port_mask, expected_mask,
1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
- port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
+ port_name(dport->base.port),
+ I915_READ(dpll_reg) & port_mask, expected_mask);
}
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- WARN_ON(!crtc->config->has_pch_encoder);
-
if (HAS_PCH_LPT(dev_priv))
return PIPE_A;
else
case DRM_FORMAT_RGB565:
return PLANE_CTL_FORMAT_RGB_565;
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
case DRM_FORMAT_XRGB8888:
- return PLANE_CTL_FORMAT_XRGB_8888;
- /*
- * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
- * to be already pre-multiplied. We need to add a knob (or a different
- * DRM_FORMAT) for user-space to configure that.
- */
- case DRM_FORMAT_ABGR8888:
- return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
- PLANE_CTL_ALPHA_SW_PREMULTIPLY;
case DRM_FORMAT_ARGB8888:
- return PLANE_CTL_FORMAT_XRGB_8888 |
- PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ return PLANE_CTL_FORMAT_XRGB_8888;
case DRM_FORMAT_XRGB2101010:
return PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR2101010:
return 0;
}
+ /*
+ * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
+ * to be already pre-multiplied. We need to add a knob (or a different
+ * DRM_FORMAT) for user-space to configure that.
+ */
+ static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
+ {
+ switch (pixel_format) {
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ default:
+ return PLANE_CTL_ALPHA_DISABLE;
+ }
+ }
+
+ static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
+ {
+ switch (pixel_format) {
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+ default:
+ return PLANE_COLOR_ALPHA_DISABLE;
+ }
+ }
+
static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
switch (fb_modifier) {
plane_ctl = PLANE_CTL_ENABLE;
- if (!IS_GEMINILAKE(dev_priv) && !IS_CANNONLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+ plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE |
return plane_ctl;
}
+ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+ {
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ u32 plane_color_ctl = 0;
+
+ plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+ plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+ plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
+ plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
+
+ return plane_color_ctl;
+ }
+
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_EDP)
- return enc_to_dig_port(&encoder->base)->port;
+ return encoder->port;
}
return -1;
}
}
- void hsw_enable_ips(struct intel_crtc *crtc)
+ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
}
}
- void hsw_disable_ips(struct intel_crtc *crtc)
+ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!crtc->config->ips_enabled)
+ if (!crtc_state->ips_enabled)
return;
assert_plane_enabled(dev_priv, crtc->plane);
* completely hide the primary plane.
*/
static void
- intel_post_enable_primary(struct drm_crtc *crtc)
+ intel_post_enable_primary(struct drm_crtc *crtc,
+ const struct intel_crtc_state *new_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
* when going from primary only to sprite only and vice
* versa.
*/
- hsw_enable_ips(intel_crtc);
+ hsw_enable_ips(new_crtc_state);
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
/* FIXME move all this to pre_plane_update() with proper state tracking */
static void
- intel_pre_disable_primary(struct drm_crtc *crtc)
+ intel_pre_disable_primary(struct drm_crtc *crtc,
+ const struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
* when going from primary only to sprite only and vice
* versa.
*/
- hsw_disable_ips(intel_crtc);
+ hsw_disable_ips(old_crtc_state);
}
/* FIXME get rid of this and use pre_plane_update */
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- intel_pre_disable_primary(crtc);
+ intel_pre_disable_primary(crtc, to_intel_crtc_state(crtc->state));
/*
* Vblank time updates from the shadow to live plane control register
if (primary_state->base.visible &&
(needs_modeset(&pipe_config->base) ||
!old_primary_state->base.visible))
- intel_post_enable_primary(&crtc->base);
+ intel_post_enable_primary(&crtc->base, pipe_config);
}
}
if (old_primary_state->base.visible &&
(modeset || !primary_state->base.visible))
- intel_pre_disable_primary(&crtc->base);
+ intel_pre_disable_primary(&crtc->base, old_crtc_state);
}
/*
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
dev_priv->min_cdclk[intel_crtc->pipe] = 0;
+ dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
/*
break;
case INTEL_OUTPUT_EDP:
has_panel = true;
- if (enc_to_dig_port(&encoder->base)->port == PORT_A)
+ if (encoder->port == PORT_A)
has_cpu_edp = true;
break;
default:
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- u32 val, base, offset, stride_mult, tiling;
+ u32 val, base, offset, stride_mult, tiling, alpha;
int pipe = crtc->pipe;
int fourcc, pixel_format;
unsigned int aligned_height;
goto error;
pixel_format = val & PLANE_CTL_FORMAT_MASK;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ alpha = I915_READ(PLANE_COLOR_CTL(pipe, 0));
+ alpha &= PLANE_COLOR_ALPHA_MASK;
+ } else {
+ alpha = val & PLANE_CTL_ALPHA_MASK;
+ }
+
fourcc = skl_format_to_fourcc(pixel_format,
- val & PLANE_CTL_ORDER_RGBX,
- val & PLANE_CTL_ALPHA_MASK);
+ val & PLANE_CTL_ORDER_RGBX, alpha);
fb->format = drm_format_info(fourcc);
tiling = val & PLANE_CTL_TILED_MASK;
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
/*
ironlake_get_pfit_config(crtc, pipe_config);
}
- if (IS_HASWELL(dev_priv))
- pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
- (I915_READ(IPS_CTL) & IPS_ENABLE);
-
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
u32 offset;
int ret;
- ret = drm_plane_helper_check_state(&plane_state->base,
- &plane_state->clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
+ ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+ &crtc_state->base,
+ &plane_state->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
if (ret)
return ret;
OUTPUT_TYPE(DP),
OUTPUT_TYPE(EDP),
OUTPUT_TYPE(DSI),
- OUTPUT_TYPE(UNKNOWN),
+ OUTPUT_TYPE(DDI),
OUTPUT_TYPE(DP_MST),
};
switch (encoder->type) {
unsigned int port_mask;
- case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_DDI:
if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
- port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
+ port_mask = 1 << encoder->port;
/* the same port mustn't appear more than once */
if (used_ports & port_mask)
break;
case INTEL_OUTPUT_DP_MST:
used_mst_ports |=
- 1 << enc_to_mst(&encoder->base)->primary->port;
+ 1 << encoder->port;
break;
default:
break;
* Determine output_types before calling the .compute_config()
* hooks so that the hooks can use this information safely.
*/
- pipe_config->output_types |= 1 << encoder->type;
+ if (encoder->compute_output_type)
+ pipe_config->output_types |=
+ BIT(encoder->compute_output_type(encoder, pipe_config,
+ connector_state));
+ else
+ pipe_config->output_types |= BIT(encoder->type);
}
encoder_retry:
bool adjust)
{
bool ret = true;
+ bool fixup_inherited = adjust &&
+ (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
+ !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
ret = false; \
}
+ #define PIPE_CONF_CHECK_BOOL(name) \
+ if (current_config->name != pipe_config->name) { \
+ pipe_config_err(adjust, __stringify(name), \
+ "(expected %s, found %s)\n", \
+ yesno(current_config->name), \
+ yesno(pipe_config->name)); \
+ ret = false; \
+ }
+
+ /*
+ * Checks state where we only read out the enabling, but not the entire
+ * state itself (like full infoframes or ELD for audio). These states
+ * require a full modeset on bootup to fix up.
+ */
+ #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) \
+ if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
+ PIPE_CONF_CHECK_BOOL(name); \
+ } else { \
+ pipe_config_err(adjust, __stringify(name), \
+ "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
+ yesno(current_config->name), \
+ yesno(pipe_config->name)); \
+ ret = false; \
+ }
+
#define PIPE_CONF_CHECK_P(name) \
if (current_config->name != pipe_config->name) { \
pipe_config_err(adjust, __stringify(name), \
PIPE_CONF_CHECK_I(cpu_transcoder);
- PIPE_CONF_CHECK_I(has_pch_encoder);
+ PIPE_CONF_CHECK_BOOL(has_pch_encoder);
PIPE_CONF_CHECK_I(fdi_lanes);
PIPE_CONF_CHECK_M_N(fdi_m_n);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
- PIPE_CONF_CHECK_I(has_hdmi_sink);
+ PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- PIPE_CONF_CHECK_I(limited_color_range);
+ PIPE_CONF_CHECK_BOOL(limited_color_range);
- PIPE_CONF_CHECK_I(hdmi_scrambling);
- PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
- PIPE_CONF_CHECK_I(has_infoframe);
- PIPE_CONF_CHECK_I(ycbcr420);
+ PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
+ PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
+ PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
+ PIPE_CONF_CHECK_BOOL(ycbcr420);
- PIPE_CONF_CHECK_I(has_audio);
+ PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
- PIPE_CONF_CHECK_I(pch_pfit.enabled);
+ PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_X(pch_pfit.pos);
PIPE_CONF_CHECK_X(pch_pfit.size);
PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
}
- /* BDW+ don't expose a synchronous way to read the state */
- if (IS_HASWELL(dev_priv))
- PIPE_CONF_CHECK_I(ips_enabled);
-
- PIPE_CONF_CHECK_I(double_wide);
+ PIPE_CONF_CHECK_BOOL(double_wide);
PIPE_CONF_CHECK_P(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+ PIPE_CONF_CHECK_I(min_voltage_level);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
+ #undef PIPE_CONF_CHECK_BOOL
+ #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
- if (active) {
- pipe_config->output_types |= 1 << encoder->type;
+ if (active)
encoder->get_config(encoder, pipe_config);
- }
}
intel_crtc_compute_pixel_rate(pipe_config);
* holding all the crtc locks, even if we don't end up
* touching the hardware
*/
- if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical,
- &intel_state->cdclk.logical)) {
+ if (intel_cdclk_changed(&dev_priv->cdclk.logical,
+ &intel_state->cdclk.logical)) {
ret = intel_lock_all_pipes(state);
if (ret < 0)
return ret;
}
/* All pipes must be switched off while we change the cdclk. */
- if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual,
- &intel_state->cdclk.actual)) {
+ if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+ &intel_state->cdclk.actual)) {
ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
intel_state->cdclk.logical.cdclk,
intel_state->cdclk.actual.cdclk);
+ DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
+ intel_state->cdclk.logical.voltage_level,
+ intel_state->cdclk.actual.voltage_level);
} else {
to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
}
if (intel_state->modeset) {
memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
sizeof(intel_state->min_cdclk));
+ memcpy(dev_priv->min_voltage_level,
+ intel_state->min_voltage_level,
+ sizeof(intel_state->min_voltage_level));
dev_priv->active_crtcs = intel_state->active_crtcs;
dev_priv->cdclk.logical = intel_state->cdclk.logical;
dev_priv->cdclk.actual = intel_state->cdclk.actual;
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
- if (IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
max_dotclk *= 2;
if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
can_position = true;
}
- ret = drm_plane_helper_check_state(&state->base,
- &state->clip,
- min_scale, max_scale,
- can_position, true);
+ ret = drm_atomic_helper_check_plane_state(&state->base,
+ &crtc_state->base,
+ &state->clip,
+ min_scale, max_scale,
+ can_position, true);
if (ret)
return ret;
state->ctl = i9xx_plane_ctl(crtc_state, state);
}
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ state->color_ctl = glk_plane_color_ctl(crtc_state, state);
+
return 0;
}
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
intel_pipe_update_end(new_crtc_state);
+
+ if (new_crtc_state->update_pipe &&
+ !needs_modeset(&new_crtc_state->base) &&
+ old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED) {
+ if (!IS_GEN2(dev_priv))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, intel_crtc->pipe, true);
+
+ if (new_crtc_state->has_pch_encoder) {
+ enum pipe pch_transcoder =
+ intel_crtc_pch_transcoder(intel_crtc);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+ }
+ }
}
/**
struct drm_i915_private *dev_priv = to_i915(dev);
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
cs->wm.need_postvbl_update = true;
dev_priv->display.optimize_watermarks(intel_state, cs);
+
+ to_intel_crtc_state(crtc->state)->wm = cs->wm;
}
put_state:
drm_modeset_acquire_fini(&ctx);
}
+ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
+ {
+ if (IS_GEN5(dev_priv)) {
+ u32 fdi_pll_clk =
+ I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+
+ dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
+ } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+ dev_priv->fdi_pll_freq = 270000;
+ } else {
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
+ }
+
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
}
intel_shared_dpll_init(dev);
+ intel_update_fdi_pll_freq(dev_priv);
intel_update_czclk(dev_priv);
intel_modeset_init_hw(dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
- if (!transcoder_is_dsi(cpu_transcoder)) {
+ if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
I915_WRITE(reg,
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
- crtc_state->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc_state);
} else {
encoder->base.crtc = NULL;
}
dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
+ dev_priv->min_voltage_level[crtc->pipe] =
+ crtc_state->min_voltage_level;
intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
}
}
+ static void intel_early_display_was(struct drm_i915_private *dev_priv)
+ {
+ /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ DARBF_GATING_DIS);
+
+ if (IS_HASWELL(dev_priv)) {
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+ }
+ }
+
/* Scan out the current hw modeset state,
* and sanitizes it to the current state
*/
struct intel_encoder *encoder;
int i;
- if (IS_HASWELL(dev_priv)) {
- /*
- * WaRsPkgCStateDisplayPMReq:hsw
- * System hang if this isn't done before disabling all planes!
- */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- }
-
+ intel_early_display_was(dev_priv);
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
drm_atomic_state_put(state);
}
- void intel_modeset_gem_init(struct drm_device *dev)
- {
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_init_gt_powersave(dev_priv);
-
- intel_init_clock_gating(dev_priv);
-
- intel_setup_overlay(dev_priv);
- }
-
int intel_connector_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
- static void intel_dp_link_down(struct intel_dp *intel_dp);
+ static void intel_dp_link_down(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
- static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
- static void vlv_steal_power_sequencer(struct drm_device *dev,
+ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->port;
+ enum port port = dig_port->base.port;
const int *source_rates;
int size;
u32 voltage;
}
static void
- intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp);
+ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
- intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
+ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
bool force_disable_vdd);
static void
- intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
+ intel_dp_pps_init(struct intel_dp *intel_dp);
static void pps_lock(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
/*
- * See vlv_power_sequencer_reset() why we need
+ * See intel_power_sequencer_reset() why we need
* a power domain reference here.
*/
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
static void pps_unlock(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
mutex_unlock(&dev_priv->pps_mutex);
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
"skipping pipe %c power seqeuncer kick due to port %c being active\n",
- pipe_name(pipe), port_name(intel_dig_port->port)))
+ pipe_name(pipe), port_name(intel_dig_port->base.port)))
return;
DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->port));
+ pipe_name(pipe), port_name(intel_dig_port->base.port));
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
lockdep_assert_held(&dev_priv->pps_mutex);
if (WARN_ON(pipe == INVALID_PIPE))
pipe = PIPE_A;
- vlv_steal_power_sequencer(dev, pipe);
+ vlv_steal_power_sequencer(dev_priv, pipe);
intel_dp->pps_pipe = pipe;
DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
pipe_name(intel_dp->pps_pipe),
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
/*
* Even vdd force doesn't work until we've made
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
* Only the HW needs to be reprogrammed, the SW state is fixed and
* has been setup during connector init.
*/
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
return 0;
}
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
lockdep_assert_held(&dev_priv->pps_mutex);
DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
port_name(port), pipe_name(intel_dp->pps_pipe));
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
* should use them always.
*/
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp;
if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP)
+ encoder->type != INTEL_OUTPUT_EDP &&
+ encoder->type != INTEL_OUTPUT_DDI)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
+ /* Skip pure DVI/HDMI DDI encoders */
+ if (!i915_mmio_reg_valid(intel_dp->output_reg))
+ continue;
+
WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
if (encoder->type != INTEL_OUTPUT_EDP)
i915_reg_t pp_div;
};
- static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp,
+ static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
int pps_idx = 0;
memset(regs, 0, sizeof(*regs));
{
struct pps_registers regs;
- intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
- ®s);
+ intel_pps_get_registers(intel_dp, ®s);
return regs.pp_ctrl;
}
{
struct pps_registers regs;
- intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
- ®s);
+ intel_pps_get_registers(intel_dp, ®s);
return regs.pp_stat;
}
{
struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
edp_notifier);
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
return 0;
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!intel_dp_is_edp(intel_dp))
return;
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t status;
bool done;
* like to run at 2MHz. So, take the cdclk or PCH rawclk value and
* divide by 2000 and use that
*/
- if (intel_dig_port->port == PORT_A)
+ if (intel_dig_port->base.port == PORT_A)
return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
else
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
+ if (intel_dig_port->base.port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = intel_aux_port(dev_priv,
- dp_to_dig_port(intel_dp)->port);
+ dp_to_dig_port(intel_dp)->base.port);
int i;
intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
intel_dp_aux_init(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
intel_aux_reg_init(intel_dp);
drm_dp_aux_init(&intel_dp->aux);
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct dp_link_dpll *divisor = NULL;
int i, count = 0;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = encoder->port;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ enum port port = encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
- static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp);
+ static void intel_pps_verify_state(struct intel_dp *intel_dp);
static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
i915_reg_t pp_stat_reg, pp_ctrl_reg;
lockdep_assert_held(&dev_priv->pps_mutex);
- intel_pps_verify_state(dev_priv, intel_dp);
+ intel_pps_verify_state(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 control;
lockdep_assert_held(&dev_priv->pps_mutex);
*/
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
*/
if (!edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
msleep(intel_dp->panel_power_up_delay);
}
pps_unlock(intel_dp);
I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port =
dp_to_dig_port(intel_dp);
u32 pp;
return;
DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
return;
I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
intel_dp->want_panel_vdd = false;
static void edp_panel_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
if (WARN(edp_have_panel_power(intel_dp),
"eDP port %c panel power already on\n",
- port_name(dp_to_dig_port(intel_dp)->port)))
+ port_name(dp_to_dig_port(intel_dp)->base.port)))
return;
wait_panel_power_cycle(intel_dp);
static void edp_panel_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
- port_name(dig_port->port),
+ port_name(dig_port->base.port),
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
udelay(200);
}
- static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
+ static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
assert_pipe_disabled(dev_priv, crtc->pipe);
udelay(200);
}
+ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
+ {
+ /*
+ * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
+ * be capable of signalling downstream hpd with a long pulse.
+ * Whether or not that means D3 is safe to use is not clear,
+ * but let's assume so until proven otherwise.
+ *
+ * FIXME should really check all downstream ports...
+ */
+ return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
+ intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
+ intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
+ }
+
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
return;
if (mode != DRM_MODE_DPMS_ON) {
+ if (downstream_hpd_needs_d0(intel_dp))
+ return;
+
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D3);
} else {
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = encoder->port;
u32 tmp;
bool ret;
static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
u32 tmp, flags = 0;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ enum port port = encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
tmp = I915_READ(intel_dp->output_reg);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
/* disable the port before the pipe on g4x */
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
}
static void ilk_disable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = encoder->port;
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
/* Only ilk+ has port A */
if (port == PORT_A)
- ironlake_edp_pll_off(intel_dp);
+ ironlake_edp_pll_off(intel_dp, old_crtc_state);
}
static void vlv_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
}
static void chv_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
+ chv_data_lane_soft_reset(encoder, old_crtc_state, true);
mutex_unlock(&dev_priv->sb_lock);
}
uint32_t *DP,
uint8_t dp_train_pat)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
static void intel_dp_enable_port(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
/* enable with pattern 1 (as per spec) */
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
pps_lock(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_init_panel_power_sequencer(intel_dp);
+ vlv_init_panel_power_sequencer(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = encoder->port;
intel_dp_prepare(encoder, pipe_config);
* from a port.
*/
DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->port));
+ pipe_name(pipe), port_name(intel_dig_port->base.port));
I915_WRITE(pp_on_reg, 0);
POSTING_READ(pp_on_reg);
intel_dp->pps_pipe = INVALID_PIPE;
}
- static void vlv_steal_power_sequencer(struct drm_device *dev,
+ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
lockdep_assert_held(&dev_priv->pps_mutex);
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp;
enum port port;
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- port = dp_to_dig_port(intel_dp)->port;
+ port = dp_to_dig_port(intel_dp)->base.port;
WARN(intel_dp->active_pipe == pipe,
"stealing pipe %c power sequencer from active (e)DP port %c\n",
}
}
- static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
+ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
lockdep_assert_held(&dev_priv->pps_mutex);
* We may be stealing the power
* sequencer from another port.
*/
- vlv_steal_power_sequencer(dev, crtc->pipe);
+ vlv_steal_power_sequencer(dev_priv, crtc->pipe);
intel_dp->active_pipe = crtc->pipe;
intel_dp->pps_pipe = crtc->pipe;
DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
- pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
+ pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- vlv_phy_pre_encoder_enable(encoder);
+ vlv_phy_pre_encoder_enable(encoder, pipe_config);
intel_enable_dp(encoder, pipe_config, conn_state);
}
{
intel_dp_prepare(encoder, pipe_config);
- vlv_phy_pre_pll_enable(encoder);
+ vlv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- chv_phy_pre_encoder_enable(encoder);
+ chv_phy_pre_encoder_enable(encoder, pipe_config);
intel_enable_dp(encoder, pipe_config, conn_state);
{
intel_dp_prepare(encoder, pipe_config);
- chv_phy_pre_pll_enable(encoder);
+ chv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
- chv_phy_post_pll_disable(encoder);
+ chv_phy_post_pll_disable(encoder, old_crtc_state);
}
/*
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
if (INTEL_GEN(dev_priv) >= 9) {
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
if (INTEL_GEN(dev_priv) >= 9) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->port;
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = intel_dig_port->base.port;
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
uint32_t val;
if (!HAS_DDI(dev_priv))
}
static void
- intel_dp_link_down(struct intel_dp *intel_dp)
+ intel_dp_link_down(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
- enum port port = intel_dig_port->port;
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ enum port port = encoder->port;
uint32_t DP = intel_dp->DP;
if (WARN_ON(HAS_DDI(dev_priv)))
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
- DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+ DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
- /* Intermediate frequency support */
- if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */
+ /* Read the eDP 1.4+ supported link rates. */
+ if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i;
intel_dp->num_sink_rates = i;
}
+ /*
+ * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
+ * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
+ */
if (intel_dp->num_sink_rates)
intel_dp->use_rate_select = true;
else
intel_dp->is_mst);
}
- static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
+ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state, bool disable_wa)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
u8 buf;
int ret = 0;
int count = 0;
}
out:
- hsw_enable_ips(intel_crtc);
+ if (disable_wa)
+ hsw_enable_ips(crtc_state);
return ret;
}
- static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
+ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
u8 buf;
int ret;
return -EIO;
if (buf & DP_TEST_SINK_START) {
- ret = intel_dp_sink_crc_stop(intel_dp);
+ ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
if (ret)
return ret;
}
- hsw_disable_ips(intel_crtc);
+ hsw_disable_ips(crtc_state);
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
buf | DP_TEST_SINK_START) < 0) {
- hsw_enable_ips(intel_crtc);
+ hsw_enable_ips(crtc_state);
return -EIO;
}
return 0;
}
- int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+ int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
u8 buf;
int count, ret;
int attempts = 6;
- ret = intel_dp_sink_crc_start(intel_dp);
+ ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
if (ret)
return ret;
}
stop:
- intel_dp_sink_crc_stop(intel_dp);
+ intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
return ret;
}
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
u8 link_status[DP_LINK_STATUS_SIZE];
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("Failed to get link status\n");
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u8 sink_irq_vector = 0;
u8 old_sink_count = intel_dp->sink_count;
bool ret;
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
/* Send a Hotplug Uevent to userspace to start modeset */
- drm_kms_helper_hotplug_event(intel_encoder->base.dev);
+ drm_kms_helper_hotplug_event(&dev_priv->drm);
}
return true;
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum drm_connector_status status;
status = intel_panel_detect(dev_priv);
{
u32 bit;
- switch (port->port) {
+ switch (port->base.port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG;
break;
bit = SDE_PORTD_HOTPLUG;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(port->base.port);
return false;
}
{
u32 bit;
- switch (port->port) {
+ switch (port->base.port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG_CPT;
break;
bit = SDE_PORTD_HOTPLUG_CPT;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(port->base.port);
return false;
}
{
u32 bit;
- switch (port->port) {
+ switch (port->base.port) {
case PORT_A:
bit = SDE_PORTA_HOTPLUG_SPT;
break;
{
u32 bit;
- switch (port->port) {
+ switch (port->base.port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
break;
bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(port->base.port);
return false;
}
{
u32 bit;
- switch (port->port) {
+ switch (port->base.port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
break;
bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(port->base.port);
return false;
}
static bool ilk_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
- if (port->port == PORT_A)
+ if (port->base.port == PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
else
return ibx_digital_port_connected(dev_priv, port);
static bool snb_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
- if (port->port == PORT_A)
+ if (port->base.port == PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(dev_priv, port);
static bool ivb_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
- if (port->port == PORT_A)
+ if (port->base.port == PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
else
return cpt_digital_port_connected(dev_priv, port);
static bool bdw_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
- if (port->port == PORT_A)
+ if (port->base.port == PORT_A)
return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(dev_priv, port);
}
static int
- intel_dp_long_pulse(struct intel_connector *intel_connector)
+ intel_dp_long_pulse(struct intel_connector *connector)
{
- struct drm_connector *connector = &intel_connector->base;
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
enum drm_connector_status status;
u8 sink_irq_vector = 0;
- WARN_ON(!drm_modeset_is_locked(&connector->dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
- intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
/* Can't disconnect eDP, but you can close the lid... */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
- else if (intel_digital_port_connected(to_i915(dev),
+ else if (intel_digital_port_connected(dev_priv,
dp_to_dig_port(intel_dp)))
status = intel_dp_detect_dpcd(intel_dp);
else
goto out;
}
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DP;
-
if (intel_dp->reset_link_params) {
/* Initial max link lane count */
intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (intel_dp_is_edp(intel_dp) || intel_connector->detect_edid)
+ if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
status = connector_status_connected;
intel_dp->detect_done = true;
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
- intel_display_power_put(to_i915(dev), intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
return status;
}
intel_dp_set_edid(intel_dp);
intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
-
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DP;
}
static int intel_dp_get_modes(struct drm_connector *connector)
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
if (intel_dp_is_edp(intel_dp)) {
/* Reinit the power sequencer, in case BIOS did something with it. */
- intel_dp_pps_init(encoder->dev, intel_dp);
+ intel_dp_pps_init(intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
}
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum irqreturn ret = IRQ_NONE;
- if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
- intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
- intel_dig_port->base.type = INTEL_OUTPUT_DP;
-
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
* vdd off can generate a long pulse on eDP which
* "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
*/
DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
return IRQ_HANDLED;
}
DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
- port_name(intel_dig_port->port),
+ port_name(intel_dig_port->base.port),
long_hpd ? "long" : "short");
if (long_hpd) {
}
static void
- intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp, struct edp_power_seq *seq)
+ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
struct pps_registers regs;
- intel_pps_get_registers(dev_priv, intel_dp, &regs);
+ intel_pps_get_registers(intel_dp, &regs);
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
}
static void
- intel_pps_verify_state(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp)
+ intel_pps_verify_state(struct intel_dp *intel_dp)
{
struct edp_power_seq hw;
struct edp_power_seq *sw = &intel_dp->pps_delays;
- intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
+ intel_pps_readout_hw_state(intel_dp, &hw);
if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
}
static void
- intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp)
+ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps_delays;
if (final->t11_t12 != 0)
return;
- intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
+ intel_pps_readout_hw_state(intel_dp, &cur);
intel_pps_dump_state("cur", &cur);
}
static void
- intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
+ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
bool force_disable_vdd)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = dev_priv->rawclk_freq / 1000;
struct pps_registers regs;
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
- intel_pps_get_registers(dev_priv, intel_dp, &regs);
+ intel_pps_get_registers(intel_dp, &regs);
/*
* On some VLV machines the BIOS can leave the VDD
I915_READ(regs.pp_div));
}
- static void intel_dp_pps_init(struct drm_device *dev,
- struct intel_dp *intel_dp)
+ static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_initial_power_sequencer_setup(intel_dp);
} else {
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
}
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
- intel_crtc = to_intel_crtc(encoder->base.crtc);
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!crtc_state->has_drrs) {
DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!old_crtc_state->has_drrs)
return;
/**
* intel_dp_drrs_init - Init basic DRRS work and mutex.
- * @intel_connector: eDP connector
+ * @connector: eDP connector
* @fixed_mode: preferred mode of panel
*
* This function is called only once at driver load to initialize basic
* from VBT setting).
*/
static struct drm_display_mode *
- intel_dp_drrs_init(struct intel_connector *intel_connector,
- struct drm_display_mode *fixed_mode)
+ intel_dp_drrs_init(struct intel_connector *connector,
+ struct drm_display_mode *fixed_mode)
{
- struct drm_connector *connector = &intel_connector->base;
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_display_mode *downclock_mode = NULL;
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
return NULL;
}
- downclock_mode = intel_find_panel_downclock
- (dev_priv, fixed_mode, connector);
+ downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
+ &connector->base);
if (!downclock_mode) {
DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
{
- struct drm_connector *connector = &intel_connector->base;
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_connector *connector = &intel_connector->base;
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *alt_fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
- if (intel_get_lvds_encoder(dev)) {
+ if (intel_get_lvds_encoder(&dev_priv->drm)) {
WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
DRM_INFO("LVDS was detected, not registering eDP\n");
pps_lock(intel_dp);
intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_pps_init(dev, intel_dp);
+ intel_dp_pps_init(intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
pps_unlock(intel_dp);
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
edid);
- drm_edid_to_eld(connector, edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
struct intel_encoder *encoder = &intel_dig_port->base;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- encoder->hpd_pin = intel_hpd_pin(intel_dig_port->port);
+ encoder->hpd_pin = intel_hpd_pin(encoder->port);
- switch (intel_dig_port->port) {
+ switch (encoder->port) {
case PORT_A:
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
break;
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
break;
default:
- MISSING_CASE(intel_dig_port->port);
+ MISSING_CASE(encoder->port);
}
}
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
int type;
/* Initialize the work for modeset in case of link train failure */
intel_encoder->disable = g4x_disable_dp;
}
- intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
* contexts. Note that it's important that we check the condition again after
* having timed out, since the timeout could be due to preemption or similar and
* we've never had a chance to check the condition before the timeout.
- *
- * TODO: When modesetting has fully transitioned to atomic, the below
- * drm_can_sleep() can be removed and in_atomic()/!in_atomic() asserts
- * added.
*/
#define _wait_for(COND, US, W) ({ \
unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
int ret__; \
+ might_sleep(); \
for (;;) { \
bool expired__ = time_after(jiffies, timeout__); \
if (COND) { \
ret__ = -ETIMEDOUT; \
break; \
} \
- if ((W) && drm_can_sleep()) { \
- usleep_range((W), (W)*2); \
- } else { \
- cpu_relax(); \
- } \
+ usleep_range((W), (W) * 2); \
} \
ret__; \
})
INTEL_OUTPUT_DP = 7,
INTEL_OUTPUT_EDP = 8,
INTEL_OUTPUT_DSI = 9,
- INTEL_OUTPUT_UNKNOWN = 10,
+ INTEL_OUTPUT_DDI = 10,
INTEL_OUTPUT_DP_MST = 11,
};
enum port port;
unsigned int cloneable;
void (*hot_plug)(struct intel_encoder *);
+ enum intel_output_type (*compute_output_type)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
bool (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
unsigned int active_crtcs;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
/* plane control register */
u32 ctl;
+ /* plane color control register */
+ u32 color_ctl;
+
/*
* scaler_id
* = -1 : not using a scaler
*/
uint8_t lane_lat_optim_mask;
+ /* minimum acceptable voltage level */
+ u8 min_voltage_level;
+
/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
struct intel_digital_port {
struct intel_encoder base;
- enum port port;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
static inline enum dpio_channel
vlv_dport_to_channel(struct intel_digital_port *dport)
{
- switch (dport->port) {
+ switch (dport->base.port) {
case PORT_B:
case PORT_D:
return DPIO_CH0;
static inline enum dpio_phy
vlv_dport_to_phy(struct intel_digital_port *dport)
{
- switch (dport->port) {
+ switch (dport->base.port) {
case PORT_B:
case PORT_C:
return DPIO_PHY0;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
switch (intel_encoder->type) {
- case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_DDI:
WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
- enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
- void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
+ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
- void intel_audio_codec_disable(struct intel_encoder *encoder);
+ void intel_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
- bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
+ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
+ bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state);
+ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
+ const char *context);
/* intel_display.c */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct intel_crtc *crtc);
- void hsw_enable_ips(struct intel_crtc *crtc);
- void hsw_disable_ips(struct intel_crtc *crtc);
+ void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
+ void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
return i915_ggtt_offset(state->vma);
}
+ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
- int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
+ int intel_dp_sink_crc(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
int intel_backlight_device_register(struct intel_connector *connector);
void intel_backlight_device_unregister(struct intel_connector *connector);
#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
+static inline int intel_backlight_device_register(struct intel_connector *connector)
{
return 0;
}
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
- void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
- intel_hdmi->rgb_quant_range_selectable);
+ intel_hdmi->rgb_quant_range_selectable,
+ is_hdmi2_sink);
/* TODO: handle pixel repetition for YCBCR420 outputs */
intel_write_infoframe(encoder, crtc_state, &frame);
static void
intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
union hdmi_infoframe frame;
int ret;
ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ conn_state->connector,
&crtc_state->base.adjusted_mode);
if (ret < 0)
return;
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static void cpt_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static void vlv_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static void hsw_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
u32 tmp, flags = 0;
int dotclock;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
+
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
}
static void pch_post_disable_hdmi(struct intel_encoder *encoder,
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
- static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
+ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
- if (IS_G4X(dev_priv))
- return 165000;
- else if (IS_GEMINILAKE(dev_priv))
- return 594000;
- else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
- return 300000;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[encoder->port];
+ int max_tmds_clock;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ max_tmds_clock = 594000;
+ else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+ max_tmds_clock = 300000;
+ else if (INTEL_GEN(dev_priv) >= 5)
+ max_tmds_clock = 225000;
else
- return 225000;
+ max_tmds_clock = 165000;
+
+ if (info->max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
+
+ return max_tmds_clock;
}
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits,
bool force_dvi)
{
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
+ struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
+ int max_tmds_clock = intel_hdmi_source_max_tmds_clock(encoder);
if (respect_downstream_limits) {
struct intel_connector *connector = hdmi->attached_connector;
if (HAS_GMCH_DISPLAY(dev_priv))
return false;
+ if (crtc_state->pipe_bpp <= 8*3)
+ return false;
+
+ if (!crtc_state->has_hdmi_sink)
+ return false;
+
/*
* HDMI 12bpc affects the clocks, so it's only possible
* when not cloning with other encoder types.
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && !force_dvi &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK &&
- hdmi_12bpc_possible(pipe_config)) {
+ if (hdmi_12bpc_possible(pipe_config) &&
+ hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
pipe_config->lane_count = 4;
- if (scdc->scrambling.supported && IS_GEMINILAKE(dev_priv)) {
+ if (scdc->scrambling.supported && (INTEL_GEN(dev_priv) >= 10 ||
+ IS_GEMINILAKE(dev_priv))) {
if (scdc->scrambling.low_rates)
pipe_config->hdmi_scrambling = true;
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
- enum port port = hdmi_to_dig_port(hdmi)->port;
+ enum port port = hdmi_to_dig_port(hdmi)->base.port;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
intel_hdmi_unset_edid(connector);
- if (intel_hdmi_set_edid(connector)) {
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-
- hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
+ if (intel_hdmi_set_edid(connector))
status = connector_status_connected;
- } else
+ else
status = connector_status_disconnected;
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
static void
intel_hdmi_force(struct drm_connector *connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
return;
intel_hdmi_set_edid(connector);
- hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
}
static int intel_hdmi_get_modes(struct drm_connector *connector)
const struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- vlv_phy_pre_encoder_enable(encoder);
+ vlv_phy_pre_encoder_enable(encoder, pipe_config);
/* HDMI 1.0V-2dB */
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
{
intel_hdmi_prepare(encoder, pipe_config);
- vlv_phy_pre_pll_enable(encoder);
+ vlv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
{
intel_hdmi_prepare(encoder, pipe_config);
- chv_phy_pre_pll_enable(encoder);
+ chv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- chv_phy_post_pll_disable(encoder);
+ chv_phy_post_pll_disable(encoder, old_crtc_state);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
- vlv_phy_reset_lanes(encoder);
+ vlv_phy_reset_lanes(encoder, old_crtc_state);
}
static void chv_hdmi_post_disable(struct intel_encoder *encoder,
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
+ chv_data_lane_soft_reset(encoder, old_crtc_state, true);
mutex_unlock(&dev_priv->sb_lock);
}
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- chv_phy_pre_encoder_enable(encoder);
+ chv_phy_pre_encoder_enable(encoder, pipe_config);
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
- if (IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
connector->ycbcr_420_allowed = true;
intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
if (IS_G4X(dev_priv))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
- intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = 4;
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
};
struct drm_i915_gem_request;
- struct intel_render_state;
/*
* Engine IDs definitions.
*/
struct intel_engine_execlists {
/**
- * @irq_tasklet: softirq tasklet for bottom handler
+ * @tasklet: softirq tasklet for bottom handler
*/
- struct tasklet_struct irq_tasklet;
+ struct tasklet_struct tasklet;
/**
* @default_priolist: priority list for I915_PRIORITY_NORMAL
struct intel_engine_cs {
struct drm_i915_private *i915;
char name[INTEL_ENGINE_CS_MAX_NAME];
+
enum intel_engine_id id;
- unsigned int uabi_id;
unsigned int hw_id;
unsigned int guc_id;
+ u8 uabi_id;
+ u8 uabi_class;
+
u8 class;
u8 instance;
u32 context_size;
struct intel_ring *buffer;
struct intel_timeline *timeline;
- struct intel_render_state *render_state;
+ struct drm_i915_gem_object *default_state;
atomic_t irq_count;
unsigned long irq_posted;
struct timer_list hangcheck; /* detect missed interrupts */
unsigned int hangcheck_interrupts;
+ unsigned int irq_enabled;
bool irq_armed : 1;
- bool irq_enabled : 1;
I915_SELFTEST_DECLARE(bool mock : 1);
} breadcrumbs;
void (*reset_hw)(struct intel_engine_cs *engine,
struct drm_i915_gem_request *req);
+ void (*park)(struct intel_engine_cs *engine);
+ void (*unpark)(struct intel_engine_cs *engine);
+
void (*set_default_submission)(struct intel_engine_cs *engine);
struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
return test_bit(bit, (unsigned long *)&execlists->active);
}
+ void
+ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
+
+ void
+ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
+
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
*/
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+ #define I915_GEM_HWS_PREEMPT_INDEX 0x32
+ #define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
+ int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
unsigned int n);
return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}
+ static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
+ {
+ return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
+ }
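The helper above returns the GGTT address of the status-page slot where preemption completion is reported. As a rough illustration of the CPU-side counterpart, the sketch below polls that same HWS dword with intel_read_status_page() (defined elsewhere in this header); the sentinel value and the example_ helper are hypothetical and do not describe the driver's actual preemption protocol.

/* Hedged sketch: read back the dword that a preempt context would have
 * written at I915_GEM_HWS_PREEMPT_ADDR. EXAMPLE_PREEMPT_DONE and the
 * example_ helper are illustrative names only.
 */
#define EXAMPLE_PREEMPT_DONE 0x1

static inline bool example_preempt_complete(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
	       EXAMPLE_PREEMPT_DONE;
}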
+
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)
+ void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
+ void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
+
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
return batch + 6;
}
+ static inline u32 *
+ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
+ {
+ /* We're using a qword write; the offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ /* w/a: post-sync ops following a GPGPU operation need a prior
+ * CS_STALL, which is emitted by the flush following the batch.
+ */
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE;
+ *cs++ = gtt_offset;
+ *cs++ = 0;
+ *cs++ = value;
+ /* We're thrashing one dword of HWS. */
+ *cs++ = 0;
+
+ return cs;
+ }
+
+ static inline u32 *
+ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
+ {
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ GEM_BUG_ON(gtt_offset & (1 << 5));
+ /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+ }
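As a usage note for the two helpers above: a gen8 breadcrumb emitter typically feeds the request's seqno and the engine's HWS seqno slot into gen8_emit_ggtt_write() and then raises a user interrupt. The sketch below is illustrative only; the example_ function name is hypothetical, while intel_hws_seqno_address(), MI_USER_INTERRUPT and MI_NOOP are existing driver symbols.

/* Illustrative sketch, not the driver's exact emitter: write the request's
 * global seqno into the status page via gen8_emit_ggtt_write() and signal
 * completion with a user interrupt, in the style of intel_lrc.c.
 */
static u32 *example_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
{
	cs = gen8_emit_ggtt_write(cs, request->global_seqno,
				  intel_hws_seqno_address(request->engine));
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	return cs;
}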
+
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
- void intel_engines_mark_idle(struct drm_i915_private *i915);
+ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+
+ void intel_engines_park(struct drm_i915_private *i915);
+ void intel_engines_unpark(struct drm_i915_private *i915);
+
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
+ unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
+/* SPDX-License-Identifier: GPL-2.0 */
/* List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
* a module parameter. It must be unique and legal as a C identifier.
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
+ selftest(guc, intel_guc_live_selftest)
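The header comment above says each entry is listed as selftest(name, function) and that the name is expanded into an enum and a module parameter. A minimal sketch of that X-macro consumption pattern follows; the example_ identifiers and the include path are illustrative and do not match i915_selftest.c exactly.

/* Sketch of the X-macro pattern: define selftest(), include the list,
 * undefine, repeat. The first pass builds an enum of test indices, the
 * second builds a table mapping names to live-selftest entry points.
 */
struct drm_i915_private;

#define selftest(name, func) example_idx_##name,
enum example_live_selftest_id {
#include "i915_live_selftests.h"
};
#undef selftest

#define selftest(name, func) { .name = #name, .run = func },
static const struct example_selftest {
	const char *name;
	int (*run)(struct drm_i915_private *i915);
} example_live_selftests[] = {
#include "i915_live_selftests.h"
};
#undef selftest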