W: http://serial.sourceforge.net
S: Maintained
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
F: drivers/tty/serial/8250*
F: include/linux/serial_8250.h
W: http://www.lesswatts.org/projects/acpi/
Q: http://patchwork.kernel.org/project/linux-acpi/list/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
S: Supported
F: drivers/acpi/
F: drivers/pnp/pnpacpi/
F: drivers/platform/x86/wmi.c
AD1889 ALSA SOUND DRIVER
W: http://wiki.parisc-linux.org/AD1889
+ S: Odd Fixes
F: arch/alpha/
AMD IOMMU (AMD-VI)
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
S: Supported
F: drivers/iommu/amd_iommu*.[ch]
F: include/linux/amd-iommu.h
ARM/CLKDEV SUPPORT
+ S: Maintained
F: arch/arm/include/asm/clkdev.h
F: drivers/clk/clkdev.c
S: Maintained
T: git git://git.pengutronix.de/git/imx/linux-2.6.git
- F: arch/arm/mach-mx*/
F: arch/arm/mach-imx/
F: arch/arm/plat-mxc/
- ARM/FREESCALE IMX51
- S: Maintained
- F: arch/arm/mach-mx5/
-
ARM/FREESCALE IMX6
ARM/H4700 (HP IPAQ HX4700) MACHINE SUPPORT
S: Maintained
F: arch/arm/mach-pxa/hx4700.c
F: arch/arm/mach-pxa/include/mach/hx4700.h
+ F: sound/soc/pxa/hx4700.c
ARM/HP JORNADA 7XX MACHINE SUPPORT
F: drivers/platform/msm/
F: drivers/*/pm8???-*
F: include/linux/mfd/pm8xxx/
- T: git git://codeaurora.org/quic/kernel/davidb/linux-msm.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm.git
S: Maintained
ARM/TOSA MACHINE SUPPORT
W: http://wireless.kernel.org/en/users/Drivers/ath5k
F: include/linux/atm*
ATMEL AT91 MCI DRIVER
- M: Nicolas Ferre <nicolas.ferre@atmel.com>
+ M: Ludovic Desroches <ludovic.desroches@atmel.com>
W: http://www.atmel.com/products/AT91/
W: http://www.at91.com/
F: drivers/mmc/host/at91_mci.c
ATMEL AT91 / AT32 MCI DRIVER
- M: Nicolas Ferre <nicolas.ferre@atmel.com>
+ M: Ludovic Desroches <ludovic.desroches@atmel.com>
S: Maintained
F: drivers/mmc/host/atmel-mci.c
F: drivers/mmc/host/atmel-mci-regs.h
B43 WIRELESS DRIVER
W: http://linuxwireless.org/en/users/Drivers/b43
S: Maintained
F: drivers/net/wireless/b43/
W: http://linuxwireless.org/en/users/Drivers/b43
S: Maintained
F: drivers/net/wireless/b43legacy/
BLOCK LAYER
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: block/
BLUETOOTH DRIVERS
W: http://www.bluez.org/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
S: Maintained
F: drivers/bluetooth/
BLUETOOTH SUBSYSTEM
W: http://www.bluez.org/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
S: Maintained
F: net/bluetooth/
F: include/net/bluetooth/
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
W: http://linuxtv.org
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: Documentation/video4linux/bttv/
F: drivers/media/video/bt8xx/bttv*
CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: Documentation/video4linux/cafe_ccic
F: drivers/media/video/marvell-ccic/
F: include/linux/can/netlink.h
F: include/linux/can/platform/
+ CAPABILITIES
+ S: Supported
+ F: include/linux/capability.h
+ F: security/capability.c
+ F: security/commoncap.c
+
CELL BROADBAND ENGINE ARCHITECTURE
CISCO VIC ETHERNET NIC DRIVER
S: Supported
F: drivers/net/ethernet/cisco/enic/
S: Supported
F: sound/soc/codecs/cs4270*
+ CLEANCACHE API
+ S: Maintained
+ F: mm/cleancache.c
+ F: include/linux/cleancache.h
+
CLK API
+ S: Maintained
F: include/linux/clk.h
CISCO FCOE HBA DRIVER
CONTROL GROUPS (CGROUPS)
+ M: Li Zefan <lizefan@huawei.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
W: http://linuxtv.org
W: http://www.ivtvdriver.org/index.php/Cx18
S: Maintained
S: Orphan
F: drivers/net/wan/pc300*
+ CYTTSP TOUCHSCREEN DRIVER
+ S: Maintained
+ F: drivers/input/touchscreen/cyttsp*
+ F: include/linux/input/cyttsp.h
+
DAMA SLAVE for AX.25
W: http://yaina.de/jreuter/
S: Maintained
DEVICE-MAPPER (LVM)
- P: Alasdair Kergon
W: http://sources.redhat.com/dm
Q: http://patchwork.kernel.org/project/dm-devel/list/
+ T: quilt http://people.redhat.com/agk/patches/linux/editing/
S: Maintained
F: Documentation/device-mapper/
F: drivers/md/dm*
+ F: drivers/md/persistent-data/
F: include/linux/device-mapper.h
F: include/linux/dm-*.h
F: fs/quota/
F: include/linux/quota*.h
+ DISPLAYLINK USB 2.0 FRAMEBUFFER DRIVER (UDLFB)
+ S: Maintained
+ W: http://plugable.com/category/projects/udlfb/
+ F: drivers/video/udlfb.c
+ F: include/video/udlfb.h
+ F: Documentation/fb/udlfb.txt
+
DISTRIBUTED LOCK MANAGER (DLM)
DOCUMENTATION
- T: quilt http://userweb.kernel.org/~rdunlap/kernel-doc-patches/current/
+ T: quilt http://xenotime.net/kernel-doc-patches/current/
S: Maintained
F: Documentation/
DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: Documentation/kobject.txt
F: drivers/base/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux.git
S: Supported
F: drivers/gpu/drm/i915
F: include/drm/i915*
EDAC-CORE
W: bluesmoke.sourceforge.net
S: Supported
F: Documentation/edac.txt
- F: drivers/edac/edac_*
+ F: drivers/edac/
F: include/linux/edac.h
EDAC-AMD64
W: bluesmoke.sourceforge.net
S: Supported
F: drivers/edac/amd64_edac*
EDAC-E752X
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/e752x_edac.c
EDAC-E7XXX
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/e7xxx_edac.c
EDAC-I82443BXGX
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i82443bxgx_edac.c
EDAC-I3000
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i3000_edac.c
EDAC-I5000
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i5000_edac.c
EDAC-I82975X
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i82975x_edac.c
EDAC-PASEMI
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/pasemi_edac.c
EDAC-R82600
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/r82600_edac.c
S: Supported
F: security/integrity/evm/
+ EXYNOS DP DRIVER
+ S: Maintained
+ F: drivers/video/exynos/exynos_dp*
+
+ EXYNOS MIPI DISPLAY DRIVERS
+ S: Maintained
+ F: drivers/video/exynos/exynos_mipi*
+ F: include/video/exynos_mipi*
+
F71805F HARDWARE MONITORING DRIVER
F: arch/frv/
FUJITSU LAPTOP EXTRAS
-M: Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
+M: Jonathan Woithe <jwoithe@just42.net>
S: Maintained
F: drivers/platform/x86/fujitsu-laptop.c
F: drivers/media/video/m5mols/
F: include/media/m5mols.h
+ FUJITSU TABLET EXTRAS
+ S: Maintained
+ F: drivers/platform/x86/fujitsu-tablet.c
+
FUSE: FILESYSTEM IN USERSPACE
W: http://sources.redhat.com/cluster/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-fixes.git
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-fixes.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw.git
S: Supported
F: Documentation/filesystems/gfs2*.txt
F: fs/gfs2/
GSPCA FINEPIX SUBDRIVER
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: drivers/media/video/gspca/finepix.c
GSPCA GL860 SUBDRIVER
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: drivers/media/video/gspca/gl860/
GSPCA M5602 SUBDRIVER
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: drivers/media/video/gspca/m5602/
GSPCA PAC207 SONIXB SUBDRIVER
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: drivers/media/video/gspca/pac207.c
GSPCA SN9C20X SUBDRIVER
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: drivers/media/video/gspca/sn9c20x.c
GSPCA T613 SUBDRIVER
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: drivers/media/video/gspca/t613.c
W: http://moinejf.free.fr
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: drivers/media/video/gspca/
F: include/linux/hwspinlock.h
HARMONY SOUND DRIVER
S: Maintained
F: sound/parisc/harmony.*
Q: http://patchwork.ozlabs.org/project/linux-ide/list/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/ide-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/ide.git
S: Maintained
F: Documentation/ide/
F: drivers/ide/
F: net/ieee802154/
F: drivers/ieee802154/
+ IIO SUBSYSTEM AND DRIVERS
+ S: Maintained
+ F: drivers/staging/iio/
+
IKANOS/ADI EAGLE ADSL USB DRIVER
INTEL IDLE DRIVER
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
S: Supported
F: drivers/idle/intel_idle.c
S: Supported
F: arch/x86/platform/mrst/pmu.*
- INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
+ INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
- S: Orphan
+ S: Maintained
F: Documentation/networking/README.ipw2100
- F: drivers/net/wireless/ipw2x00/ipw2100.*
-
- INTEL PRO/WIRELESS 2915ABG NETWORK CONNECTION SUPPORT
- S: Orphan
F: Documentation/networking/README.ipw2200
- F: drivers/net/wireless/ipw2x00/ipw2200.*
+ F: drivers/net/wireless/ipw2x00/
INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: kernel/irq/
+ IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
+ T: git git://git.secretlab.ca/git/linux-2.6.git irqdomain/next
+ S: Maintained
+ F: Documentation/IRQ-domain.txt
+ F: include/linux/irqdomain.h
+ F: kernel/irq/irqdomain.c
+
ISAPNP
S: Maintained
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
W: http://www.ivtvdriver.org
S: Maintained
F: Documentation/video4linux/*.ivtv
KERNEL AUTOMOUNTER v4 (AUTOFS4)
- L: autofs@linux.kernel.org
+ L: autofs@vger.kernel.org
S: Maintained
F: fs/autofs4/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild-2.6.git for-next
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild-2.6.git rc-fixes
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
S: Maintained
F: Documentation/kbuild/
W: http://lguest.ozlabs.org/
S: Odd Fixes
- F: Documentation/virtual/lguest/
+ F: arch/x86/include/asm/lguest*.h
F: arch/x86/lguest/
F: drivers/lguest/
F: include/linux/lguest*.h
- F: arch/x86/include/asm/lguest*.h
+ F: tools/lguest/
LINUX FOR IBM pSERIES (RS/6000)
W: http://www.penguinppc.org/
- T: git git://git.infradead.org/users/jwboyer/powerpc-4xx.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git
S: Maintained
F: arch/powerpc/platforms/40x/
F: arch/powerpc/platforms/44x/
W: http://www.linux-ntfs.org/content/view/19/37/
S: Maintained
F: Documentation/ldm.txt
- F: fs/partitions/ldm.*
+ F: block/partitions/ldm.*
LogFS
F: drivers/hwmon/ltc4261.c
LTP (Linux Test Project)
W: http://ltp.sourceforge.net/
+ T: git git://github.com/linux-test-project/ltp.git
T: git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev
S: Maintained
W: http://linuxwireless.org/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
S: Maintained
F: Documentation/networking/mac80211-injection.txt
F: include/net/mac80211.h
W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
S: Maintained
F: net/mac80211/rc80211_pid*
S: Maintained
+ MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
+ S: Maintained
+ F: drivers/net/ethernet/marvell/sk*
+
MARVELL LIBERTAS WIRELESS DRIVER
S: Odd Fixes
F: drivers/mmc/host/mvsdio.*
- MARVELL YUKON / SYSKONNECT DRIVER
- W: http://www.syskonnect.com
- S: Supported
-
MATROX FRAMEBUFFER DRIVER
S: Orphan
W: http://linuxtv.org
Q: http://patchwork.kernel.org/project/linux-media/list/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: Documentation/dvb/
F: Documentation/video4linux/
S: Supported
F: arch/microblaze/
+ MICROCHANNEL ARCHITECTURE (MCA)
+ S: Maintained
+ F: Documentation/mca.txt
+ F: drivers/mca/
+ F: include/linux/mca*
+
MICROTEK X6 SCANNER
S: Maintained
F: Documentation/mips/
F: arch/mips/
- MISCELLANEOUS MCA-SUPPORT
- S: Maintained
- F: Documentation/ia64/mca.txt
- F: Documentation/mca.txt
- F: drivers/mca/
- F: include/linux/mca*
-
MODULE SUPPORT
S: Maintained
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
S: Maintained
F: net/ipv4/
F: net/ipv6/
Q: http://patchwork.kernel.org/project/linux-wireless/list/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
S: Maintained
F: net/mac80211/
F: net/rfkill/
NETWORKING DRIVERS
W: http://www.linuxfoundation.org/en/Net
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
S: Odd Fixes
F: drivers/net/
F: include/linux/if_*
W: http://www.tuxera.com/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs.git
S: Supported
F: Documentation/filesystems/ntfs.txt
F: fs/ntfs/
OMNIVISION OV7670 SENSOR DRIVER
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: drivers/media/video/ov7670.c
ORINOCO DRIVER
W: http://linuxwireless.org/en/users/Drivers/orinoco
W: http://www.nongnu.org/orinoco/
S: Orphan
F: drivers/block/paride/
PARISC ARCHITECTURE
W: http://www.parisc-linux.org/
Q: http://patchwork.kernel.org/project/linux-parisc/list/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6.git
S: Maintained
F: arch/parisc/
F: drivers/parisc/
F: Documentation/powerpc/eeh-pci-error-recovery.txt
PCI SUBSYSTEM
Q: http://patchwork.kernel.org/project/linux-pci/list/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci.git
S: Supported
F: Documentation/PCI/
F: drivers/pci/
F: include/linux/pci*
PCI HOTPLUG
S: Supported
F: drivers/pci/hotplug
PERFORMANCE EVENTS SUBSYSTEM
- M: Ingo Molnar <mingo@elte.hu>
+ M: Ingo Molnar <mingo@redhat.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported
S: Maintained
F: drivers/block/ps3vram.c
+ PTP HARDWARE CLOCK SUPPORT
+ S: Maintained
+ W: http://linuxptp.sourceforge.net/
+ F: Documentation/ABI/testing/sysfs-ptp
+ F: Documentation/ptp/*
+ F: drivers/net/gianfar_ptp.c
+ F: drivers/net/phy/dp83640*
+ F: drivers/ptp/*
+ F: include/linux/ptp_cl*
+
PTRACE SUPPORT
W: http://www.isely.net/pvrusb2/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: Documentation/video4linux/README.pvrusb2
F: drivers/media/video/pvrusb2/
PXA2xx/PXA3xx SUPPORT
- M: Haojian Zhuang <haojian.zhuang@marvell.com>
+ M: Haojian Zhuang <haojian.zhuang@gmail.com>
T: git git://github.com/hzhuang1/linux.git
T: git git://git.linaro.org/people/ycmiao/pxa-linux.git
MMP SUPPORT
- M: Haojian Zhuang <haojian.zhuang@marvell.com>
+ M: Haojian Zhuang <haojian.zhuang@gmail.com>
T: git git://github.com/hzhuang1/linux.git
T: git git://git.linaro.org/people/ycmiao/pxa-linux.git
S: Supported
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/rcutorture.c
W: http://www.rdrop.com/users/paulmck/rclock/
S: Supported
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: Documentation/RCU/
F: include/linux/rcu*
F: include/linux/srcu*
F: drivers/base/regmap/
F: include/linux/regmap.h
+ REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
+ S: Maintained
+ F: drivers/remoteproc/
+ F: Documentation/remoteproc.txt
+ F: include/linux/remoteproc.h
+
RFKILL
S: Supported
F: arch/s390/
F: drivers/s390/
- F: fs/partitions/ibm.c
+ F: block/partitions/ibm.c
F: Documentation/s390/
F: Documentation/DocBook/s390*
SAA7146 VIDEO4LINUX-2 DRIVER
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
W: http://www.mihu.de/linux/saa7146
S: Maintained
F: drivers/media/common/saa7146*
F: drivers/media/video/*7146*
F: include/media/*7146*
+ SAMSUNG LAPTOP DRIVER
+ S: Maintained
+ F: drivers/platform/x86/samsung-laptop.c
+
SAMSUNG AUDIO (ASoC) DRIVERS
F: drivers/watchdog/sc1200wdt.c
SCHEDULER
- M: Ingo Molnar <mingo@elte.hu>
+ M: Ingo Molnar <mingo@redhat.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
S: Maintained
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
S: Maintained
F: drivers/mmc/host/sdhci-spear.c
SECURITY SUBSYSTEM
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
W: http://security.wiki.kernel.org/
SELINUX SECURITY MODULE
W: http://selinuxproject.org
TI DAVINCI MACHINE SUPPORT
+ T: git git://gitorious.org/linux-davinci/linux-davinci.git
Q: http://patchwork.kernel.org/project/linux-davinci/list/
S: Supported
F: arch/arm/mach-davinci
S: Maintained
F: drivers/usb/misc/sisusbvga/
- SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
- S: Maintained
- F: drivers/net/ethernet/marvell/sk*
-
SLAB ALLOCATOR
SOC-CAMERA V4L2 SUBSYSTEM
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
F: include/media/v4l2*
F: drivers/media/video/v4l2*
Q: http://patchwork.ozlabs.org/project/sparclinux/list/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next.git
S: Maintained
F: arch/sparc/
F: drivers/sbus/
SPARC SERIAL DRIVERS
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next.git
S: Maintained
+ F: include/linux/sunserialcore.h
F: drivers/tty/serial/suncore.c
- F: drivers/tty/serial/suncore.h
F: drivers/tty/serial/sunhv.c
F: drivers/tty/serial/sunsab.c
F: drivers/tty/serial/sunsab.h
F: drivers/tty/serial/sunzilog.c
F: drivers/tty/serial/sunzilog.h
+ SPARSE CHECKER
+ W: https://sparse.wiki.kernel.org/
+ T: git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
+ T: git git://git.kernel.org/pub/scm/devel/sparse/chrisl/sparse.git
+ S: Maintained
+ F: include/linux/compiler.h
+
SPEAR PLATFORM SUPPORT
W: http://www.st.com/spear
S: Maintained
F: arch/arm/plat-spear/
SPEAR3XX MACHINE SUPPORT
W: http://www.st.com/spear
S: Maintained
F: arch/arm/mach-spear3xx/
SPEAR6XX MACHINE SUPPORT
W: http://www.st.com/spear
S: Maintained
F: arch/arm/mach-spear6xx/
SPEAR CLOCK FRAMEWORK SUPPORT
W: http://www.st.com/spear
S: Maintained
F: arch/arm/mach-spear*/clock.c
SPEAR PAD MULTIPLEXING SUPPORT
W: http://www.st.com/spear
S: Maintained
F: arch/arm/plat-spear/include/plat/padmux.h
S: Odd Fixes
F: drivers/staging/olpc_dcon/
+ STAGING - OZMO DEVICES USB OVER WIFI DRIVER
+ S: Maintained
+ F: drivers/staging/ozwpan/
+
STAGING - PARALLEL LCD/KEYPAD PANEL DRIVER
S: Odd Fixes
S: Odd Fixes
F: drivers/staging/tidspbridge/
- STAGING - TRIDENT TVMASTER TMxxxx USB VIDEO CAPTURE DRIVERS
- S: Odd Fixes
- F: drivers/staging/tm6000/
-
STAGING - USB ENE SM/MS CARD READER DRIVER
S: Odd Fixes
L: http://groups.google.com/group/linux-iscsi-target-dev
W: http://www.linux-iscsi.org
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core-2.6.git master
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master
S: Supported
F: drivers/target/
F: include/target/
TEGRA SUPPORT
- M: Stephen Warren <swarren@nvidia.com>
+ M: Stephen Warren <swarren@wwwdotorg.org>
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra.git
+ Q: http://patchwork.ozlabs.org/project/linux-tegra/list/
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
S: Supported
F: arch/arm/mach-tegra
TTY LAYER
S: Supported
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
F: drivers/tty/
F: drivers/tty/serial/serial_core.c
F: include/linux/serial_core.h
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
W: http://www.linux-projects.org
S: Maintained
F: drivers/media/video/et61x251/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
W: http://www.linux-projects.org
S: Maintained
F: Documentation/video4linux/sn9c102.txt
W: http://www.linux-usb.org
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
S: Supported
F: Documentation/usb/
F: drivers/net/usb/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
W: http://www.ideasonboard.org/uvc/
S: Maintained
F: drivers/media/video/uvc/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
W: http://www.linux-projects.org
S: Maintained
F: Documentation/video4linux/w9968cf.txt
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
W: http://royale.zerezo.com/zr364xx/
S: Maintained
F: Documentation/video4linux/zr364xx.txt
W: http://opensource.wolfsonmicro.com/node/15
W: http://www.slimlogic.co.uk/?p=48
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/voltage-2.6.git
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git
S: Supported
F: drivers/regulator/
F: include/linux/regulator/
W: http://www.linux-watchdog.org/
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog.git
+ T: git git://www.linux-watchdog.org/linux-watchdog.git
S: Maintained
F: Documentation/watchdog/
F: drivers/watchdog/
F: Documentation/filesystems/xfs.txt
F: fs/xfs/
+ XILINX AXI ETHERNET DRIVER
+ S: Maintained
+ F: drivers/net/ethernet/xilinx/xilinx_axienet*
+
XILINX SYSTEMACE DRIVER
W: http://www.secretlab.ca/
VERSION = 3
- PATCHLEVEL = 3
+ PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc2
NAME = Saber-toothed Squirrel
# Files to ignore in find ... statements
-RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS -o -name .pc -o -name .hg -o -name .git \) -prune -o
-export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS --exclude .pc --exclude .hg --exclude .git
+RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS \
+ -o -name .pc -o -name .hg -o -name .git \) -prune -o
+export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
+ --exclude CVS --exclude .pc --exclude .hg --exclude .git
# ===========================================================================
# Rules shared between *config targets and build targets
ifneq ($(KBUILD_SRC),)
@$(kecho) ' Using $(srctree) as source for kernel'
$(Q)if [ -f $(srctree)/.config -o -d $(srctree)/include/config ]; then \
- echo " $(srctree) is not clean, please run 'make mrproper'";\
+ echo " $(srctree) is not clean, please run 'make mrproper'"; \
echo " in the '$(srctree)' directory.";\
/bin/false; \
fi;
endef
define filechk_version.h
- (echo \#define LINUX_VERSION_CODE $(shell \
- expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
+ (echo \#define LINUX_VERSION_CODE $(shell \
+ expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
endef
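For illustration only (not part of the patch): the filechk_version.h rule above packs VERSION, PATCHLEVEL and SUBLEVEL into a single integer, the same value the generated KERNEL_VERSION() macro produces. A minimal userspace C sketch of that encoding:

#include <stdio.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	/* VERSION=3, PATCHLEVEL=4, SUBLEVEL=0 from the Makefile hunk above:
	 * 3 * 65536 + 4 * 256 + 0 = 197632 */
	printf("LINUX_VERSION_CODE = %d\n", KERNEL_VERSION(3, 4, 0));
	return 0;
}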
#
clean: rm-dirs := $(CLEAN_DIRS)
clean: rm-files := $(CLEAN_FILES)
- clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation)
+ clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation samples)
PHONY += $(clean-dirs) clean archclean
$(clean-dirs):
ok = 0;
/* If both conditions above are met, we are fine. */
- DBGA("pci_dac_dma_supported %s from %p\n",
+ DBGA("pci_dac_dma_supported %s from %pf\n",
ok ? "yes" : "no", __builtin_return_address(0));
return ok;
&& paddr + size <= __direct_map_size) {
ret = paddr + __direct_map_base;
- DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n",
+ DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
cpu_addr, size, ret, __builtin_return_address(0));
return ret;
if (dac_allowed) {
ret = paddr + alpha_mv.pci_dac_offset;
- DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n",
+ DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
cpu_addr, size, ret, __builtin_return_address(0));
return ret;
ret = arena->dma_base + dma_ofs * PAGE_SIZE;
ret += (unsigned long)cpu_addr & ~PAGE_MASK;
- DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n",
+ DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
cpu_addr, size, npages, ret, __builtin_return_address(0));
return ret;
&& dma_addr < __direct_map_base + __direct_map_size) {
/* Nothing to do. */
- DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n",
+ DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
dma_addr, size, __builtin_return_address(0));
return;
}
if (dma_addr > 0xffffffff) {
- DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n",
+ DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
dma_addr, size, __builtin_return_address(0));
return;
}
spin_unlock_irqrestore(&arena->lock, flags);
- DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
+ DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
dma_addr, size, npages, __builtin_return_address(0));
}
else DMA_ADDRP is undefined. */
static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addrp, gfp_t gfp)
+ dma_addr_t *dma_addrp, gfp_t gfp,
+ struct dma_attrs *attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
void *cpu_addr;
cpu_addr = (void *)__get_free_pages(gfp, order);
if (! cpu_addr) {
printk(KERN_INFO "pci_alloc_consistent: "
- "get_free_pages failed from %p\n",
+ "get_free_pages failed from %pf\n",
__builtin_return_address(0));
/* ??? Really atomic allocation? Otherwise we could play
with vmalloc and sg if we can't find contiguous memory. */
goto try_again;
}
- DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
+ DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
size, cpu_addr, *dma_addrp, __builtin_return_address(0));
return cpu_addr;
DMA_ADDR past this call are illegal. */
static void alpha_pci_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr)
+ void *cpu_addr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
free_pages((unsigned long)cpu_addr, get_order(size));
- DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
+ DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
dma_addr, size, __builtin_return_address(0));
}
}
struct dma_map_ops alpha_pci_ops = {
- .alloc_coherent = alpha_pci_alloc_coherent,
- .free_coherent = alpha_pci_free_coherent,
+ .alloc = alpha_pci_alloc_coherent,
+ .free = alpha_pci_free_coherent,
.map_page = alpha_pci_map_page,
.unmap_page = alpha_pci_unmap_page,
.map_sg = alpha_pci_map_sg,
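For reference, a minimal sketch of the callback shape after the alpha conversion above, derived only from these hunks; the example_* names are placeholders, not kernel symbols. The coherent alloc/free hooks gain a struct dma_attrs * argument and hang off the renamed .alloc/.free members of struct dma_map_ops:

#include <linux/dma-mapping.h>

static void *example_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp,
			   struct dma_attrs *attrs)
{
	return NULL;	/* placeholder body, illustration only */
}

static void example_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t dma_handle, struct dma_attrs *attrs)
{
}

struct dma_map_ops example_dma_ops = {
	.alloc	= example_alloc,
	.free	= example_free,
};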
#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/atomic.h>
- #include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mipsmtregs.h>
unsigned int nconfig7 = oconfig7;
if (mt_opt_norps) {
- printk("\"norps\" option deprectated: use \"rpsctl=\"\n");
+ printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
}
if (mt_opt_rpsctl >= 0) {
printk("34K return prediction stack override set to %d.\n",
{
struct ltq_pci_data *ltq_pci_data =
(struct ltq_pci_data *) pdev->dev.platform_data;
- pci_probe_only = 0;
+
+ pci_clear_flags(PCI_PROBE_ONLY);
ltq_pci_irq_map = ltq_pci_data->irq;
ltq_pci_membase = ioremap_nocache(PCI_CR_BASE_ADDR, PCI_CR_SIZE);
ltq_pci_mapped_cfg =
{
int ret = platform_driver_register(<q_pci_driver);
if (ret)
- printk(KERN_INFO "ltq_pci: Error registering platfom driver!");
+ printk(KERN_INFO "ltq_pci: Error registering platform driver!");
return ret;
}
/* nothing to do if vm isn't bound */
if (vm->id == -1)
- return 0;;
+ return 0;
bo_va = radeon_bo_va(bo, vm);
if (bo_va == NULL) {
if (bo_va == NULL)
return 0;
- list_del(&bo_va->bo_list);
mutex_lock(&vm->mutex);
radeon_mutex_lock(&rdev->cs_mutex);
radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
radeon_mutex_unlock(&rdev->cs_mutex);
list_del(&bo_va->vm_list);
mutex_unlock(&vm->mutex);
+ list_del(&bo_va->bo_list);
kfree(bo_va);
return 0;
.oobavail = 8,
};
- /**
- * struct docg3_bch - BCH engine
- */
- static struct bch_control *docg3_bch;
-
static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
{
- u8 val = readb(docg3->base + reg);
+ u8 val = readb(docg3->cascade->base + reg);
trace_docg3_io(0, 8, reg, (int)val);
return val;
static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
{
- u16 val = readw(docg3->base + reg);
+ u16 val = readw(docg3->cascade->base + reg);
trace_docg3_io(0, 16, reg, (int)val);
return val;
static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
{
- writeb(val, docg3->base + reg);
+ writeb(val, docg3->cascade->base + reg);
trace_docg3_io(1, 8, reg, val);
}
static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
{
- writew(val, docg3->base + reg);
+ writew(val, docg3->cascade->base + reg);
trace_docg3_io(1, 16, reg, val);
}
* leveling counters are stored. To access this last area of 4 bytes, a special
* mode must be input to the flash ASIC.
*
- * Returns 0 if no error occured, -EIO else.
+ * Returns 0 if no error occurred, -EIO else.
*/
static int doc_set_extra_page_mode(struct docg3 *docg3)
{
for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
ecc[i] = bitrev8(hwecc[i]);
- numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES,
+ numerrs = decode_bch(docg3->cascade->bch, NULL,
+ DOC_ECC_BCH_COVERED_BYTES,
NULL, ecc, NULL, errorpos);
BUG_ON(numerrs == -EINVAL);
if (numerrs < 0)
* - one read of 512 bytes at offset 0
* - one read of 512 bytes at offset 512 + 16
*
- * Returns 0 if successful, -EIO if a read error occured.
+ * Returns 0 if successful, -EIO if a read error occurred.
*/
static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1,
int page, int offset)
* doc_read_page_getbytes - Reads bytes from a prepared page
* @docg3: the device
* @len: the number of bytes to be read (must be a multiple of 4)
- * @buf: the buffer to be filled in
+ * @buf: the buffer to be filled in (or NULL to forget the bytes)
* @first: 1 if first time read, DOC_READADDRESS should be set
*
*/
*
* Reads flash memory OOB area of pages.
*
- * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured
+ * Returns 0 if read successful, or -EIO, -EINVAL if an error occurred
*/
static int doc_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, ret, ofs = 0;
+ int block0, block1, page, ret, skip, ofs = 0;
u8 *oobbuf = ops->oobbuf;
u8 *buf = ops->datbuf;
size_t len, ooblen, nbdata, nboob;
doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
from, ops->mode, buf, len, oobbuf, ooblen);
- if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) ||
- (from % DOC_LAYOUT_PAGE_SIZE))
+ if (ooblen % DOC_LAYOUT_OOB_SIZE)
return -EINVAL;
- ret = -EINVAL;
- calc_block_sector(from + len, &block0, &block1, &page, &ofs,
- docg3->reliable);
- if (block1 > docg3->max_block)
- goto err;
+ if (from + len > mtd->size)
+ return -EINVAL;
ops->oobretlen = 0;
ops->retlen = 0;
ret = 0;
+ skip = from % DOC_LAYOUT_PAGE_SIZE;
+ mutex_lock(&docg3->cascade->lock);
while (!ret && (len > 0 || ooblen > 0)) {
- calc_block_sector(from, &block0, &block1, &page, &ofs,
+ calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
docg3->reliable);
- nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
if (ret < 0)
- goto err;
+ goto out;
ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
if (ret < 0)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, nbdata, buf, 1);
+ ret = doc_read_page_getbytes(docg3, skip, NULL, 1);
+ if (ret < skip)
+ goto err_in_read;
+ ret = doc_read_page_getbytes(docg3, nbdata, buf, 0);
if (ret < nbdata)
goto err_in_read;
- doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata,
+ doc_read_page_getbytes(docg3,
+ DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
NULL, 0);
ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
if (ret < nboob)
len -= nbdata;
ooblen -= nboob;
from += DOC_LAYOUT_PAGE_SIZE;
+ skip = 0;
}
+ out:
+ mutex_unlock(&docg3->cascade->lock);
return ret;
err_in_read:
doc_read_page_finish(docg3);
- err:
- return ret;
+ goto out;
}
/**
* Reads flash memory pages. This function does not read the OOB chunk, but only
* the page data.
*
- * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured
+ * Returns 0 if read successful, or -EIO, -EINVAL if an error occurred
*/
static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
* Wait for the chip to be ready again after erase or write operation, and check
* erase/write status.
*
- * Returns 0 if erase successfull, -EIO if erase/write issue, -ETIMEOUT if
+ * Returns 0 if erase successful, -EIO if erase/write issue, -ETIMEOUT if
* timeout
*/
static int doc_write_erase_wait_status(struct docg3 *docg3)
{
- int status, ret = 0;
+ int i, status, ret = 0;
- if (!doc_is_ready(docg3))
- usleep_range(3000, 3000);
+ for (i = 0; !doc_is_ready(docg3) && i < 5; i++)
+ msleep(20);
if (!doc_is_ready(docg3)) {
doc_dbg("Timeout reached and the chip is still not ready\n");
ret = -EAGAIN;
* Erase a bunch of contiguous blocks, by pairs, as a "mtd" page of 1024 is
* split into 2 pages of 512 bytes on 2 contiguous blocks.
*
- * Returns 0 if erase successful, -EINVAL if adressing error, -EIO if erase
+ * Returns 0 if erase successful, -EINVAL if addressing error, -EIO if erase
* issue
*/
static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
int block0, block1, page, ret, ofs = 0;
doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
- doc_set_device_id(docg3, docg3->device_id);
info->state = MTD_ERASE_PENDING;
calc_block_sector(info->addr + info->len, &block0, &block1, &page,
&ofs, docg3->reliable);
ret = -EINVAL;
- if (block1 > docg3->max_block || page || ofs)
+ if (info->addr + info->len > mtd->size || page || ofs)
goto reset_err;
ret = 0;
calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
docg3->reliable);
+ mutex_lock(&docg3->cascade->lock);
+ doc_set_device_id(docg3, docg3->device_id);
doc_set_reliable_mode(docg3);
for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
info->state = MTD_ERASING;
block0 += 2;
block1 += 2;
}
+ mutex_unlock(&docg3->cascade->lock);
if (ret)
goto reset_err;
* Or provide data without OOB, and then an all-zeroed OOB will be used (ECC will
* still be filled in if asked for).
*
- * Returns 0 is successfull, EINVAL if length is not 14 bytes
+ * Returns 0 if successful, EINVAL if length is not 14 bytes
*/
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
struct mtd_oob_ops *ops)
{
struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, ret, pofs = 0, autoecc, oobdelta;
+ int ret, autoecc, oobdelta;
u8 *oobbuf = ops->oobbuf;
u8 *buf = ops->datbuf;
size_t len, ooblen;
if (len && ooblen &&
(len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
return -EINVAL;
-
- ret = -EINVAL;
- calc_block_sector(ofs + len, &block0, &block1, &page, &pofs,
- docg3->reliable);
- if (block1 > docg3->max_block)
- goto err;
+ if (ofs + len > mtd->size)
+ return -EINVAL;
ops->oobretlen = 0;
ops->retlen = 0;
if (autoecc < 0)
return autoecc;
+ mutex_lock(&docg3->cascade->lock);
while (!ret && len > 0) {
memset(oob, 0, sizeof(oob));
if (ofs == docg3->oob_write_ofs)
}
ops->retlen += DOC_LAYOUT_PAGE_SIZE;
}
- err:
+
doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
return ret;
}
struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
int dps0;
+ mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
}
struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
int dps1;
+ mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
}
if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
return -EINVAL;
+ mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
return count;
}
if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
return -EINVAL;
+ mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
doc_set_device_id(docg3, 0);
+ mutex_unlock(&docg3->cascade->lock);
return count;
}
};
static int doc_register_sysfs(struct platform_device *pdev,
- struct mtd_info **floors)
+ struct docg3_cascade *cascade)
{
int ret = 0, floor, i = 0;
struct device *dev = &pdev->dev;
- for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor];
- floor++)
+ for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS &&
+ cascade->floors[floor]; floor++)
for (i = 0; !ret && i < 4; i++)
ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
if (!ret)
}
static void doc_unregister_sysfs(struct platform_device *pdev,
- struct mtd_info **floors)
+ struct docg3_cascade *cascade)
{
struct device *dev = &pdev->dev;
int floor, i;
- for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor];
+ for (floor = 0; floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
floor++)
for (i = 0; i < 4; i++)
device_remove_file(dev, &doc_sys_attrs[floor][i]);
struct docg3 *docg3 = (struct docg3 *)s->private;
int pos = 0;
- u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ u8 fctrl;
+
+ mutex_lock(&docg3->cascade->lock);
+ fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ mutex_unlock(&docg3->cascade->lock);
pos += seq_printf(s,
"FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
{
struct docg3 *docg3 = (struct docg3 *)s->private;
- int pos = 0;
- int pctrl = doc_register_readb(docg3, DOC_ASICMODE);
- int mode = pctrl & 0x03;
+ int pos = 0, pctrl, mode;
+
+ mutex_lock(&docg3->cascade->lock);
+ pctrl = doc_register_readb(docg3, DOC_ASICMODE);
+ mode = pctrl & 0x03;
+ mutex_unlock(&docg3->cascade->lock);
pos += seq_printf(s,
"%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
{
struct docg3 *docg3 = (struct docg3 *)s->private;
int pos = 0;
- int id = doc_register_readb(docg3, DOC_DEVICESELECT);
+ int id;
+
+ mutex_lock(&docg3->cascade->lock);
+ id = doc_register_readb(docg3, DOC_DEVICESELECT);
+ mutex_unlock(&docg3->cascade->lock);
pos += seq_printf(s, "DeviceId = %d\n", id);
return pos;
int pos = 0;
int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
+ mutex_lock(&docg3->cascade->lock);
protect = doc_register_readb(docg3, DOC_PROTECTION);
dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
+ mutex_unlock(&docg3->cascade->lock);
pos += seq_printf(s, "Protection = 0x%02x (",
protect);
switch (chip_id) {
case DOC_CHIPID_G3:
- mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d",
+ mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
docg3->device_id);
docg3->max_block = 2047;
break;
mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
if (docg3->reliable == 2)
mtd->erasesize /= 2;
- mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
+ mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->owner = THIS_MODULE;
- mtd->erase = doc_erase;
- mtd->read = doc_read;
- mtd->write = doc_write;
- mtd->read_oob = doc_read_oob;
- mtd->write_oob = doc_write_oob;
- mtd->block_isbad = doc_block_isbad;
+ mtd->_erase = doc_erase;
+ mtd->_read = doc_read;
+ mtd->_write = doc_write;
+ mtd->_read_oob = doc_read_oob;
+ mtd->_write_oob = doc_write_oob;
+ mtd->_block_isbad = doc_block_isbad;
mtd->ecclayout = &docg3_oobinfo;
+ mtd->ecc_strength = DOC_ECC_BCH_T;
}
/**
* @base: the io space where the device is probed
* @floor: the floor of the probed device
* @dev: the device
+ * @cascade: the cascade of chips this devices will belong to
*
* Checks whether a device at the specified IO range and floor is available.
*
* if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is
* launched.
*/
- static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
- struct device *dev)
+ static struct mtd_info * __init
+ doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
{
int ret, bbt_nbpages;
u16 chip_id, chip_id_inv;
docg3->dev = dev;
docg3->device_id = floor;
- docg3->base = base;
+ docg3->cascade = cascade;
doc_set_device_id(docg3, docg3->device_id);
if (!floor)
doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
switch (chip_id) {
case DOC_CHIPID_G3:
doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
- base, floor);
+ docg3->cascade->base, floor);
break;
default:
doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
* docg3_resume - Awakens docg3 floor
* @pdev: platform device
*
- * Returns 0 (always successfull)
+ * Returns 0 (always successful)
*/
static int docg3_resume(struct platform_device *pdev)
{
int i;
+ struct docg3_cascade *cascade;
struct mtd_info **docg3_floors, *mtd;
struct docg3 *docg3;
- docg3_floors = platform_get_drvdata(pdev);
+ cascade = platform_get_drvdata(pdev);
+ docg3_floors = cascade->floors;
mtd = docg3_floors[0];
docg3 = mtd->priv;
static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
{
int floor, i;
+ struct docg3_cascade *cascade;
struct mtd_info **docg3_floors, *mtd;
struct docg3 *docg3;
u8 ctrl, pwr_down;
- docg3_floors = platform_get_drvdata(pdev);
+ cascade = platform_get_drvdata(pdev);
+ docg3_floors = cascade->floors;
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
mtd = docg3_floors[floor];
if (!mtd)
struct resource *ress;
void __iomem *base;
int ret, floor, found = 0;
- struct mtd_info **docg3_floors;
+ struct docg3_cascade *cascade;
ret = -ENXIO;
ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = ioremap(ress->start, DOC_IOSPACE_SIZE);
ret = -ENOMEM;
- docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS,
- GFP_KERNEL);
- if (!docg3_floors)
+ cascade = kzalloc(sizeof(*cascade) * DOC_MAX_NBFLOORS,
+ GFP_KERNEL);
+ if (!cascade)
goto nomem1;
- docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+ cascade->base = base;
+ mutex_init(&cascade->lock);
+ cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
DOC_ECC_BCH_PRIMPOLY);
- if (!docg3_bch)
+ if (!cascade->bch)
goto nomem2;
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
- mtd = doc_probe_device(base, floor, dev);
+ mtd = doc_probe_device(cascade, floor, dev);
if (IS_ERR(mtd)) {
ret = PTR_ERR(mtd);
goto err_probe;
else
continue;
}
- docg3_floors[floor] = mtd;
+ cascade->floors[floor] = mtd;
ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
0);
if (ret)
found++;
}
- ret = doc_register_sysfs(pdev, docg3_floors);
+ ret = doc_register_sysfs(pdev, cascade);
if (ret)
goto err_probe;
if (!found)
goto notfound;
- platform_set_drvdata(pdev, docg3_floors);
- doc_dbg_register(docg3_floors[0]->priv);
+ platform_set_drvdata(pdev, cascade);
+ doc_dbg_register(cascade->floors[0]->priv);
return 0;
notfound:
ret = -ENODEV;
dev_info(dev, "No supported DiskOnChip found\n");
err_probe:
- free_bch(docg3_bch);
+ kfree(cascade->bch);
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
- if (docg3_floors[floor])
- doc_release_device(docg3_floors[floor]);
+ if (cascade->floors[floor])
+ doc_release_device(cascade->floors[floor]);
nomem2:
- kfree(docg3_floors);
+ kfree(cascade);
nomem1:
iounmap(base);
noress:
*/
static int __exit docg3_release(struct platform_device *pdev)
{
- struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
- struct docg3 *docg3 = docg3_floors[0]->priv;
- void __iomem *base = docg3->base;
+ struct docg3_cascade *cascade = platform_get_drvdata(pdev);
+ struct docg3 *docg3 = cascade->floors[0]->priv;
+ void __iomem *base = cascade->base;
int floor;
- doc_unregister_sysfs(pdev, docg3_floors);
+ doc_unregister_sysfs(pdev, cascade);
doc_dbg_unregister(docg3);
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
- if (docg3_floors[floor])
- doc_release_device(docg3_floors[floor]);
+ if (cascade->floors[floor])
+ doc_release_device(cascade->floors[floor]);
- kfree(docg3_floors);
- free_bch(docg3_bch);
+ free_bch(docg3->cascade->bch);
+ kfree(cascade);
iounmap(base);
return 0;
}
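As a side note, the docg3 changes above repeatedly wrap register access in the new per-cascade mutex. A sketch of that pattern, assuming the field and function names shown in the hunks; doc_read_protected_reg() is a hypothetical helper, not part of the patch:

static int doc_read_protected_reg(struct docg3 *docg3, u16 reg)
{
	int val;

	/* Serialize against other floors sharing the same cascade, select
	 * this floor, read the register, then deselect and unlock. */
	mutex_lock(&docg3->cascade->lock);
	doc_set_device_id(docg3, docg3->device_id);
	val = doc_register_readb(docg3, reg);
	doc_set_device_id(docg3, 0);
	mutex_unlock(&docg3->cascade->lock);

	return val;
}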
#include <linux/pci-aspm.h>
#include <linux/prefetch.h>
- #include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
RTL_CFG_2
};
- static void rtl_hw_start_8169(struct net_device *);
- static void rtl_hw_start_8168(struct net_device *);
- static void rtl_hw_start_8101(struct net_device *);
-
static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
__le16 tx_underun;
};
+ enum rtl_flag {
+ RTL_FLAG_TASK_ENABLED,
+ RTL_FLAG_TASK_SLOW_PENDING,
+ RTL_FLAG_TASK_RESET_PENDING,
+ RTL_FLAG_TASK_PHY_PENDING,
+ RTL_FLAG_MAX
+ };
+
+ struct rtl8169_stats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+ };
+
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev;
struct net_device *dev;
struct napi_struct napi;
- spinlock_t lock;
u32 msg_enable;
u16 txd_version;
u16 mac_version;
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
u32 dirty_rx;
u32 dirty_tx;
+ struct rtl8169_stats rx_stats;
+ struct rtl8169_stats tx_stats;
struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
dma_addr_t TxPhyAddr;
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
struct timer_list timer;
u16 cp_cmd;
- u16 intr_event;
- u16 napi_event;
- u16 intr_mask;
+
+ u16 event_slow;
struct mdio_ops {
void (*write)(void __iomem *, int, int);
unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
unsigned int (*link_ok)(void __iomem *);
int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
- struct delayed_work task;
+
+ struct {
+ DECLARE_BITMAP(flags, RTL_FLAG_MAX);
+ struct mutex mutex;
+ struct work_struct work;
+ } wk;
+
unsigned features;
struct mii_if_info mii;
MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);
- static int rtl8169_open(struct net_device *dev);
- static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
- static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
- static int rtl8169_init_ring(struct net_device *dev);
- static void rtl_hw_start(struct net_device *dev);
- static int rtl8169_close(struct net_device *dev);
- static void rtl_set_rx_mode(struct net_device *dev);
- static void rtl8169_tx_timeout(struct net_device *dev);
- static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
- static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
- void __iomem *, u32 budget);
- static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
- static void rtl8169_down(struct net_device *dev);
- static void rtl8169_rx_clear(struct rtl8169_private *tp);
- static int rtl8169_poll(struct napi_struct *napi, int budget);
+ static void rtl_lock_work(struct rtl8169_private *tp)
+ {
+ mutex_lock(&tp->wk.mutex);
+ }
+
+ static void rtl_unlock_work(struct rtl8169_private *tp)
+ {
+ mutex_unlock(&tp->wk.mutex);
+ }
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
return value;
}
+ static u16 rtl_get_events(struct rtl8169_private *tp)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R16(IntrStatus);
+ }
+
+ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W16(IntrStatus, bits);
+ mmiowb();
+ }
+
+ static void rtl_irq_disable(struct rtl8169_private *tp)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W16(IntrMask, 0);
+ mmiowb();
+ }
+
+ static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W16(IntrMask, bits);
+ }
+
+ #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
+ #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
+ #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
+
+ static void rtl_irq_enable_all(struct rtl8169_private *tp)
+ {
+ rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
+ }
+
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- RTL_W16(IntrMask, 0x0000);
- RTL_W16(IntrStatus, tp->intr_event);
+ rtl_irq_disable(tp);
+ rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
RTL_R8(ChipCmd);
}
struct rtl8169_private *tp,
void __iomem *ioaddr, bool pm)
{
- unsigned long flags;
-
- spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
rtl_link_chg_patch(tp);
/* This is to cancel a scheduled suspend if there's one. */
if (pm)
pm_schedule_suspend(&tp->pci_dev->dev, 5000);
}
- spin_unlock_irqrestore(&tp->lock, flags);
}
static void rtl8169_check_link_status(struct net_device *dev,
{
struct rtl8169_private *tp = netdev_priv(dev);
- spin_lock_irq(&tp->lock);
+ rtl_lock_work(tp);
wol->supported = WAKE_ANY;
wol->wolopts = __rtl8169_get_wol(tp);
- spin_unlock_irq(&tp->lock);
+ rtl_unlock_work(tp);
}
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
struct rtl8169_private *tp = netdev_priv(dev);
- spin_lock_irq(&tp->lock);
+ rtl_lock_work(tp);
if (wol->wolopts)
tp->features |= RTL_FEATURE_WOL;
else
tp->features &= ~RTL_FEATURE_WOL;
__rtl8169_set_wol(tp, wol->wolopts);
- spin_unlock_irq(&tp->lock);
+
+ rtl_unlock_work(tp);
device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
- unsigned long flags;
int ret;
del_timer_sync(&tp->timer);
- spin_lock_irqsave(&tp->lock, flags);
+ rtl_lock_work(tp);
ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
cmd->duplex, cmd->advertising);
- spin_unlock_irqrestore(&tp->lock, flags);
+ rtl_unlock_work(tp);
return ret;
}
return features;
}
- static int rtl8169_set_features(struct net_device *dev,
- netdev_features_t features)
+ static void __rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ netdev_features_t changed = features ^ dev->features;
void __iomem *ioaddr = tp->mmio_addr;
- unsigned long flags;
- spin_lock_irqsave(&tp->lock, flags);
+ if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
+ return;
- if (features & NETIF_F_RXCSUM)
- tp->cp_cmd |= RxChkSum;
- else
- tp->cp_cmd &= ~RxChkSum;
+ if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
+ if (features & NETIF_F_RXCSUM)
+ tp->cp_cmd |= RxChkSum;
+ else
+ tp->cp_cmd &= ~RxChkSum;
- if (dev->features & NETIF_F_HW_VLAN_RX)
- tp->cp_cmd |= RxVlan;
- else
- tp->cp_cmd &= ~RxVlan;
+ if (dev->features & NETIF_F_HW_VLAN_RX)
+ tp->cp_cmd |= RxVlan;
+ else
+ tp->cp_cmd &= ~RxVlan;
- RTL_W16(CPlusCmd, tp->cp_cmd);
- RTL_R16(CPlusCmd);
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_R16(CPlusCmd);
+ }
+ if (changed & NETIF_F_RXALL) {
+ int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
+ if (features & NETIF_F_RXALL)
+ tmp |= (AcceptErr | AcceptRunt);
+ RTL_W32(RxConfig, tmp);
+ }
+ }
+
+ static int rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
- spin_unlock_irqrestore(&tp->lock, flags);
+ rtl_lock_work(tp);
+ __rtl8169_set_features(dev, features);
+ rtl_unlock_work(tp);
return 0;
}
+
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
struct sk_buff *skb)
{
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
- unsigned long flags;
int rc;
- spin_lock_irqsave(&tp->lock, flags);
-
+ rtl_lock_work(tp);
rc = tp->get_settings(dev, cmd);
+ rtl_unlock_work(tp);
- spin_unlock_irqrestore(&tp->lock, flags);
return rc;
}
void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
- unsigned long flags;
if (regs->len > R8169_REGS_SIZE)
regs->len = R8169_REGS_SIZE;
- spin_lock_irqsave(&tp->lock, flags);
+ rtl_lock_work(tp);
memcpy_fromio(p, tp->mmio_addr, regs->len);
- spin_unlock_irqrestore(&tp->lock, flags);
+ rtl_unlock_work(tp);
}
static u32 rtl8169_get_msglevel(struct net_device *dev)
}
}
- static void rtl8169_phy_timer(unsigned long __opaque)
+ static void rtl_phy_work(struct rtl8169_private *tp)
{
- struct net_device *dev = (struct net_device *)__opaque;
- struct rtl8169_private *tp = netdev_priv(dev);
struct timer_list *timer = &tp->timer;
void __iomem *ioaddr = tp->mmio_addr;
unsigned long timeout = RTL8169_PHY_TIMEOUT;
assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
- spin_lock_irq(&tp->lock);
-
if (tp->phy_reset_pending(tp)) {
/*
* A busy loop could burn quite a few cycles on today's CPUs.
}
if (tp->link_ok(ioaddr))
- goto out_unlock;
+ return;
- netif_warn(tp, link, dev, "PHY reset until link up\n");
+ netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
tp->phy_reset_enable(tp);
out_mod_timer:
mod_timer(timer, jiffies + timeout);
- out_unlock:
- spin_unlock_irq(&tp->lock);
}
- #ifdef CONFIG_NET_POLL_CONTROLLER
- /*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
- static void rtl8169_netpoll(struct net_device *dev)
+ static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
+ {
+ if (!test_and_set_bit(flag, tp->wk.flags))
+ schedule_work(&tp->wk.work);
+ }
+
+ static void rtl8169_phy_timer(unsigned long __opaque)
{
+ struct net_device *dev = (struct net_device *)__opaque;
struct rtl8169_private *tp = netdev_priv(dev);
- struct pci_dev *pdev = tp->pci_dev;
- disable_irq(pdev->irq);
- rtl8169_interrupt(pdev->irq, dev);
- enable_irq(pdev->irq);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
}
- #endif
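
rtl8169_phy_timer() above no longer pokes the hardware from timer (softirq) context; it only raises a flag and kicks the shared work item, and rtl_phy_work() runs later in process context. test_and_set_bit() makes rtl_schedule_task() idempotent: a flag that is already pending does not queue the work again. A sketch of how the handler side could consume the flag, assuming the rtl_task() handler and RTL_FLAG_* enum defined elsewhere in this series:

/* Sketch; the real rtl_task() in this series dispatches several flags. */
static void rtl_task(struct work_struct *work)
{
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);

	rtl_lock_work(tp);
	if (test_and_clear_bit(RTL_FLAG_TASK_PHY_PENDING, tp->wk.flags))
		rtl_phy_work(tp);	/* safe: process context, may sleep */
	rtl_unlock_work(tp);
}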
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
void __iomem *ioaddr)
low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
high = addr[4] | (addr[5] << 8);
- spin_lock_irq(&tp->lock);
+ rtl_lock_work(tp);
RTL_W8(Cfg9346, Cfg9346_Unlock);
RTL_W8(Cfg9346, Cfg9346_Lock);
- spin_unlock_irq(&tp->lock);
+ rtl_unlock_work(tp);
}
static int rtl_set_mac_address(struct net_device *dev, void *p)
return -EOPNOTSUPP;
}
- static const struct rtl_cfg_info {
- void (*hw_start)(struct net_device *);
- unsigned int region;
- unsigned int align;
- u16 intr_event;
- u16 napi_event;
- unsigned features;
- u8 default_ver;
- } rtl_cfg_infos [] = {
- [RTL_CFG_0] = {
- .hw_start = rtl_hw_start_8169,
- .region = 1,
- .align = 0,
- .intr_event = SYSErr | LinkChg | RxOverflow |
- RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
- .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
- .features = RTL_FEATURE_GMII,
- .default_ver = RTL_GIGA_MAC_VER_01,
- },
- [RTL_CFG_1] = {
- .hw_start = rtl_hw_start_8168,
- .region = 2,
- .align = 8,
- .intr_event = SYSErr | LinkChg | RxOverflow |
- TxErr | TxOK | RxOK | RxErr,
- .napi_event = TxErr | TxOK | RxOK | RxOverflow,
- .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
- .default_ver = RTL_GIGA_MAC_VER_11,
- },
- [RTL_CFG_2] = {
- .hw_start = rtl_hw_start_8101,
- .region = 2,
- .align = 8,
- .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
- RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
- .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
- .features = RTL_FEATURE_MSI,
- .default_ver = RTL_GIGA_MAC_VER_13,
- }
- };
-
- /* Cfg9346_Unlock assumed. */
- static unsigned rtl_try_msi(struct rtl8169_private *tp,
- const struct rtl_cfg_info *cfg)
- {
- void __iomem *ioaddr = tp->mmio_addr;
- unsigned msi = 0;
- u8 cfg2;
-
- cfg2 = RTL_R8(Config2) & ~MSIEnable;
- if (cfg->features & RTL_FEATURE_MSI) {
- if (pci_enable_msi(tp->pci_dev)) {
- netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
- } else {
- cfg2 |= MSIEnable;
- msi = RTL_FEATURE_MSI;
- }
- }
- if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
- RTL_W8(Config2, cfg2);
- return msi;
- }
-
static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
{
if (tp->features & RTL_FEATURE_MSI) {
}
}
- static const struct net_device_ops rtl8169_netdev_ops = {
- .ndo_open = rtl8169_open,
- .ndo_stop = rtl8169_close,
- .ndo_get_stats = rtl8169_get_stats,
- .ndo_start_xmit = rtl8169_start_xmit,
- .ndo_tx_timeout = rtl8169_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = rtl8169_change_mtu,
- .ndo_fix_features = rtl8169_fix_features,
- .ndo_set_features = rtl8169_set_features,
- .ndo_set_mac_address = rtl_set_mac_address,
- .ndo_do_ioctl = rtl8169_ioctl,
- .ndo_set_rx_mode = rtl_set_rx_mode,
- #ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = rtl8169_netpoll,
- #endif
-
- };
-
static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
{
struct mdio_ops *ops = &tp->mdio_ops;
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
rtl_generic_op(tp, tp->jumbo_ops.enable);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
}
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
rtl_generic_op(tp, tp->jumbo_ops.disable);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
}
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
RTL_W8(MaxTxPacketSize, 0x3f);
RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
RTL_W8(Config4, RTL_R8(Config4) | 0x01);
- pci_write_config_byte(pdev, 0x79, 0x20);
+ rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
RTL_W8(MaxTxPacketSize, 0x0c);
RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
- pci_write_config_byte(pdev, 0x79, 0x50);
+ rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
}
}
- static int __devinit
- rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
- const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
- const unsigned int region = cfg->region;
- struct rtl8169_private *tp;
- struct mii_if_info *mii;
- struct net_device *dev;
- void __iomem *ioaddr;
- int chipset, i;
- int rc;
+ struct rtl_fw *rtl_fw;
+ const char *name;
+ int rc = -ENOMEM;
- if (netif_msg_drv(&debug)) {
- printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
- MODULENAME, RTL8169_VERSION);
- }
+ name = rtl_lookup_firmware_name(tp);
+ if (!name)
+ goto out_no_firmware;
- dev = alloc_etherdev(sizeof (*tp));
- if (!dev) {
- if (netif_msg_drv(&debug))
- dev_err(&pdev->dev, "unable to alloc new ethernet\n");
- rc = -ENOMEM;
- goto out;
- }
+ rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
+ if (!rtl_fw)
+ goto err_warn;
- SET_NETDEV_DEV(dev, &pdev->dev);
- dev->netdev_ops = &rtl8169_netdev_ops;
- tp = netdev_priv(dev);
- tp->dev = dev;
- tp->pci_dev = pdev;
- tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
+ rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
+ if (rc < 0)
+ goto err_free;
- mii = &tp->mii;
- mii->dev = dev;
- mii->mdio_read = rtl_mdio_read;
- mii->mdio_write = rtl_mdio_write;
- mii->phy_id_mask = 0x1f;
- mii->reg_num_mask = 0x1f;
- mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+ rc = rtl_check_firmware(tp, rtl_fw);
+ if (rc < 0)
+ goto err_release_firmware;
- /* disable ASPM completely as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
+ tp->rtl_fw = rtl_fw;
+ out:
+ return;
- /* enable device (incl. PCI PM wakeup and hotplug setup) */
- rc = pci_enable_device(pdev);
- if (rc < 0) {
- netif_err(tp, probe, dev, "enable failure\n");
- goto err_out_free_dev_1;
- }
+ err_release_firmware:
+ release_firmware(rtl_fw->fw);
+ err_free:
+ kfree(rtl_fw);
+ err_warn:
+ netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
+ name, rc);
+ out_no_firmware:
+ tp->rtl_fw = NULL;
+ goto out;
+ }
- if (pci_set_mwi(pdev) < 0)
- netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
+ static void rtl_request_firmware(struct rtl8169_private *tp)
+ {
+ if (IS_ERR(tp->rtl_fw))
+ rtl_request_uncached_firmware(tp);
+ }
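
rtl_request_firmware() performs the expensive request_firmware() dance at most once per device: tp->rtl_fw appears to start out as an ERR_PTR()-style sentinel (RTL_FIRMWARE_UNKNOWN in the probe code removed below), and after the first attempt it is either a valid struct rtl_fw or NULL, so IS_ERR() never fires again. A hypothetical caller-side helper, purely to illustrate the contract:

/* Hypothetical helper, not part of the patch. */
static bool rtl_fw_available(struct rtl8169_private *tp)
{
	rtl_request_firmware(tp);	/* no-op after the first attempt */

	return tp->rtl_fw != NULL;	/* NULL: run without a firmware patch */
}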
- /* make sure PCI base addr 1 is MMIO */
- if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
- netif_err(tp, probe, dev,
- "region #%d not an MMIO resource, aborting\n",
- region);
- rc = -ENODEV;
- goto err_out_mwi_2;
- }
+ static void rtl_rx_close(struct rtl8169_private *tp)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
- /* check for weird/broken PCI region reporting */
- if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- netif_err(tp, probe, dev,
- "Invalid PCI region size(s), aborting\n");
- rc = -ENODEV;
- goto err_out_mwi_2;
- }
+ RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
+ }
- rc = pci_request_regions(pdev, MODULENAME);
- if (rc < 0) {
- netif_err(tp, probe, dev, "could not request regions\n");
- goto err_out_mwi_2;
- }
+ static void rtl8169_hw_reset(struct rtl8169_private *tp)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
- tp->cp_cmd = RxChkSum;
+ /* Disable interrupts */
+ rtl8169_irq_mask_and_ack(tp);
- if ((sizeof(dma_addr_t) > 4) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
- tp->cp_cmd |= PCIDAC;
- dev->features |= NETIF_F_HIGHDMA;
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc < 0) {
- netif_err(tp, probe, dev, "DMA configuration failed\n");
- goto err_out_free_res_3;
- }
- }
+ rtl_rx_close(tp);
- /* ioremap MMIO region */
- ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
- if (!ioaddr) {
- netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
- rc = -EIO;
- goto err_out_free_res_3;
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ while (RTL_R8(TxPoll) & NPQ)
+ udelay(20);
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_36) {
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
+ while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
+ udelay(100);
+ } else {
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
+ udelay(100);
}
- tp->mmio_addr = ioaddr;
-
- if (!pci_is_pcie(pdev))
- netif_info(tp, probe, dev, "not PCI Express\n");
- /* Identify chip attached to board */
- rtl8169_get_mac_version(tp, dev, cfg->default_ver);
+ rtl_hw_reset(tp);
+ }
- rtl_init_rxcfg(tp);
+ static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W16(IntrMask, 0x0000);
+ /* Set DMA burst size and Interframe Gap Time */
+ RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
+ (InterFrameGap << TxInterFrameGapShift));
+ }
- rtl_hw_reset(tp);
+ static void rtl_hw_start(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
- RTL_W16(IntrStatus, 0xffff);
+ tp->hw_start(dev);
- pci_set_master(pdev);
+ rtl_irq_enable_all(tp);
+ }
+ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+ {
/*
- * Pretend we are using VLANs; This bypasses a nasty bug where
- * Interrupts stop flowing on high load on 8110SCd controllers.
+ * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
+ * register to be written before TxDescAddrLow to work.
+ * Switching from MMIO to I/O access fixes the issue as well.
*/
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- tp->cp_cmd |= RxVlan;
+ RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
+ RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
+ RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
+ RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
+ }
- rtl_init_mdio_ops(tp);
- rtl_init_pll_power_ops(tp);
- rtl_init_jumbo_ops(tp);
+ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
+ {
+ u16 cmd;
- rtl8169_print_mac_version(tp);
+ cmd = RTL_R16(CPlusCmd);
+ RTL_W16(CPlusCmd, cmd);
+ return cmd;
+ }
- chipset = tp->mac_version;
- tp->txd_version = rtl_chip_infos[chipset].txd_version;
+ static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
+ {
+ /* Low hurts. Let's disable the filtering. */
+ RTL_W16(RxMaxSize, rx_buf_sz + 1);
+ }
- RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
- RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
- if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
- tp->features |= RTL_FEATURE_WOL;
- if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
- tp->features |= RTL_FEATURE_WOL;
- tp->features |= rtl_try_msi(tp, cfg);
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
+ {
+ static const struct rtl_cfg2_info {
+ u32 mac_version;
+ u32 clk;
+ u32 val;
+ } cfg2_info [] = {
+ { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
+ { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
+ { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
+ { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
+ };
+ const struct rtl_cfg2_info *p = cfg2_info;
+ unsigned int i;
+ u32 clk;
- if (rtl_tbi_enabled(tp)) {
- tp->set_speed = rtl8169_set_speed_tbi;
- tp->get_settings = rtl8169_gset_tbi;
- tp->phy_reset_enable = rtl8169_tbi_reset_enable;
- tp->phy_reset_pending = rtl8169_tbi_reset_pending;
- tp->link_ok = rtl8169_tbi_link_ok;
- tp->do_ioctl = rtl_tbi_ioctl;
- } else {
- tp->set_speed = rtl8169_set_speed_xmii;
- tp->get_settings = rtl8169_gset_xmii;
- tp->phy_reset_enable = rtl8169_xmii_reset_enable;
- tp->phy_reset_pending = rtl8169_xmii_reset_pending;
- tp->link_ok = rtl8169_xmii_link_ok;
- tp->do_ioctl = rtl_xmii_ioctl;
+ clk = RTL_R8(Config2) & PCI_Clock_66MHz;
+ for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
+ if ((p->mac_version == mac_version) && (p->clk == clk)) {
+ RTL_W32(0x7c, p->val);
+ break;
+ }
}
+ }
- spin_lock_init(&tp->lock);
+ static void rtl_set_rx_mode(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int rx_mode;
+ u32 tmp = 0;
- /* Get MAC address */
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = RTL_R8(MAC0 + i);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct netdev_hw_addr *ha;
- SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
- dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
- dev->irq = pdev->irq;
- dev->base_addr = (unsigned long) ioaddr;
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
- netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
+ if (dev->features & NETIF_F_RXALL)
+ rx_mode |= (AcceptErr | AcceptRunt);
- /* don't enable SG, IP_CSUM and TSO by default - it might not work
- * properly for all devices */
- dev->features |= NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_HIGHDMA;
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
+ u32 data = mc_filter[0];
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- /* 8110SCd requires hardware Rx VLAN - disallow toggling */
- dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
+ mc_filter[0] = swab32(mc_filter[1]);
+ mc_filter[1] = swab32(data);
+ }
- tp->intr_mask = 0xffff;
- tp->hw_start = cfg->hw_start;
- tp->intr_event = cfg->intr_event;
- tp->napi_event = cfg->napi_event;
+ RTL_W32(MAR0 + 4, mc_filter[1]);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
- tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
- ~(RxBOVF | RxFOVF) : ~0;
+ RTL_W32(RxConfig, tmp);
+ }
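
The imperfect multicast filter in rtl_set_rx_mode() hashes each address with ether_crc() and uses the top six CRC bits to select one of the 64 filter bits spread across the two 32-bit MAR registers. The same computation, pulled out into a stand-alone helper purely for clarity:

/* bit 5 of bit_nr picks the register word, bits 0-4 pick the bit inside it */
static void rtl_mc_filter_set(u32 mc_filter[2], const u8 *addr)
{
	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	/* 0..63 */

	mc_filter[bit_nr >> 5] |= 1U << (bit_nr & 31);
}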
- init_timer(&tp->timer);
- tp->timer.data = (unsigned long) dev;
- tp->timer.function = rtl8169_phy_timer;
+ static void rtl_hw_start_8169(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
- tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
+ }
- rc = register_netdev(dev);
- if (rc < 0)
- goto err_out_msi_4;
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_04)
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- pci_set_drvdata(pdev, dev);
+ rtl_init_rxcfg(tp);
- netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
- (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
- if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
- netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
- "tx checksumming: %s]\n",
- rtl_chip_infos[chipset].jumbo_max,
- rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
- }
+ RTL_W8(EarlyTxThres, NoEarlyTx);
- if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31) {
- rtl8168_driver_start(tp);
- }
+ rtl_set_rx_max_size(ioaddr, rx_buf_sz);
- device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_04)
+ rtl_set_rx_tx_config_registers(tp);
- if (pci_dev_run_wake(pdev))
- pm_runtime_put_noidle(&pdev->dev);
+ tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
- netif_carrier_off(dev);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03) {
+ dprintk("Set MAC Reg C+CR Offset 0xE0. "
+ "Bit-3 and bit-14 MUST be 1\n");
+ tp->cp_cmd |= (1 << 14);
+ }
- out:
- return rc;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
- err_out_msi_4:
- rtl_disable_msi(pdev, tp);
- iounmap(ioaddr);
- err_out_free_res_3:
- pci_release_regions(pdev);
- err_out_mwi_2:
- pci_clear_mwi(pdev);
- pci_disable_device(pdev);
- err_out_free_dev_1:
- free_netdev(dev);
- goto out;
- }
+ rtl8169_set_magic_reg(ioaddr, tp->mac_version);
- static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
- {
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
+ /*
+ * Undocumented corner. Supposedly:
+ * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
+ */
+ RTL_W16(IntrMitigate, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31) {
- rtl8168_driver_stop(tp);
- }
+ rtl_set_rx_tx_desc_registers(tp, ioaddr);
- cancel_delayed_work_sync(&tp->task);
+ if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_02 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_03 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_04) {
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ rtl_set_rx_tx_config_registers(tp);
+ }
- unregister_netdev(dev);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
- rtl_release_firmware(tp);
+ /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
+ RTL_R8(IntrMask);
- if (pci_dev_run_wake(pdev))
- pm_runtime_get_noresume(&pdev->dev);
+ RTL_W32(RxMissed, 0);
- /* restore original MAC address */
- rtl_rar_set(tp, dev->perm_addr);
+ rtl_set_rx_mode(dev);
- rtl_disable_msi(pdev, tp);
- rtl8169_release_board(pdev, dev, tp->mmio_addr);
- pci_set_drvdata(pdev, NULL);
+ /* no early-rx interrupts */
+ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
- static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
+ static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
{
- struct rtl_fw *rtl_fw;
- const char *name;
- int rc = -ENOMEM;
+ u32 csi;
- name = rtl_lookup_firmware_name(tp);
- if (!name)
- goto out_no_firmware;
+ csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
+ rtl_csi_write(ioaddr, 0x070c, csi | bits);
+ }
- rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
- if (!rtl_fw)
- goto err_warn;
+ static void rtl_csi_access_enable_1(void __iomem *ioaddr)
+ {
+ rtl_csi_access_enable(ioaddr, 0x17000000);
+ }
- rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
- if (rc < 0)
- goto err_free;
+ static void rtl_csi_access_enable_2(void __iomem *ioaddr)
+ {
+ rtl_csi_access_enable(ioaddr, 0x27000000);
+ }
- rc = rtl_check_firmware(tp, rtl_fw);
- if (rc < 0)
- goto err_release_firmware;
+ struct ephy_info {
+ unsigned int offset;
+ u16 mask;
+ u16 bits;
+ };
- tp->rtl_fw = rtl_fw;
- out:
- return;
+ static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
+ {
+ u16 w;
- err_release_firmware:
- release_firmware(rtl_fw->fw);
- err_free:
- kfree(rtl_fw);
- err_warn:
- netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
- name, rc);
- out_no_firmware:
- tp->rtl_fw = NULL;
- goto out;
+ while (len-- > 0) {
+ w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
+ rtl_ephy_write(ioaddr, e->offset, w);
+ e++;
+ }
}
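
rtl_ephy_init() applies a table of {offset, mask, bits} tuples: for each entry the masked bits are cleared and the new bits OR-ed in, which keeps the per-chip setup routines declarative. A typical caller looks like the sketch below; the register offsets and values are placeholders, not taken from any real errata:

static void rtl_hw_start_example(void __iomem *ioaddr)
{
	static const struct ephy_info e_info_example[] = {
		{ 0x06, 0x0080, 0x0000 },	/* clear bit 7 of EPHY reg 0x06 */
		{ 0x19, 0x0000, 0x0224 },	/* OR 0x0224 into EPHY reg 0x19 */
	};

	rtl_ephy_init(ioaddr, e_info_example, ARRAY_SIZE(e_info_example));
}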
- static void rtl_request_firmware(struct rtl8169_private *tp)
+ static void rtl_disable_clock_request(struct pci_dev *pdev)
{
- if (IS_ERR(tp->rtl_fw))
- rtl_request_uncached_firmware(tp);
+ int cap = pci_pcie_cap(pdev);
+
+ if (cap) {
+ u16 ctl;
+
+ pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
+ ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
+ pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
+ }
}
- static int rtl8169_open(struct net_device *dev)
+ static void rtl_enable_clock_request(struct pci_dev *pdev)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
- int retval = -ENOMEM;
+ int cap = pci_pcie_cap(pdev);
- pm_runtime_get_sync(&pdev->dev);
+ if (cap) {
+ u16 ctl;
- /*
- * Rx and Tx descriptors needs 256 bytes alignment.
- * dma_alloc_coherent provides more.
- */
- tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
- &tp->TxPhyAddr, GFP_KERNEL);
- if (!tp->TxDescArray)
- goto err_pm_runtime_put;
+ pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
+ ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
+ pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
+ }
+ }
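
rtl_disable_clock_request() and rtl_enable_clock_request() are the same read-modify-write of the PCI Express Link Control register, differing only in the direction of one bit. A combined form (hypothetical, shown only to make the pattern explicit):

static void rtl_clock_request(struct pci_dev *pdev, bool enable)
{
	int cap = pci_pcie_cap(pdev);
	u16 ctl;

	if (!cap)
		return;			/* not a PCI Express function */

	pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
	if (enable)
		ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
	else
		ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
	pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
}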
- tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
- &tp->RxPhyAddr, GFP_KERNEL);
- if (!tp->RxDescArray)
- goto err_free_tx_0;
+ #define R8168_CPCMD_QUIRK_MASK (\
+ EnableBist | \
+ Mac_dbgo_oe | \
+ Force_half_dup | \
+ Force_rxflow_en | \
+ Force_txflow_en | \
+ Cxpl_dbg_sel | \
+ ASF | \
+ PktCntrDisable | \
+ Mac_dbgo_sel)
+
+ static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
+ {
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
- retval = rtl8169_init_ring(dev);
- if (retval < 0)
- goto err_free_rx_1;
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
- INIT_DELAYED_WORK(&tp->task, NULL);
+ rtl_tx_performance_tweak(pdev,
+ (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+ }
- smp_mb();
+ static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
+ {
+ rtl_hw_start_8168bb(ioaddr, pdev);
- rtl_request_firmware(tp);
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
- retval = request_irq(dev->irq, rtl8169_interrupt,
- (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
- dev->name, dev);
- if (retval < 0)
- goto err_release_fw_2;
+ RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
+ }
- napi_enable(&tp->napi);
+ static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
+ {
+ RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
- rtl8169_init_phy(dev, tp);
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
- rtl8169_set_features(dev, dev->features);
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_pll_power_up(tp);
+ rtl_disable_clock_request(pdev);
- rtl_hw_start(dev);
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ }
- tp->saved_wolopts = 0;
- pm_runtime_put_noidle(&pdev->dev);
+ static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
+ {
+ static const struct ephy_info e_info_8168cp[] = {
+ { 0x01, 0, 0x0001 },
+ { 0x02, 0x0800, 0x1000 },
+ { 0x03, 0, 0x0042 },
+ { 0x06, 0x0080, 0x0000 },
+ { 0x07, 0, 0x2000 }
+ };
- rtl8169_check_link_status(dev, tp, ioaddr);
- out:
- return retval;
+ rtl_csi_access_enable_2(ioaddr);
- err_release_fw_2:
- rtl_release_firmware(tp);
- rtl8169_rx_clear(tp);
- err_free_rx_1:
- dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
- tp->RxPhyAddr);
- tp->RxDescArray = NULL;
- err_free_tx_0:
- dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
- tp->TxPhyAddr);
- tp->TxDescArray = NULL;
- err_pm_runtime_put:
- pm_runtime_put_noidle(&pdev->dev);
- goto out;
+ rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
+
+ __rtl_hw_start_8168cp(ioaddr, pdev);
}
- static void rtl_rx_close(struct rtl8169_private *tp)
+ static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
- void __iomem *ioaddr = tp->mmio_addr;
+ rtl_csi_access_enable_2(ioaddr);
- RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
- static void rtl8169_hw_reset(struct rtl8169_private *tp)
+ static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
- void __iomem *ioaddr = tp->mmio_addr;
+ rtl_csi_access_enable_2(ioaddr);
- /* Disable interrupts */
- rtl8169_irq_mask_and_ack(tp);
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
- rtl_rx_close(tp);
+ /* Magic. */
+ RTL_W8(DBG_REG, 0x20);
- if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31) {
- while (RTL_R8(TxPoll) & NPQ)
- udelay(20);
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
- tp->mac_version == RTL_GIGA_MAC_VER_35 ||
- tp->mac_version == RTL_GIGA_MAC_VER_36) {
- RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
- while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
- udelay(100);
- } else {
- RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
- udelay(100);
- }
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
- rtl_hw_reset(tp);
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
- static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
+ static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
- void __iomem *ioaddr = tp->mmio_addr;
+ static const struct ephy_info e_info_8168c_1[] = {
+ { 0x02, 0x0800, 0x1000 },
+ { 0x03, 0, 0x0002 },
+ { 0x06, 0x0080, 0x0000 }
+ };
- /* Set DMA burst size and Interframe Gap Time */
- RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
- (InterFrameGap << TxInterFrameGapShift));
+ rtl_csi_access_enable_2(ioaddr);
+
+ RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
+
+ rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
+
+ __rtl_hw_start_8168cp(ioaddr, pdev);
}
- static void rtl_hw_start(struct net_device *dev)
+ static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
- struct rtl8169_private *tp = netdev_priv(dev);
+ static const struct ephy_info e_info_8168c_2[] = {
+ { 0x01, 0, 0x0001 },
+ { 0x03, 0x0400, 0x0220 }
+ };
- tp->hw_start(dev);
+ rtl_csi_access_enable_2(ioaddr);
- netif_start_queue(dev);
+ rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
+
+ __rtl_hw_start_8168cp(ioaddr, pdev);
}
- static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
- void __iomem *ioaddr)
+ static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
- /*
- * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
- * register to be written before TxDescAddrLow to work.
- * Switching from MMIO to I/O access fixes the issue as well.
- */
- RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
- RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
- RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
- RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
+ rtl_hw_start_8168c_2(ioaddr, pdev);
}
- static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
+ static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
{
- u16 cmd;
+ rtl_csi_access_enable_2(ioaddr);
- cmd = RTL_R16(CPlusCmd);
- RTL_W16(CPlusCmd, cmd);
- return cmd;
+ __rtl_hw_start_8168cp(ioaddr, pdev);
}
- static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
+ static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
{
- /* Low hurts. Let's disable the filtering. */
- RTL_W16(RxMaxSize, rx_buf_sz + 1);
- }
+ rtl_csi_access_enable_2(ioaddr);
- static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
- {
- static const struct rtl_cfg2_info {
- u32 mac_version;
- u32 clk;
- u32 val;
- } cfg2_info [] = {
- { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
- { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
- { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
- { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
- };
- const struct rtl_cfg2_info *p = cfg2_info;
- unsigned int i;
- u32 clk;
+ rtl_disable_clock_request(pdev);
- clk = RTL_R8(Config2) & PCI_Clock_66MHz;
- for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
- if ((p->mac_version == mac_version) && (p->clk == clk)) {
- RTL_W32(0x7c, p->val);
- break;
- }
- }
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
- static void rtl_hw_start_8169(struct net_device *dev)
+ static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
+ rtl_csi_access_enable_1(ioaddr);
- if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
- pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
- }
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(Cfg9346, Cfg9346_Unlock);
- if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
- tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03 ||
- tp->mac_version == RTL_GIGA_MAC_VER_04)
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
- rtl_init_rxcfg(tp);
+ rtl_disable_clock_request(pdev);
+ }
- RTL_W8(EarlyTxThres, NoEarlyTx);
+ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
+ {
+ static const struct ephy_info e_info_8168d_4[] = {
+ { 0x0b, ~0, 0x48 },
+ { 0x19, 0x20, 0x50 },
+ { 0x0c, ~0, 0x20 }
+ };
+ int i;
- rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+ rtl_csi_access_enable_1(ioaddr);
- if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
- tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03 ||
- tp->mac_version == RTL_GIGA_MAC_VER_04)
- rtl_set_rx_tx_config_registers(tp);
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
- if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03) {
- dprintk("Set MAC Reg C+CR Offset 0xE0. "
- "Bit-3 and bit-14 MUST be 1\n");
- tp->cp_cmd |= (1 << 14);
+ for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
+ const struct ephy_info *e = e_info_8168d_4 + i;
+ u16 w;
+
+ w = rtl_ephy_read(ioaddr, e->offset);
+ rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
}
- RTL_W16(CPlusCmd, tp->cp_cmd);
+ rtl_enable_clock_request(pdev);
+ }
+
+ static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+ {
+ static const struct ephy_info e_info_8168e_1[] = {
+ { 0x00, 0x0200, 0x0100 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x06, 0x0002, 0x0001 },
+ { 0x06, 0x0000, 0x0030 },
+ { 0x07, 0x0000, 0x2000 },
+ { 0x00, 0x0000, 0x0020 },
+ { 0x03, 0x5800, 0x2000 },
+ { 0x03, 0x0000, 0x0001 },
+ { 0x01, 0x0800, 0x1000 },
+ { 0x07, 0x0000, 0x4000 },
+ { 0x1e, 0x0000, 0x2000 },
+ { 0x19, 0xffff, 0xfe6c },
+ { 0x0a, 0x0000, 0x0040 }
+ };
- rtl8169_set_magic_reg(ioaddr, tp->mac_version);
+ rtl_csi_access_enable_2(ioaddr);
- /*
- * Undocumented corner. Supposedly:
- * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
- */
- RTL_W16(IntrMitigate, 0x0000);
+ rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
- rtl_set_rx_tx_desc_registers(tp, ioaddr);
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
- tp->mac_version != RTL_GIGA_MAC_VER_02 &&
- tp->mac_version != RTL_GIGA_MAC_VER_03 &&
- tp->mac_version != RTL_GIGA_MAC_VER_04) {
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- rtl_set_rx_tx_config_registers(tp);
- }
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ rtl_disable_clock_request(pdev);
- /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
- RTL_R8(IntrMask);
+ /* Reset tx FIFO pointer */
+ RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
+ RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
- RTL_W32(RxMissed, 0);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+ }
- rtl_set_rx_mode(dev);
+ static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+ {
+ static const struct ephy_info e_info_8168e_2[] = {
+ { 0x09, 0x0000, 0x0080 },
+ { 0x19, 0x0000, 0x0224 }
+ };
- /* no early-rx interrupts */
- RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
+ rtl_csi_access_enable_1(ioaddr);
- /* Enable all known interrupts by setting the interrupt mask. */
- RTL_W16(IntrMask, tp->intr_event);
- }
+ rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
- static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
- {
- u32 csi;
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
- rtl_csi_write(ioaddr, 0x070c, csi | bits);
- }
+ rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
+ ERIAR_EXGMAC);
- static void rtl_csi_access_enable_1(void __iomem *ioaddr)
- {
- rtl_csi_access_enable(ioaddr, 0x17000000);
- }
+ RTL_W8(MaxTxPacketSize, EarlySize);
- static void rtl_csi_access_enable_2(void __iomem *ioaddr)
- {
- rtl_csi_access_enable(ioaddr, 0x27000000);
- }
+ rtl_disable_clock_request(pdev);
- struct ephy_info {
- unsigned int offset;
- u16 mask;
- u16 bits;
- };
+ RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
- static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
- {
- u16 w;
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
- while (len-- > 0) {
- w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
- rtl_ephy_write(ioaddr, e->offset, w);
- e++;
- }
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
- static void rtl_disable_clock_request(struct pci_dev *pdev)
+ static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
- int cap = pci_pcie_cap(pdev);
+ static const struct ephy_info e_info_8168f_1[] = {
+ { 0x06, 0x00c0, 0x0020 },
+ { 0x08, 0x0001, 0x0002 },
+ { 0x09, 0x0000, 0x0080 },
+ { 0x19, 0x0000, 0x0224 }
+ };
- if (cap) {
- u16 ctl;
+ rtl_csi_access_enable_1(ioaddr);
- pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
- ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
- }
- }
+ rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
- static void rtl_enable_clock_request(struct pci_dev *pdev)
- {
- int cap = pci_pcie_cap(pdev);
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- if (cap) {
- u16 ctl;
+ rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
+ ERIAR_EXGMAC);
- pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
- ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
- }
- }
+ RTL_W8(MaxTxPacketSize, EarlySize);
- #define R8168_CPCMD_QUIRK_MASK (\
- EnableBist | \
- Mac_dbgo_oe | \
- Force_half_dup | \
- Force_rxflow_en | \
- Force_txflow_en | \
- Cxpl_dbg_sel | \
- ASF | \
- PktCntrDisable | \
- Mac_dbgo_sel)
+ rtl_disable_clock_request(pdev);
- static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
- {
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
- rtl_tx_performance_tweak(pdev,
- (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
- static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
+ static void rtl_hw_start_8168(struct net_device *dev)
{
- rtl_hw_start_8168bb(ioaddr, pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
RTL_W8(MaxTxPacketSize, TxPacketMax);
- RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
- }
+ rtl_set_rx_max_size(ioaddr, rx_buf_sz);
- static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
- {
- RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
+ tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ RTL_W16(CPlusCmd, tp->cp_cmd);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ RTL_W16(IntrMitigate, 0x5151);
- rtl_disable_clock_request(pdev);
+ /* Work around for RxFIFO overflow. */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
+ tp->event_slow |= RxFIFOOver | PCSTimeout;
+ tp->event_slow &= ~RxOverflow;
+ }
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
- }
+ rtl_set_rx_tx_desc_registers(tp, ioaddr);
- static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
- {
- static const struct ephy_info e_info_8168cp[] = {
- { 0x01, 0, 0x0001 },
- { 0x02, 0x0800, 0x1000 },
- { 0x03, 0, 0x0042 },
- { 0x06, 0x0080, 0x0000 },
- { 0x07, 0, 0x2000 }
- };
+ rtl_set_rx_mode(dev);
- rtl_csi_access_enable_2(ioaddr);
+ RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
+ (InterFrameGap << TxInterFrameGapShift));
- rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
+ RTL_R8(IntrMask);
- __rtl_hw_start_8168cp(ioaddr, pdev);
- }
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ rtl_hw_start_8168bb(ioaddr, pdev);
+ break;
- static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
- {
- rtl_csi_access_enable_2(ioaddr);
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ rtl_hw_start_8168bef(ioaddr, pdev);
+ break;
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ case RTL_GIGA_MAC_VER_18:
+ rtl_hw_start_8168cp_1(ioaddr, pdev);
+ break;
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ case RTL_GIGA_MAC_VER_19:
+ rtl_hw_start_8168c_1(ioaddr, pdev);
+ break;
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
- }
+ case RTL_GIGA_MAC_VER_20:
+ rtl_hw_start_8168c_2(ioaddr, pdev);
+ break;
- static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
- {
- rtl_csi_access_enable_2(ioaddr);
+ case RTL_GIGA_MAC_VER_21:
+ rtl_hw_start_8168c_3(ioaddr, pdev);
+ break;
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ case RTL_GIGA_MAC_VER_22:
+ rtl_hw_start_8168c_4(ioaddr, pdev);
+ break;
- /* Magic. */
- RTL_W8(DBG_REG, 0x20);
+ case RTL_GIGA_MAC_VER_23:
+ rtl_hw_start_8168cp_2(ioaddr, pdev);
+ break;
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ case RTL_GIGA_MAC_VER_24:
+ rtl_hw_start_8168cp_3(ioaddr, pdev);
+ break;
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ rtl_hw_start_8168d(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_28:
+ rtl_hw_start_8168d_4(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_31:
+ rtl_hw_start_8168dp(ioaddr, pdev);
+ break;
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
- }
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ rtl_hw_start_8168e_1(ioaddr, pdev);
+ break;
+ case RTL_GIGA_MAC_VER_34:
+ rtl_hw_start_8168e_2(ioaddr, pdev);
+ break;
- static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
- {
- static const struct ephy_info e_info_8168c_1[] = {
- { 0x02, 0x0800, 0x1000 },
- { 0x03, 0, 0x0002 },
- { 0x06, 0x0080, 0x0000 }
- };
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ rtl_hw_start_8168f_1(ioaddr, pdev);
+ break;
- rtl_csi_access_enable_2(ioaddr);
+ default:
+ printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
+ dev->name, tp->mac_version);
+ break;
+ }
- RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
+ RTL_W8(Cfg9346, Cfg9346_Lock);
- __rtl_hw_start_8168cp(ioaddr, pdev);
+ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
- static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
+ #define R810X_CPCMD_QUIRK_MASK (\
+ EnableBist | \
+ Mac_dbgo_oe | \
+ Force_half_dup | \
+ Force_rxflow_en | \
+ Force_txflow_en | \
+ Cxpl_dbg_sel | \
+ ASF | \
+ PktCntrDisable | \
+ Mac_dbgo_sel)
+
+ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
- static const struct ephy_info e_info_8168c_2[] = {
- { 0x01, 0, 0x0001 },
- { 0x03, 0x0400, 0x0220 }
+ static const struct ephy_info e_info_8102e_1[] = {
+ { 0x01, 0, 0x6e65 },
+ { 0x02, 0, 0x091f },
+ { 0x03, 0, 0xc2f9 },
+ { 0x06, 0, 0xafb5 },
+ { 0x07, 0, 0x0e00 },
+ { 0x19, 0, 0xec80 },
+ { 0x01, 0, 0x2e65 },
+ { 0x01, 0, 0x6e65 }
};
+ u8 cfg1;
rtl_csi_access_enable_2(ioaddr);
- rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
+ RTL_W8(DBG_REG, FIX_NAK_1);
- __rtl_hw_start_8168cp(ioaddr, pdev);
- }
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
- {
- rtl_hw_start_8168c_2(ioaddr, pdev);
- }
+ RTL_W8(Config1,
+ LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
- static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
- {
- rtl_csi_access_enable_2(ioaddr);
+ cfg1 = RTL_R8(Config1);
+ if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
+ RTL_W8(Config1, cfg1 & ~LEDS0);
- __rtl_hw_start_8168cp(ioaddr, pdev);
+ rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
- static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
+ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
rtl_csi_access_enable_2(ioaddr);
- rtl_disable_clock_request(pdev);
-
- RTL_W8(MaxTxPacketSize, TxPacketMax);
-
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
- static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
+ static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
- rtl_csi_access_enable_1(ioaddr);
-
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
-
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ rtl_hw_start_8102e_2(ioaddr, pdev);
- rtl_disable_clock_request(pdev);
+ rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
}
- static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
+ static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
- static const struct ephy_info e_info_8168d_4[] = {
- { 0x0b, ~0, 0x48 },
- { 0x19, 0x20, 0x50 },
- { 0x0c, ~0, 0x20 }
+ static const struct ephy_info e_info_8105e_1[] = {
+ { 0x07, 0, 0x4000 },
+ { 0x19, 0, 0x0200 },
+ { 0x19, 0, 0x0020 },
+ { 0x1e, 0, 0x2000 },
+ { 0x03, 0, 0x0001 },
+ { 0x19, 0, 0x0100 },
+ { 0x19, 0, 0x0004 },
+ { 0x0a, 0, 0x0020 }
};
- int i;
-
- rtl_csi_access_enable_1(ioaddr);
-
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ /* Force LAN exit from ASPM if Rx/Tx are not idle */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
- for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
- const struct ephy_info *e = e_info_8168d_4 + i;
- u16 w;
+ /* Disable Early Tally Counter */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
- w = rtl_ephy_read(ioaddr, e->offset);
- rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
- }
+ RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- rtl_enable_clock_request(pdev);
+ rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
- static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+ static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
- static const struct ephy_info e_info_8168e_1[] = {
- { 0x00, 0x0200, 0x0100 },
- { 0x00, 0x0000, 0x0004 },
- { 0x06, 0x0002, 0x0001 },
- { 0x06, 0x0000, 0x0030 },
- { 0x07, 0x0000, 0x2000 },
- { 0x00, 0x0000, 0x0020 },
- { 0x03, 0x5800, 0x2000 },
- { 0x03, 0x0000, 0x0001 },
- { 0x01, 0x0800, 0x1000 },
- { 0x07, 0x0000, 0x4000 },
- { 0x1e, 0x0000, 0x2000 },
- { 0x19, 0xffff, 0xfe6c },
- { 0x0a, 0x0000, 0x0040 }
- };
-
- rtl_csi_access_enable_2(ioaddr);
-
- rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
-
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
-
- RTL_W8(MaxTxPacketSize, TxPacketMax);
-
- rtl_disable_clock_request(pdev);
-
- /* Reset tx FIFO pointer */
- RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
- RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
-
- RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+ rtl_hw_start_8105e_1(ioaddr, pdev);
+ rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
}
- static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+ static void rtl_hw_start_8101(struct net_device *dev)
{
- static const struct ephy_info e_info_8168e_2[] = {
- { 0x09, 0x0000, 0x0080 },
- { 0x19, 0x0000, 0x0224 }
- };
-
- rtl_csi_access_enable_1(ioaddr);
-
- rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
+ tp->event_slow &= ~RxFIFOOver;
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
- ERIAR_EXGMAC);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_16) {
+ int cap = pci_pcie_cap(pdev);
- RTL_W8(MaxTxPacketSize, EarlySize);
+ if (cap) {
+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ }
+ }
- rtl_disable_clock_request(pdev);
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
- RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_07:
+ rtl_hw_start_8102e_1(ioaddr, pdev);
+ break;
- /* Adjust EEE LED frequency */
- RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+ case RTL_GIGA_MAC_VER_08:
+ rtl_hw_start_8102e_3(ioaddr, pdev);
+ break;
- RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
- RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
- }
+ case RTL_GIGA_MAC_VER_09:
+ rtl_hw_start_8102e_2(ioaddr, pdev);
+ break;
- static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
- {
- static const struct ephy_info e_info_8168f_1[] = {
- { 0x06, 0x00c0, 0x0020 },
- { 0x08, 0x0001, 0x0002 },
- { 0x09, 0x0000, 0x0080 },
- { 0x19, 0x0000, 0x0224 }
- };
+ case RTL_GIGA_MAC_VER_29:
+ rtl_hw_start_8105e_1(ioaddr, pdev);
+ break;
+ case RTL_GIGA_MAC_VER_30:
+ rtl_hw_start_8105e_2(ioaddr, pdev);
+ break;
+ }
- rtl_csi_access_enable_1(ioaddr);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
- rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_set_rx_max_size(ioaddr, rx_buf_sz);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
- ERIAR_EXGMAC);
+ tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
- RTL_W8(MaxTxPacketSize, EarlySize);
+ RTL_W16(IntrMitigate, 0x0000);
- rtl_disable_clock_request(pdev);
+ rtl_set_rx_tx_desc_registers(tp, ioaddr);
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
- RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ rtl_set_rx_tx_config_registers(tp);
- /* Adjust EEE LED frequency */
- RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+ RTL_R8(IntrMask);
- RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
- RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+ rtl_set_rx_mode(dev);
+
+ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
- static void rtl_hw_start_8168(struct net_device *dev)
+ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ if (new_mtu < ETH_ZLEN ||
+ new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
+ return -EINVAL;
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ if (new_mtu > ETH_DATA_LEN)
+ rtl_hw_jumbo_enable(tp);
+ else
+ rtl_hw_jumbo_disable(tp);
- rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
- tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
+ return 0;
+ }
- RTL_W16(CPlusCmd, tp->cp_cmd);
+ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
+ {
+ desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
+ desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
+ }
- RTL_W16(IntrMitigate, 0x5151);
+ static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
+ void **data_buff, struct RxDesc *desc)
+ {
+ dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
+ DMA_FROM_DEVICE);
- /* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
- tp->intr_event |= RxFIFOOver | PCSTimeout;
- tp->intr_event &= ~RxOverflow;
+ kfree(*data_buff);
+ *data_buff = NULL;
+ rtl8169_make_unusable_by_asic(desc);
+ }
+
+ static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
+ {
+ u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
+
+ desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
+ }
+
+ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
+ u32 rx_buf_sz)
+ {
+ desc->addr = cpu_to_le64(mapping);
+ wmb();
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ }
+
+ static inline void *rtl8169_align(void *data)
+ {
+ return (void *)ALIGN((long)data, 16);
+ }
+
+ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
+ struct RxDesc *desc)
+ {
+ void *data;
+ dma_addr_t mapping;
+ struct device *d = &tp->pci_dev->dev;
+ struct net_device *dev = tp->dev;
+ int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+
+ data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ if (!data)
+ return NULL;
+
+ if (rtl8169_align(data) != data) {
+ kfree(data);
+ data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
+ if (!data)
+ return NULL;
}
- rtl_set_rx_tx_desc_registers(tp, ioaddr);
+ mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+ if (net_ratelimit())
+ netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
+ goto err_out;
+ }
- rtl_set_rx_mode(dev);
+ rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
+ return data;
- RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
- (InterFrameGap << TxInterFrameGapShift));
+ err_out:
+ kfree(data);
+ return NULL;
+ }
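
rtl8169_alloc_rx_data() wants receive buffers aligned to 16 bytes but avoids over-allocating in the common case: it tries a plain kmalloc_node() first and only re-allocates with 15 spare bytes when the returned pointer happens to be misaligned. A stand-alone demonstration of the same strategy (user-space C, sizes chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *align_up(void *p, size_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(uintptr_t)(align - 1));
}

int main(void)
{
	size_t sz = 1522, align = 16;
	void *raw = malloc(sz);
	void *buf = raw;

	if (raw && align_up(raw, align) != raw) {
		/* rare path: retry with enough slack to align inside it */
		free(raw);
		raw = malloc(sz + align - 1);
		buf = align_up(raw, align);
	}
	printf("raw=%p aligned=%p\n", raw, buf);
	free(raw);
	return 0;
}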
- RTL_R8(IntrMask);
+ static void rtl8169_rx_clear(struct rtl8169_private *tp)
+ {
+ unsigned int i;
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- rtl_hw_start_8168bb(ioaddr, pdev);
- break;
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ if (tp->Rx_databuff[i]) {
+ rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
+ tp->RxDescArray + i);
+ }
+ }
+ }
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- rtl_hw_start_8168bef(ioaddr, pdev);
- break;
+ static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
+ {
+ desc->opts1 |= cpu_to_le32(RingEnd);
+ }
- case RTL_GIGA_MAC_VER_18:
- rtl_hw_start_8168cp_1(ioaddr, pdev);
- break;
+ static int rtl8169_rx_fill(struct rtl8169_private *tp)
+ {
+ unsigned int i;
- case RTL_GIGA_MAC_VER_19:
- rtl_hw_start_8168c_1(ioaddr, pdev);
- break;
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ void *data;
- case RTL_GIGA_MAC_VER_20:
- rtl_hw_start_8168c_2(ioaddr, pdev);
- break;
+ if (tp->Rx_databuff[i])
+ continue;
- case RTL_GIGA_MAC_VER_21:
- rtl_hw_start_8168c_3(ioaddr, pdev);
- break;
+ data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
+ if (!data) {
+ rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
+ goto err_out;
+ }
+ tp->Rx_databuff[i] = data;
+ }
- case RTL_GIGA_MAC_VER_22:
- rtl_hw_start_8168c_4(ioaddr, pdev);
- break;
+ rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
+ return 0;
- case RTL_GIGA_MAC_VER_23:
- rtl_hw_start_8168cp_2(ioaddr, pdev);
- break;
+ err_out:
+ rtl8169_rx_clear(tp);
+ return -ENOMEM;
+ }
- case RTL_GIGA_MAC_VER_24:
- rtl_hw_start_8168cp_3(ioaddr, pdev);
- break;
+ static int rtl8169_init_ring(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- rtl_hw_start_8168d(ioaddr, pdev);
- break;
+ rtl8169_init_ring_indexes(tp);
- case RTL_GIGA_MAC_VER_28:
- rtl_hw_start_8168d_4(ioaddr, pdev);
- break;
+ memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
+ memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
- case RTL_GIGA_MAC_VER_31:
- rtl_hw_start_8168dp(ioaddr, pdev);
- break;
+ return rtl8169_rx_fill(tp);
+ }
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- rtl_hw_start_8168e_1(ioaddr, pdev);
- break;
- case RTL_GIGA_MAC_VER_34:
- rtl_hw_start_8168e_2(ioaddr, pdev);
- break;
+ static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
+ struct TxDesc *desc)
+ {
+ unsigned int len = tx_skb->len;
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- rtl_hw_start_8168f_1(ioaddr, pdev);
- break;
+ dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
- default:
- printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
- dev->name, tp->mac_version);
- break;
- }
+ desc->opts1 = 0x00;
+ desc->opts2 = 0x00;
+ desc->addr = 0x00;
+ tx_skb->len = 0;
+ }
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
+ unsigned int n)
+ {
+ unsigned int i;
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ for (i = 0; i < n; i++) {
+ unsigned int entry = (start + i) % NUM_TX_DESC;
+ struct ring_info *tx_skb = tp->tx_skb + entry;
+ unsigned int len = tx_skb->len;
- RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
+ if (len) {
+ struct sk_buff *skb = tx_skb->skb;
- RTL_W16(IntrMask, tp->intr_event);
+ rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+ tp->TxDescArray + entry);
+ if (skb) {
+ tp->dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ tx_skb->skb = NULL;
+ }
+ }
+ }
}
- #define R810X_CPCMD_QUIRK_MASK (\
- EnableBist | \
- Mac_dbgo_oe | \
- Force_half_dup | \
- Force_rxflow_en | \
- Force_txflow_en | \
- Cxpl_dbg_sel | \
- ASF | \
- PktCntrDisable | \
- Mac_dbgo_sel)
+ static void rtl8169_tx_clear(struct rtl8169_private *tp)
+ {
+ rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
+ tp->cur_tx = tp->dirty_tx = 0;
+ netdev_reset_queue(tp->dev);
+ }
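+ /* Workqueue context: quiesce NAPI and the transmit queue, reset the
+ * chip, recycle the receive descriptors, drop pending transmits and
+ * restart the hardware.
+ */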
- static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+ static void rtl_reset_work(struct rtl8169_private *tp)
{
- static const struct ephy_info e_info_8102e_1[] = {
- { 0x01, 0, 0x6e65 },
- { 0x02, 0, 0x091f },
- { 0x03, 0, 0xc2f9 },
- { 0x06, 0, 0xafb5 },
- { 0x07, 0, 0x0e00 },
- { 0x19, 0, 0xec80 },
- { 0x01, 0, 0x2e65 },
- { 0x01, 0, 0x6e65 }
- };
- u8 cfg1;
-
- rtl_csi_access_enable_2(ioaddr);
+ struct net_device *dev = tp->dev;
+ int i;
- RTL_W8(DBG_REG, FIX_NAK_1);
+ napi_disable(&tp->napi);
+ netif_stop_queue(dev);
+ synchronize_sched();
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl8169_hw_reset(tp);
- RTL_W8(Config1,
- LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ for (i = 0; i < NUM_RX_DESC; i++)
+ rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
- cfg1 = RTL_R8(Config1);
- if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
- RTL_W8(Config1, cfg1 & ~LEDS0);
+ rtl8169_tx_clear(tp);
+ rtl8169_init_ring_indexes(tp);
- rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
+ napi_enable(&tp->napi);
+ rtl_hw_start(dev);
+ netif_wake_queue(dev);
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
- static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+ static void rtl8169_tx_timeout(struct net_device *dev)
{
- rtl_csi_access_enable_2(ioaddr);
-
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ struct rtl8169_private *tp = netdev_priv(dev);
- RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
- static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
+ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
+ u32 *opts)
{
- rtl_hw_start_8102e_2(ioaddr, pdev);
+ struct skb_shared_info *info = skb_shinfo(skb);
+ unsigned int cur_frag, entry;
+ struct TxDesc * uninitialized_var(txd);
+ struct device *d = &tp->pci_dev->dev;
- rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
- }
+ entry = tp->cur_tx;
+ for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
+ const skb_frag_t *frag = info->frags + cur_frag;
+ dma_addr_t mapping;
+ u32 status, len;
+ void *addr;
- static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
- {
- static const struct ephy_info e_info_8105e_1[] = {
- { 0x07, 0, 0x4000 },
- { 0x19, 0, 0x0200 },
- { 0x19, 0, 0x0020 },
- { 0x1e, 0, 0x2000 },
- { 0x03, 0, 0x0001 },
- { 0x19, 0, 0x0100 },
- { 0x19, 0, 0x0004 },
- { 0x0a, 0, 0x0020 }
- };
+ entry = (entry + 1) % NUM_TX_DESC;
- /* Force LAN exit from ASPM if Rx/Tx are not idle */
- RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+ txd = tp->TxDescArray + entry;
+ len = skb_frag_size(frag);
+ addr = skb_frag_address(frag);
+ mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+ if (net_ratelimit())
+ netif_err(tp, drv, tp->dev,
+ "Failed to map TX fragments DMA!\n");
+ goto err_out;
+ }
- /* Disable Early Tally Counter */
- RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
+ /* Anti gcc 2.95.3 bugware (sic) */
+ status = opts[0] | len |
+ (RingEnd * !((entry + 1) % NUM_TX_DESC));
- RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
- RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ txd->opts1 = cpu_to_le32(status);
+ txd->opts2 = cpu_to_le32(opts[1]);
+ txd->addr = cpu_to_le64(mapping);
- rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+ tp->tx_skb[entry].len = len;
+ }
+
+ if (cur_frag) {
+ tp->tx_skb[entry].skb = skb;
+ txd->opts1 |= cpu_to_le32(LastFrag);
+ }
+
+ return cur_frag;
+
+ err_out:
+ rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
+ return -EIO;
}
- static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+ static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
+ struct sk_buff *skb, u32 *opts)
{
- rtl_hw_start_8105e_1(ioaddr, pdev);
- rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
+ const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
+ u32 mss = skb_shinfo(skb)->gso_size;
+ int offset = info->opts_offset;
+
+ if (mss) {
+ opts[0] |= TD_LSO;
+ opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+
+ if (ip->protocol == IPPROTO_TCP)
+ opts[offset] |= info->checksum.tcp;
+ else if (ip->protocol == IPPROTO_UDP)
+ opts[offset] |= info->checksum.udp;
+ else
+ WARN_ON_ONCE(1);
+ }
}
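+ /* Map the linear head, then the fragments, and only publish DescOwn
+ * on the first descriptor once the rest of the chain is visible. The
+ * queue is stopped when fewer than MAX_SKB_FRAGS slots remain.
+ */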
- static void rtl_hw_start_8101(struct net_device *dev)
+ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned int entry = tp->cur_tx % NUM_TX_DESC;
+ struct TxDesc *txd = tp->TxDescArray + entry;
void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
+ struct device *d = &tp->pci_dev->dev;
+ dma_addr_t mapping;
+ u32 status, len;
+ u32 opts[2];
+ int frags;
- if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
- tp->intr_event &= ~RxFIFOOver;
- tp->napi_event &= ~RxFIFOOver;
+ if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+ netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
+ goto err_stop_0;
}
- if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
- tp->mac_version == RTL_GIGA_MAC_VER_16) {
- int cap = pci_pcie_cap(pdev);
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
- if (cap) {
- pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_NOSNOOP_EN);
- }
+ len = skb_headlen(skb);
+ mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+ if (net_ratelimit())
+ netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
+ goto err_dma_0;
}
- RTL_W8(Cfg9346, Cfg9346_Unlock);
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_07:
- rtl_hw_start_8102e_1(ioaddr, pdev);
- break;
+ tp->tx_skb[entry].len = len;
+ txd->addr = cpu_to_le64(mapping);
- case RTL_GIGA_MAC_VER_08:
- rtl_hw_start_8102e_3(ioaddr, pdev);
- break;
+ opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
+ opts[0] = DescOwn;
- case RTL_GIGA_MAC_VER_09:
- rtl_hw_start_8102e_2(ioaddr, pdev);
- break;
+ rtl8169_tso_csum(tp, skb, opts);
- case RTL_GIGA_MAC_VER_29:
- rtl_hw_start_8105e_1(ioaddr, pdev);
- break;
- case RTL_GIGA_MAC_VER_30:
- rtl_hw_start_8105e_2(ioaddr, pdev);
- break;
+ frags = rtl8169_xmit_frags(tp, skb, opts);
+ if (frags < 0)
+ goto err_dma_1;
+ else if (frags)
+ opts[0] |= FirstFrag;
+ else {
+ opts[0] |= FirstFrag | LastFrag;
+ tp->tx_skb[entry].skb = skb;
}
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ txd->opts2 = cpu_to_le32(opts[1]);
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ netdev_sent_queue(dev, skb->len);
- rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+ skb_tx_timestamp(skb);
- tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
- RTL_W16(CPlusCmd, tp->cp_cmd);
+ wmb();
- RTL_W16(IntrMitigate, 0x0000);
+ /* Anti gcc 2.95.3 bugware (sic) */
+ status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
+ txd->opts1 = cpu_to_le32(status);
- rtl_set_rx_tx_desc_registers(tp, ioaddr);
+ tp->cur_tx += frags + 1;
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- rtl_set_rx_tx_config_registers(tp);
+ wmb();
- RTL_R8(IntrMask);
+ RTL_W8(TxPoll, NPQ);
- rtl_set_rx_mode(dev);
+ mmiowb();
- RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
+ if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+ /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
+ * not miss a ring update when it notices a stopped queue.
+ */
+ smp_wmb();
+ netif_stop_queue(dev);
+ /* Sync with rtl_tx:
+ * - publish queue status and cur_tx ring index (write barrier)
+ * - refresh dirty_tx ring index (read barrier).
+ * May the current thread have a pessimistic view of the ring
+ * status and forget to wake up queue, a racing rtl_tx thread
+ * can't.
+ */
+ smp_mb();
+ if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+ netif_wake_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+
+ err_dma_1:
+ rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
+ err_dma_0:
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
- RTL_W16(IntrMask, tp->intr_event);
+ err_stop_0:
+ netif_stop_queue(dev);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_BUSY;
}
- static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
+ static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+ u16 pci_status, pci_cmd;
- if (new_mtu < ETH_ZLEN ||
- new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
- return -EINVAL;
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ pci_read_config_word(pdev, PCI_STATUS, &pci_status);
- if (new_mtu > ETH_DATA_LEN)
- rtl_hw_jumbo_enable(tp);
+ netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
+ pci_cmd, pci_status);
+
+ /*
+ * The recovery sequence below admits a very elaborated explanation:
+ * - it seems to work;
+ * - I did not see what else could be done;
+ * - it makes iop3xx happy.
+ *
+ * Feel free to adjust to your needs.
+ */
+ if (pdev->broken_parity_status)
+ pci_cmd &= ~PCI_COMMAND_PARITY;
else
- rtl_hw_jumbo_disable(tp);
-
- dev->mtu = new_mtu;
- netdev_update_features(dev);
+ pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
- return 0;
- }
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
- {
- desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
- desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
- }
+ pci_write_config_word(pdev, PCI_STATUS,
+ pci_status & (PCI_STATUS_DETECTED_PARITY |
+ PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
- static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
- void **data_buff, struct RxDesc *desc)
- {
- dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
- DMA_FROM_DEVICE);
+ /* The infamous DAC f*ckup only happens at boot time */
+ if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
+ void __iomem *ioaddr = tp->mmio_addr;
- kfree(*data_buff);
- *data_buff = NULL;
- rtl8169_make_unusable_by_asic(desc);
- }
+ netif_info(tp, intr, dev, "disabling PCI DAC\n");
+ tp->cp_cmd &= ~PCIDAC;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ dev->features &= ~NETIF_F_HIGHDMA;
+ }
- static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
- {
- u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
+ rtl8169_hw_reset(tp);
- desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
- static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
- u32 rx_buf_sz)
- {
- desc->addr = cpu_to_le64(mapping);
- wmb();
- rtl8169_mark_to_asic(desc, rx_buf_sz);
- }
+ struct rtl_txc {
+ int packets;
+ int bytes;
+ };
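+ /* Reclaim completed transmit descriptors: unmap each finished entry,
+ * free the skb on its LastFrag descriptor, and report the totals to
+ * BQL and the 64 bit statistics.
+ */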
- static inline void *rtl8169_align(void *data)
+ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
- return (void *)ALIGN((long)data, 16);
- }
+ struct rtl8169_stats *tx_stats = &tp->tx_stats;
+ unsigned int dirty_tx, tx_left;
+ struct rtl_txc txc = { 0, 0 };
- static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
- struct RxDesc *desc)
- {
- void *data;
- dma_addr_t mapping;
- struct device *d = &tp->pci_dev->dev;
- struct net_device *dev = tp->dev;
- int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+ dirty_tx = tp->dirty_tx;
+ smp_rmb();
+ tx_left = tp->cur_tx - dirty_tx;
- data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
- if (!data)
- return NULL;
+ while (tx_left > 0) {
+ unsigned int entry = dirty_tx % NUM_TX_DESC;
+ struct ring_info *tx_skb = tp->tx_skb + entry;
+ u32 status;
- if (rtl8169_align(data) != data) {
- kfree(data);
- data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
- if (!data)
- return NULL;
- }
+ rmb();
+ status = le32_to_cpu(tp->TxDescArray[entry].opts1);
+ if (status & DescOwn)
+ break;
- mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(d, mapping))) {
- if (net_ratelimit())
- netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
- goto err_out;
+ rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+ tp->TxDescArray + entry);
+ if (status & LastFrag) {
+ struct sk_buff *skb = tx_skb->skb;
+
+ txc.packets++;
+ txc.bytes += skb->len;
+ dev_kfree_skb(skb);
+ tx_skb->skb = NULL;
+ }
+ dirty_tx++;
+ tx_left--;
}
- rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
- return data;
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->packets += txc.packets;
+ tx_stats->bytes += txc.bytes;
+ u64_stats_update_end(&tx_stats->syncp);
- err_out:
- kfree(data);
- return NULL;
- }
+ netdev_completed_queue(dev, txc.packets, txc.bytes);
- static void rtl8169_rx_clear(struct rtl8169_private *tp)
- {
- unsigned int i;
+ if (tp->dirty_tx != dirty_tx) {
+ tp->dirty_tx = dirty_tx;
+ /* Sync with rtl8169_start_xmit:
+ * - publish dirty_tx ring index (write barrier)
+ * - refresh cur_tx ring index and queue status (read barrier)
+ * May the current thread miss the stopped queue condition,
+ * a racing xmit thread can only have a right view of the
+ * ring status.
+ */
+ smp_mb();
+ if (netif_queue_stopped(dev) &&
+ (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+ netif_wake_queue(dev);
+ }
+ /*
+ * 8168 hack: TxPoll requests are lost when the Tx packets are
+ * too close. Let's kick an extra TxPoll request when a burst
+ * of start_xmit activity is detected (if it is not detected,
+ * it is slow enough). -- FR
+ */
+ if (tp->cur_tx != dirty_tx) {
+ void __iomem *ioaddr = tp->mmio_addr;
- for (i = 0; i < NUM_RX_DESC; i++) {
- if (tp->Rx_databuff[i]) {
- rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
- tp->RxDescArray + i);
+ RTL_W8(TxPoll, NPQ);
}
}
}
- static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
+ static inline int rtl8169_fragmented_frame(u32 status)
{
- desc->opts1 |= cpu_to_le32(RingEnd);
+ return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
}
- static int rtl8169_rx_fill(struct rtl8169_private *tp)
+ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
- unsigned int i;
-
- for (i = 0; i < NUM_RX_DESC; i++) {
- void *data;
+ u32 status = opts1 & RxProtoMask;
- if (tp->Rx_databuff[i])
- continue;
+ if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
+ ((status == RxProtoUDP) && !(opts1 & UDPFail)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+ }
- data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
- if (!data) {
- rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
- goto err_out;
- }
- tp->Rx_databuff[i] = data;
- }
+ static struct sk_buff *rtl8169_try_rx_copy(void *data,
+ struct rtl8169_private *tp,
+ int pkt_size,
+ dma_addr_t addr)
+ {
+ struct sk_buff *skb;
+ struct device *d = &tp->pci_dev->dev;
- rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
- return 0;
+ data = rtl8169_align(data);
+ dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
+ prefetch(data);
+ skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
+ if (skb)
+ memcpy(skb->data, data, pkt_size);
+ dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
- err_out:
- rtl8169_rx_clear(tp);
- return -ENOMEM;
+ return skb;
}
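+ /* Receive up to @budget frames: error descriptors update the error
+ * counters (a FIFO overflow schedules a reset), good frames are copied
+ * into a fresh skb and handed to napi_gro_receive, and every processed
+ * descriptor is returned to the NIC.
+ */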
- static int rtl8169_init_ring(struct net_device *dev)
+ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
- struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned int cur_rx, rx_left;
+ unsigned int count;
- rtl8169_init_ring_indexes(tp);
+ cur_rx = tp->cur_rx;
+ rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
+ rx_left = min(rx_left, budget);
- memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
- memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
+ for (; rx_left > 0; rx_left--, cur_rx++) {
+ unsigned int entry = cur_rx % NUM_RX_DESC;
+ struct RxDesc *desc = tp->RxDescArray + entry;
+ u32 status;
- return rtl8169_rx_fill(tp);
- }
+ rmb();
+ status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
- static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
- struct TxDesc *desc)
- {
- unsigned int len = tx_skb->len;
+ if (status & DescOwn)
+ break;
+ if (unlikely(status & RxRES)) {
+ netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
+ status);
+ dev->stats.rx_errors++;
+ if (status & (RxRWT | RxRUNT))
+ dev->stats.rx_length_errors++;
+ if (status & RxCRC)
+ dev->stats.rx_crc_errors++;
+ if (status & RxFOVF) {
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
+ dev->stats.rx_fifo_errors++;
+ }
+ if ((status & (RxRUNT | RxCRC)) &&
+ !(status & (RxRWT | RxFOVF)) &&
+ (dev->features & NETIF_F_RXALL))
+ goto process_pkt;
- dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ } else {
+ struct sk_buff *skb;
+ dma_addr_t addr;
+ int pkt_size;
- desc->opts1 = 0x00;
- desc->opts2 = 0x00;
- desc->addr = 0x00;
- tx_skb->len = 0;
- }
+ process_pkt:
+ addr = le64_to_cpu(desc->addr);
+ if (likely(!(dev->features & NETIF_F_RXFCS)))
+ pkt_size = (status & 0x00003fff) - 4;
+ else
+ pkt_size = status & 0x00003fff;
- static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
- unsigned int n)
- {
- unsigned int i;
+ /*
+ * The driver does not support incoming fragmented
+ * frames. They are seen as a symptom of over-mtu
+ * sized frames.
+ */
+ if (unlikely(rtl8169_fragmented_frame(status))) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_length_errors++;
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ continue;
+ }
- for (i = 0; i < n; i++) {
- unsigned int entry = (start + i) % NUM_TX_DESC;
- struct ring_info *tx_skb = tp->tx_skb + entry;
- unsigned int len = tx_skb->len;
+ skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
+ tp, pkt_size, addr);
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ continue;
+ }
- if (len) {
- struct sk_buff *skb = tx_skb->skb;
+ rtl8169_rx_csum(skb, status);
+ skb_put(skb, pkt_size);
+ skb->protocol = eth_type_trans(skb, dev);
- rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
- tp->TxDescArray + entry);
- if (skb) {
- tp->dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
- tx_skb->skb = NULL;
- }
+ rtl8169_rx_vlan_tag(desc, skb);
+
+ napi_gro_receive(&tp->napi, skb);
+
+ u64_stats_update_begin(&tp->rx_stats.syncp);
+ tp->rx_stats.packets++;
+ tp->rx_stats.bytes += pkt_size;
+ u64_stats_update_end(&tp->rx_stats.syncp);
+ }
+
+ /* Workaround for AMD platforms. */
+ if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
+ desc->opts2 = 0;
+ cur_rx++;
}
}
- }
- static void rtl8169_tx_clear(struct rtl8169_private *tp)
- {
- rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
- tp->cur_tx = tp->dirty_tx = 0;
- }
+ count = cur_rx - tp->cur_rx;
+ tp->cur_rx = cur_rx;
- static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
- {
- struct rtl8169_private *tp = netdev_priv(dev);
+ tp->dirty_rx += count;
- PREPARE_DELAYED_WORK(&tp->task, task);
- schedule_delayed_work(&tp->task, 4);
+ return count;
}
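+ /* The interrupt handler does the bare minimum: on a relevant event it
+ * masks further interrupts and schedules NAPI; acknowledgement and the
+ * actual work happen in rtl8169_poll and the slow event workqueue.
+ */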
- static void rtl8169_wait_for_quiescence(struct net_device *dev)
+ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
+ struct net_device *dev = dev_instance;
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
-
- synchronize_irq(dev->irq);
-
- /* Wait for any pending NAPI task to complete */
- napi_disable(&tp->napi);
+ int handled = 0;
+ u16 status;
- rtl8169_irq_mask_and_ack(tp);
+ status = rtl_get_events(tp);
+ if (status && status != 0xffff) {
+ status &= RTL_EVENT_NAPI | tp->event_slow;
+ if (status) {
+ handled = 1;
- tp->intr_mask = 0xffff;
- RTL_W16(IntrMask, tp->intr_event);
- napi_enable(&tp->napi);
+ rtl_irq_disable(tp);
+ napi_schedule(&tp->napi);
+ }
+ }
+ return IRQ_RETVAL(handled);
}
- static void rtl8169_reinit_task(struct work_struct *work)
+ /*
+ * Workqueue context.
+ */
+ static void rtl_slow_event_work(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp =
- container_of(work, struct rtl8169_private, task.work);
struct net_device *dev = tp->dev;
- int ret;
-
- rtnl_lock();
+ u16 status;
+
+ status = rtl_get_events(tp) & tp->event_slow;
+ rtl_ack_events(tp, status);
+
+ if (unlikely(status & RxFIFOOver)) {
+ switch (tp->mac_version) {
+ /* Work around for rx fifo overflow */
+ case RTL_GIGA_MAC_VER_11:
+ netif_stop_queue(dev);
+ /* XXX - Hack alert. See rtl_task(). */
+ set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
+ default:
+ break;
+ }
+ }
- if (!netif_running(dev))
- goto out_unlock;
+ if (unlikely(status & SYSErr))
+ rtl8169_pcierr_interrupt(dev);
- rtl8169_wait_for_quiescence(dev);
- rtl8169_close(dev);
+ if (status & LinkChg)
+ __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
- ret = rtl8169_open(dev);
- if (unlikely(ret < 0)) {
- if (net_ratelimit())
- netif_err(tp, drv, dev,
- "reinit failure (status = %d). Rescheduling\n",
- ret);
- rtl8169_schedule_work(dev, rtl8169_reinit_task);
- }
+ napi_disable(&tp->napi);
+ rtl_irq_disable(tp);
- out_unlock:
- rtnl_unlock();
+ napi_enable(&tp->napi);
+ napi_schedule(&tp->napi);
}
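+ /* Workqueue dispatcher: under the work mutex, test and clear each
+ * pending flag and run its handler, slow events first.
+ */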
- static void rtl8169_reset_task(struct work_struct *work)
+ static void rtl_task(struct work_struct *work)
{
+ static const struct {
+ int bitnr;
+ void (*action)(struct rtl8169_private *);
+ } rtl_work[] = {
+ /* XXX - keep rtl_slow_event_work() as first element. */
+ { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
+ { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
+ { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
+ };
struct rtl8169_private *tp =
- container_of(work, struct rtl8169_private, task.work);
+ container_of(work, struct rtl8169_private, wk.work);
struct net_device *dev = tp->dev;
int i;
- rtnl_lock();
+ rtl_lock_work(tp);
- if (!netif_running(dev))
+ if (!netif_running(dev) ||
+ !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
goto out_unlock;
- rtl8169_hw_reset(tp);
-
- rtl8169_wait_for_quiescence(dev);
-
- for (i = 0; i < NUM_RX_DESC; i++)
- rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
-
- rtl8169_tx_clear(tp);
- rtl8169_init_ring_indexes(tp);
+ for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
+ bool pending;
- rtl_hw_start(dev);
- netif_wake_queue(dev);
- rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+ pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
+ if (pending)
+ rtl_work[i].action(tp);
+ }
out_unlock:
- rtnl_unlock();
- }
-
- static void rtl8169_tx_timeout(struct net_device *dev)
- {
- rtl8169_schedule_work(dev, rtl8169_reset_task);
+ rtl_unlock_work(tp);
}
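+ /* NAPI poll: acknowledge the fast events, run receive within budget
+ * and reclaim transmits, defer slow events to the workqueue, and only
+ * re-enable interrupts when the budget was not exhausted.
+ */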
- static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
- u32 *opts)
+ static int rtl8169_poll(struct napi_struct *napi, int budget)
{
- struct skb_shared_info *info = skb_shinfo(skb);
- unsigned int cur_frag, entry;
- struct TxDesc * uninitialized_var(txd);
- struct device *d = &tp->pci_dev->dev;
-
- entry = tp->cur_tx;
- for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
- const skb_frag_t *frag = info->frags + cur_frag;
- dma_addr_t mapping;
- u32 status, len;
- void *addr;
+ struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
+ struct net_device *dev = tp->dev;
+ u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
+ int work_done = 0;
+ u16 status;
- entry = (entry + 1) % NUM_TX_DESC;
+ status = rtl_get_events(tp);
+ rtl_ack_events(tp, status & ~tp->event_slow);
- txd = tp->TxDescArray + entry;
- len = skb_frag_size(frag);
- addr = skb_frag_address(frag);
- mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(d, mapping))) {
- if (net_ratelimit())
- netif_err(tp, drv, tp->dev,
- "Failed to map TX fragments DMA!\n");
- goto err_out;
- }
+ if (status & RTL_EVENT_NAPI_RX)
+ work_done = rtl_rx(dev, tp, (u32) budget);
- /* Anti gcc 2.95.3 bugware (sic) */
- status = opts[0] | len |
- (RingEnd * !((entry + 1) % NUM_TX_DESC));
+ if (status & RTL_EVENT_NAPI_TX)
+ rtl_tx(dev, tp);
- txd->opts1 = cpu_to_le32(status);
- txd->opts2 = cpu_to_le32(opts[1]);
- txd->addr = cpu_to_le64(mapping);
+ if (status & tp->event_slow) {
+ enable_mask &= ~tp->event_slow;
- tp->tx_skb[entry].len = len;
+ rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
}
- if (cur_frag) {
- tp->tx_skb[entry].skb = skb;
- txd->opts1 |= cpu_to_le32(LastFrag);
- }
+ if (work_done < budget) {
+ napi_complete(napi);
- return cur_frag;
+ rtl_irq_enable(tp, enable_mask);
+ mmiowb();
+ }
- err_out:
- rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
- return -EIO;
+ return work_done;
}
- static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
- struct sk_buff *skb, u32 *opts)
+ static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
- const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
- u32 mss = skb_shinfo(skb)->gso_size;
- int offset = info->opts_offset;
+ struct rtl8169_private *tp = netdev_priv(dev);
- if (mss) {
- opts[0] |= TD_LSO;
- opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const struct iphdr *ip = ip_hdr(skb);
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06)
+ return;
- if (ip->protocol == IPPROTO_TCP)
- opts[offset] |= info->checksum.tcp;
- else if (ip->protocol == IPPROTO_UDP)
- opts[offset] |= info->checksum.udp;
- else
- WARN_ON_ONCE(1);
- }
+ dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
+ RTL_W32(RxMissed, 0);
}
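+ /* Teardown order matters: stop the timer, NAPI and the queue, reset
+ * the chip, let a racing hard_start_xmit finish, then release the
+ * ring buffers and power the PLL down.
+ */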
- static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+ static void rtl8169_down(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- unsigned int entry = tp->cur_tx % NUM_TX_DESC;
- struct TxDesc *txd = tp->TxDescArray + entry;
void __iomem *ioaddr = tp->mmio_addr;
- struct device *d = &tp->pci_dev->dev;
- dma_addr_t mapping;
- u32 status, len;
- u32 opts[2];
- int frags;
- if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
- netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
- goto err_stop_0;
- }
+ del_timer_sync(&tp->timer);
- if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
- goto err_stop_0;
+ napi_disable(&tp->napi);
+ netif_stop_queue(dev);
- len = skb_headlen(skb);
- mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(d, mapping))) {
- if (net_ratelimit())
- netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
- goto err_dma_0;
- }
+ rtl8169_hw_reset(tp);
+ /*
+ * At this point device interrupts can not be enabled in any function,
+ * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
+ * and napi is disabled (rtl8169_poll).
+ */
+ rtl8169_rx_missed(dev, ioaddr);
- tp->tx_skb[entry].len = len;
- txd->addr = cpu_to_le64(mapping);
+ /* Give a racing hard_start_xmit a few cycles to complete. */
+ synchronize_sched();
- opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
- opts[0] = DescOwn;
+ rtl8169_tx_clear(tp);
- rtl8169_tso_csum(tp, skb, opts);
+ rtl8169_rx_clear(tp);
- frags = rtl8169_xmit_frags(tp, skb, opts);
- if (frags < 0)
- goto err_dma_1;
- else if (frags)
- opts[0] |= FirstFrag;
- else {
- opts[0] |= FirstFrag | LastFrag;
- tp->tx_skb[entry].skb = skb;
- }
+ rtl_pll_power_down(tp);
+ }
- txd->opts2 = cpu_to_le32(opts[1]);
+ static int rtl8169_close(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
- wmb();
+ pm_runtime_get_sync(&pdev->dev);
- /* Anti gcc 2.95.3 bugware (sic) */
- status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
- txd->opts1 = cpu_to_le32(status);
+ /* Update counters before going down */
+ rtl8169_update_counters(dev);
- tp->cur_tx += frags + 1;
+ rtl_lock_work(tp);
+ clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
- wmb();
+ rtl8169_down(dev);
+ rtl_unlock_work(tp);
- RTL_W8(TxPoll, NPQ);
+ free_irq(pdev->irq, dev);
- if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
- netif_stop_queue(dev);
- smp_rmb();
- if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
- netif_wake_queue(dev);
- }
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+ tp->RxDescArray = NULL;
- return NETDEV_TX_OK;
+ pm_runtime_put_sync(&pdev->dev);
- err_dma_1:
- rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
- err_dma_0:
- dev_kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
+ return 0;
+ }
- err_stop_0:
- netif_stop_queue(dev);
- dev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ static void rtl8169_netpoll(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_interrupt(tp->pci_dev->irq, dev);
}
+ #endif
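+ /* Open path: allocate the DMA descriptor rings, fill the receive ring,
+ * request firmware and the interrupt line, then bring up the PHY, PLL
+ * and hardware under the work lock. Error paths unwind in reverse.
+ */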
- static void rtl8169_pcierr_interrupt(struct net_device *dev)
+ static int rtl_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
- u16 pci_status, pci_cmd;
-
- pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
- pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+ int retval = -ENOMEM;
- netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
- pci_cmd, pci_status);
+ pm_runtime_get_sync(&pdev->dev);
/*
- * The recovery sequence below admits a very elaborated explanation:
- * - it seems to work;
- * - I did not see what else could be done;
- * - it makes iop3xx happy.
- *
- * Feel free to adjust to your needs.
- * Rx and Tx desscriptors needs 256 bytes alignment.
+ * Rx and Tx descriptors need 256-byte alignment.
+ * dma_alloc_coherent provides more.
*/
- if (pdev->broken_parity_status)
- pci_cmd &= ~PCI_COMMAND_PARITY;
- else
- pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
+ tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
+ &tp->TxPhyAddr, GFP_KERNEL);
+ if (!tp->TxDescArray)
+ goto err_pm_runtime_put;
- pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
+ &tp->RxPhyAddr, GFP_KERNEL);
+ if (!tp->RxDescArray)
+ goto err_free_tx_0;
- pci_write_config_word(pdev, PCI_STATUS,
- pci_status & (PCI_STATUS_DETECTED_PARITY |
- PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
+ retval = rtl8169_init_ring(dev);
+ if (retval < 0)
+ goto err_free_rx_1;
- /* The infamous DAC f*ckup only happens at boot time */
- if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
- void __iomem *ioaddr = tp->mmio_addr;
+ INIT_WORK(&tp->wk.work, rtl_task);
- netif_info(tp, intr, dev, "disabling PCI DAC\n");
- tp->cp_cmd &= ~PCIDAC;
- RTL_W16(CPlusCmd, tp->cp_cmd);
- dev->features &= ~NETIF_F_HIGHDMA;
- }
+ smp_mb();
- rtl8169_hw_reset(tp);
+ rtl_request_firmware(tp);
- rtl8169_schedule_work(dev, rtl8169_reinit_task);
- }
+ retval = request_irq(pdev->irq, rtl8169_interrupt,
+ (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
+ dev->name, dev);
+ if (retval < 0)
+ goto err_release_fw_2;
- static void rtl8169_tx_interrupt(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr)
- {
- unsigned int dirty_tx, tx_left;
+ rtl_lock_work(tp);
- dirty_tx = tp->dirty_tx;
- smp_rmb();
- tx_left = tp->cur_tx - dirty_tx;
+ set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
- while (tx_left > 0) {
- unsigned int entry = dirty_tx % NUM_TX_DESC;
- struct ring_info *tx_skb = tp->tx_skb + entry;
- u32 status;
+ napi_enable(&tp->napi);
- rmb();
- status = le32_to_cpu(tp->TxDescArray[entry].opts1);
- if (status & DescOwn)
- break;
+ rtl8169_init_phy(dev, tp);
- rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
- tp->TxDescArray + entry);
- if (status & LastFrag) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += tx_skb->skb->len;
- dev_kfree_skb(tx_skb->skb);
- tx_skb->skb = NULL;
- }
- dirty_tx++;
- tx_left--;
- }
+ __rtl8169_set_features(dev, dev->features);
- if (tp->dirty_tx != dirty_tx) {
- tp->dirty_tx = dirty_tx;
- smp_wmb();
- if (netif_queue_stopped(dev) &&
- (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
- netif_wake_queue(dev);
- }
- /*
- * 8168 hack: TxPoll requests are lost when the Tx packets are
- * too close. Let's kick an extra TxPoll request when a burst
- * of start_xmit activity is detected (if it is not detected,
- * it is slow enough). -- FR
- */
- smp_rmb();
- if (tp->cur_tx != dirty_tx)
- RTL_W8(TxPoll, NPQ);
- }
- }
+ rtl_pll_power_up(tp);
- static inline int rtl8169_fragmented_frame(u32 status)
- {
- return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
- }
+ rtl_hw_start(dev);
- static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
- {
- u32 status = opts1 & RxProtoMask;
+ netif_start_queue(dev);
- if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
- ((status == RxProtoUDP) && !(opts1 & UDPFail)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
- }
+ rtl_unlock_work(tp);
- static struct sk_buff *rtl8169_try_rx_copy(void *data,
- struct rtl8169_private *tp,
- int pkt_size,
- dma_addr_t addr)
- {
- struct sk_buff *skb;
- struct device *d = &tp->pci_dev->dev;
+ tp->saved_wolopts = 0;
+ pm_runtime_put_noidle(&pdev->dev);
- data = rtl8169_align(data);
- dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
- prefetch(data);
- skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
- if (skb)
- memcpy(skb->data, data, pkt_size);
- dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
+ rtl8169_check_link_status(dev, tp, ioaddr);
+ out:
+ return retval;
- return skb;
+ err_release_fw_2:
+ rtl_release_firmware(tp);
+ rtl8169_rx_clear(tp);
+ err_free_rx_1:
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ tp->RxDescArray = NULL;
+ err_free_tx_0:
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+ err_pm_runtime_put:
+ pm_runtime_put_noidle(&pdev->dev);
+ goto out;
}
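+ /* Snapshot the 64 bit packet/byte counters with the u64_stats seqcount
+ * retry loops and copy the remaining error counters from dev->stats.
+ */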
- static int rtl8169_rx_interrupt(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr, u32 budget)
+ static struct rtnl_link_stats64 *
+ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- unsigned int cur_rx, rx_left;
- unsigned int count;
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned int start;
- cur_rx = tp->cur_rx;
- rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
- rx_left = min(rx_left, budget);
+ if (netif_running(dev))
+ rtl8169_rx_missed(dev, ioaddr);
- for (; rx_left > 0; rx_left--, cur_rx++) {
- unsigned int entry = cur_rx % NUM_RX_DESC;
- struct RxDesc *desc = tp->RxDescArray + entry;
- u32 status;
+ do {
+ start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+ stats->rx_packets = tp->rx_stats.packets;
+ stats->rx_bytes = tp->rx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
- rmb();
- status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
- if (status & DescOwn)
- break;
- if (unlikely(status & RxRES)) {
- netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
- status);
- dev->stats.rx_errors++;
- if (status & (RxRWT | RxRUNT))
- dev->stats.rx_length_errors++;
- if (status & RxCRC)
- dev->stats.rx_crc_errors++;
- if (status & RxFOVF) {
- rtl8169_schedule_work(dev, rtl8169_reset_task);
- dev->stats.rx_fifo_errors++;
- }
- rtl8169_mark_to_asic(desc, rx_buf_sz);
- } else {
- struct sk_buff *skb;
- dma_addr_t addr = le64_to_cpu(desc->addr);
- int pkt_size = (status & 0x00003fff) - 4;
+ do {
+ start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+ stats->tx_packets = tp->tx_stats.packets;
+ stats->tx_bytes = tp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
- /*
- * The driver does not support incoming fragmented
- * frames. They are seen as a symptom of over-mtu
- * sized frames.
- */
- if (unlikely(rtl8169_fragmented_frame(status))) {
- dev->stats.rx_dropped++;
- dev->stats.rx_length_errors++;
- rtl8169_mark_to_asic(desc, rx_buf_sz);
- continue;
- }
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+ stats->rx_length_errors = dev->stats.rx_length_errors;
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_crc_errors = dev->stats.rx_crc_errors;
+ stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
+ stats->rx_missed_errors = dev->stats.rx_missed_errors;
- skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
- tp, pkt_size, addr);
- rtl8169_mark_to_asic(desc, rx_buf_sz);
- if (!skb) {
- dev->stats.rx_dropped++;
- continue;
- }
+ return stats;
+ }
- rtl8169_rx_csum(skb, status);
- skb_put(skb, pkt_size);
- skb->protocol = eth_type_trans(skb, dev);
+ static void rtl8169_net_suspend(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
- rtl8169_rx_vlan_tag(desc, skb);
+ if (!netif_running(dev))
+ return;
- napi_gro_receive(&tp->napi, skb);
+ netif_device_detach(dev);
+ netif_stop_queue(dev);
- dev->stats.rx_bytes += pkt_size;
- dev->stats.rx_packets++;
- }
+ rtl_lock_work(tp);
+ napi_disable(&tp->napi);
+ clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ rtl_unlock_work(tp);
- /* Work around for AMD plateform. */
- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
- desc->opts2 = 0;
- cur_rx++;
- }
- }
+ rtl_pll_power_down(tp);
+ }
- count = cur_rx - tp->cur_rx;
- tp->cur_rx = cur_rx;
+ #ifdef CONFIG_PM
- tp->dirty_rx += count;
+ static int rtl8169_suspend(struct device *device)
+ {
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
- return count;
+ rtl8169_net_suspend(dev);
+
+ return 0;
}
- static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
+ static void __rtl8169_resume(struct net_device *dev)
{
- struct net_device *dev = dev_instance;
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- int handled = 0;
- int status;
-
- /* loop handling interrupts until we have no new ones or
- * we hit a invalid/hotplug case.
- */
- status = RTL_R16(IntrStatus);
- while (status && status != 0xffff) {
- status &= tp->intr_event;
- if (!status)
- break;
- handled = 1;
+ netif_device_attach(dev);
- /* Handle all of the error cases first. These will reset
- * the chip, so just exit the loop.
- */
- if (unlikely(!netif_running(dev))) {
- rtl8169_hw_reset(tp);
- break;
- }
+ rtl_pll_power_up(tp);
- if (unlikely(status & RxFIFOOver)) {
- switch (tp->mac_version) {
- /* Work around for rx fifo overflow */
- case RTL_GIGA_MAC_VER_11:
- netif_stop_queue(dev);
- rtl8169_tx_timeout(dev);
- goto done;
- default:
- break;
- }
- }
+ rtl_lock_work(tp);
+ napi_enable(&tp->napi);
+ set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ rtl_unlock_work(tp);
- if (unlikely(status & SYSErr)) {
- rtl8169_pcierr_interrupt(dev);
- break;
- }
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
+ }
- if (status & LinkChg)
- __rtl8169_check_link_status(dev, tp, ioaddr, true);
+ static int rtl8169_resume(struct device *device)
+ {
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
- /* We need to see the lastest version of tp->intr_mask to
- * avoid ignoring an MSI interrupt and having to wait for
- * another event which may never come.
- */
- smp_rmb();
- if (status & tp->intr_mask & tp->napi_event) {
- RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
- tp->intr_mask = ~tp->napi_event;
+ rtl8169_init_phy(dev, tp);
- if (likely(napi_schedule_prep(&tp->napi)))
- __napi_schedule(&tp->napi);
- else
- netif_info(tp, intr, dev,
- "interrupt %04x in poll\n", status);
- }
+ if (netif_running(dev))
+ __rtl8169_resume(dev);
- /* We only get a new MSI interrupt when all active irq
- * sources on the chip have been acknowledged. So, ack
- * everything we've seen and check if new sources have become
- * active to avoid blocking all interrupts from the chip.
- */
- RTL_W16(IntrStatus,
- (status & RxFIFOOver) ? (status | RxOverflow) : status);
- status = RTL_R16(IntrStatus);
- }
- done:
- return IRQ_RETVAL(handled);
+ return 0;
}
- static int rtl8169_poll(struct napi_struct *napi, int budget)
+ static int rtl8169_runtime_suspend(struct device *device)
{
- struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
- struct net_device *dev = tp->dev;
- void __iomem *ioaddr = tp->mmio_addr;
- int work_done;
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
- work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
- rtl8169_tx_interrupt(dev, tp, ioaddr);
+ if (!tp->TxDescArray)
+ return 0;
- if (work_done < budget) {
- napi_complete(napi);
+ rtl_lock_work(tp);
+ tp->saved_wolopts = __rtl8169_get_wol(tp);
+ __rtl8169_set_wol(tp, WAKE_ANY);
+ rtl_unlock_work(tp);
- /* We need for force the visibility of tp->intr_mask
- * for other CPUs, as we can loose an MSI interrupt
- * and potentially wait for a retransmit timeout if we don't.
- * The posted write to IntrMask is safe, as it will
- * eventually make it to the chip and we won't loose anything
- * until it does.
- */
- tp->intr_mask = 0xffff;
- wmb();
- RTL_W16(IntrMask, tp->intr_event);
- }
+ rtl8169_net_suspend(dev);
- return work_done;
+ return 0;
}
- static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
+ static int rtl8169_runtime_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (tp->mac_version > RTL_GIGA_MAC_VER_06)
- return;
+ if (!tp->TxDescArray)
+ return 0;
- dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
- RTL_W32(RxMissed, 0);
+ rtl_lock_work(tp);
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
+ tp->saved_wolopts = 0;
+ rtl_unlock_work(tp);
+
+ rtl8169_init_phy(dev, tp);
+
+ __rtl8169_resume(dev);
+
+ return 0;
}
- static void rtl8169_down(struct net_device *dev)
+ static int rtl8169_runtime_idle(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
-
- del_timer_sync(&tp->timer);
- netif_stop_queue(dev);
-
- napi_disable(&tp->napi);
+ return tp->TxDescArray ? -EBUSY : 0;
+ }
- spin_lock_irq(&tp->lock);
+ static const struct dev_pm_ops rtl8169_pm_ops = {
+ .suspend = rtl8169_suspend,
+ .resume = rtl8169_resume,
+ .freeze = rtl8169_suspend,
+ .thaw = rtl8169_resume,
+ .poweroff = rtl8169_suspend,
+ .restore = rtl8169_resume,
+ .runtime_suspend = rtl8169_runtime_suspend,
+ .runtime_resume = rtl8169_runtime_resume,
+ .runtime_idle = rtl8169_runtime_idle,
+ };
- rtl8169_hw_reset(tp);
- /*
- * At this point device interrupts can not be enabled in any function,
- * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
- * rtl8169_reinit_task) and napi is disabled (rtl8169_poll).
- */
- rtl8169_rx_missed(dev, ioaddr);
+ #define RTL8169_PM_OPS (&rtl8169_pm_ops)
- spin_unlock_irq(&tp->lock);
+ #else /* !CONFIG_PM */
- synchronize_irq(dev->irq);
+ #define RTL8169_PM_OPS NULL
- /* Give a racing hard_start_xmit a few cycles to complete. */
- synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
+ #endif /* !CONFIG_PM */
- rtl8169_tx_clear(tp);
+ static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
- rtl8169_rx_clear(tp);
+ /* WoL fails with 8168b when the receiver is disabled. */
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ pci_clear_master(tp->pci_dev);
- rtl_pll_power_down(tp);
+ RTL_W8(ChipCmd, CmdRxEnb);
+ /* PCI commit */
+ RTL_R8(ChipCmd);
+ break;
+ default:
+ break;
+ }
}
- static int rtl8169_close(struct net_device *dev)
+ static void rtl_shutdown(struct pci_dev *pdev)
{
+ struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- struct pci_dev *pdev = tp->pci_dev;
+ struct device *d = &pdev->dev;
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_sync(d);
- /* Update counters before going down */
- rtl8169_update_counters(dev);
+ rtl8169_net_suspend(dev);
- rtl8169_down(dev);
+ /* Restore original MAC address */
+ rtl_rar_set(tp, dev->perm_addr);
- free_irq(dev->irq, dev);
+ rtl8169_hw_reset(tp);
- dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
- tp->RxPhyAddr);
- dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
- tp->TxPhyAddr);
- tp->TxDescArray = NULL;
- tp->RxDescArray = NULL;
+ if (system_state == SYSTEM_POWER_OFF) {
+ if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ rtl_wol_suspend_quirk(tp);
+ rtl_wol_shutdown_quirk(tp);
+ }
- pm_runtime_put_sync(&pdev->dev);
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
- return 0;
+ pm_runtime_put_noidle(d);
}
- static void rtl_set_rx_mode(struct net_device *dev)
+ static void __devexit rtl_remove_one(struct pci_dev *pdev)
{
+ struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- unsigned long flags;
- u32 mc_filter[2]; /* Multicast hash filter */
- int rx_mode;
- u32 tmp = 0;
-
- if (dev->flags & IFF_PROMISC) {
- /* Unconditionally log net taps. */
- netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
- rx_mode =
- AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
- AcceptAllPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else {
- struct netdev_hw_addr *ha;
- rx_mode = AcceptBroadcast | AcceptMyPhys;
- mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- rx_mode |= AcceptMulticast;
- }
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ rtl8168_driver_stop(tp);
}
- spin_lock_irqsave(&tp->lock, flags);
-
- tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
+ cancel_work_sync(&tp->wk.work);
- if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
- u32 data = mc_filter[0];
+ unregister_netdev(dev);
- mc_filter[0] = swab32(mc_filter[1]);
- mc_filter[1] = swab32(data);
- }
+ rtl_release_firmware(tp);
- RTL_W32(MAR0 + 4, mc_filter[1]);
- RTL_W32(MAR0 + 0, mc_filter[0]);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
- RTL_W32(RxConfig, tmp);
+ /* restore original MAC address */
+ rtl_rar_set(tp, dev->perm_addr);
- spin_unlock_irqrestore(&tp->lock, flags);
+ rtl_disable_msi(pdev, tp);
+ rtl8169_release_board(pdev, dev, tp->mmio_addr);
+ pci_set_drvdata(pdev, NULL);
}
- /**
- * rtl8169_get_stats - Get rtl8169 read/write statistics
- * @dev: The Ethernet Device to get statistics for
- *
- * Get TX/RX statistics for rtl8169
- */
- static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
+ static const struct net_device_ops rtl_netdev_ops = {
+ .ndo_open = rtl_open,
+ .ndo_stop = rtl8169_close,
+ .ndo_get_stats64 = rtl8169_get_stats64,
+ .ndo_start_xmit = rtl8169_start_xmit,
+ .ndo_tx_timeout = rtl8169_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = rtl8169_change_mtu,
+ .ndo_fix_features = rtl8169_fix_features,
+ .ndo_set_features = rtl8169_set_features,
+ .ndo_set_mac_address = rtl_set_mac_address,
+ .ndo_do_ioctl = rtl8169_ioctl,
+ .ndo_set_rx_mode = rtl_set_rx_mode,
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = rtl8169_netpoll,
+ #endif
+
+ };
+
+ static const struct rtl_cfg_info {
+ void (*hw_start)(struct net_device *);
+ unsigned int region;
+ unsigned int align;
+ u16 event_slow;
+ unsigned features;
+ u8 default_ver;
+ } rtl_cfg_infos [] = {
+ [RTL_CFG_0] = {
+ .hw_start = rtl_hw_start_8169,
+ .region = 1,
+ .align = 0,
+ .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
+ .features = RTL_FEATURE_GMII,
+ .default_ver = RTL_GIGA_MAC_VER_01,
+ },
+ [RTL_CFG_1] = {
+ .hw_start = rtl_hw_start_8168,
+ .region = 2,
+ .align = 8,
+ .event_slow = SYSErr | LinkChg | RxOverflow,
+ .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_11,
+ },
+ [RTL_CFG_2] = {
+ .hw_start = rtl_hw_start_8101,
+ .region = 2,
+ .align = 8,
+ .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
+ PCSTimeout,
+ .features = RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_13,
+ }
+ };
+
+ /* Cfg9346_Unlock assumed. */
+ static unsigned rtl_try_msi(struct rtl8169_private *tp,
+ const struct rtl_cfg_info *cfg)
{
- struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- unsigned long flags;
+ unsigned msi = 0;
+ u8 cfg2;
- if (netif_running(dev)) {
- spin_lock_irqsave(&tp->lock, flags);
- rtl8169_rx_missed(dev, ioaddr);
- spin_unlock_irqrestore(&tp->lock, flags);
+ cfg2 = RTL_R8(Config2) & ~MSIEnable;
+ if (cfg->features & RTL_FEATURE_MSI) {
+ if (pci_enable_msi(tp->pci_dev)) {
+ netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
+ } else {
+ cfg2 |= MSIEnable;
+ msi = RTL_FEATURE_MSI;
+ }
}
-
- return &dev->stats;
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ RTL_W8(Config2, cfg2);
+ return msi;
}
- static void rtl8169_net_suspend(struct net_device *dev)
+ static int __devinit
+ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct rtl8169_private *tp = netdev_priv(dev);
+ const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
+ const unsigned int region = cfg->region;
+ struct rtl8169_private *tp;
+ struct mii_if_info *mii;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ int chipset, i;
+ int rc;
- if (!netif_running(dev))
- return;
+ if (netif_msg_drv(&debug)) {
+ printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
+ MODULENAME, RTL8169_VERSION);
+ }
- rtl_pll_power_down(tp);
+ dev = alloc_etherdev(sizeof (*tp));
+ if (!dev) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->netdev_ops = &rtl_netdev_ops;
+ tp = netdev_priv(dev);
+ tp->dev = dev;
+ tp->pci_dev = pdev;
+ tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
+
+ mii = &tp->mii;
+ mii->dev = dev;
+ mii->mdio_read = rtl_mdio_read;
+ mii->mdio_write = rtl_mdio_write;
+ mii->phy_id_mask = 0x1f;
+ mii->reg_num_mask = 0x1f;
+ mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+
+ /* Disable ASPM completely as it causes random devices to stop working
+ * and full system hangs for some PCIe device users. */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
- netif_device_detach(dev);
- netif_stop_queue(dev);
- }
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "enable failure\n");
+ goto err_out_free_dev_1;
+ }
- #ifdef CONFIG_PM
+ if (pci_set_mwi(pdev) < 0)
+ netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
- static int rtl8169_suspend(struct device *device)
- {
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ /* make sure PCI base addr 1 is MMIO */
+ if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
+ netif_err(tp, probe, dev,
+ "region #%d not an MMIO resource, aborting\n",
+ region);
+ rc = -ENODEV;
+ goto err_out_mwi_2;
+ }
- rtl8169_net_suspend(dev);
+ /* check for weird/broken PCI region reporting */
+ if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
+ netif_err(tp, probe, dev,
+ "Invalid PCI region size(s), aborting\n");
+ rc = -ENODEV;
+ goto err_out_mwi_2;
+ }
- return 0;
- }
+ rc = pci_request_regions(pdev, MODULENAME);
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "could not request regions\n");
+ goto err_out_mwi_2;
+ }
- static void __rtl8169_resume(struct net_device *dev)
- {
- struct rtl8169_private *tp = netdev_priv(dev);
+ tp->cp_cmd = RxChkSum;
- netif_device_attach(dev);
+ if ((sizeof(dma_addr_t) > 4) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
+ tp->cp_cmd |= PCIDAC;
+ dev->features |= NETIF_F_HIGHDMA;
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
+ goto err_out_free_res_3;
+ }
+ }
- rtl_pll_power_up(tp);
+ /* ioremap MMIO region */
+ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
+ if (!ioaddr) {
+ netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
+ rc = -EIO;
+ goto err_out_free_res_3;
+ }
+ tp->mmio_addr = ioaddr;
- rtl8169_schedule_work(dev, rtl8169_reset_task);
- }
+ if (!pci_is_pcie(pdev))
+ netif_info(tp, probe, dev, "not PCI Express\n");
- static int rtl8169_resume(struct device *device)
- {
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
+ /* Identify chip attached to board */
+ rtl8169_get_mac_version(tp, dev, cfg->default_ver);
- rtl8169_init_phy(dev, tp);
+ rtl_init_rxcfg(tp);
- if (netif_running(dev))
- __rtl8169_resume(dev);
+ rtl_irq_disable(tp);
- return 0;
- }
+ rtl_hw_reset(tp);
- static int rtl8169_runtime_suspend(struct device *device)
- {
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
+ rtl_ack_events(tp, 0xffff);
- if (!tp->TxDescArray)
- return 0;
+ pci_set_master(pdev);
- spin_lock_irq(&tp->lock);
- tp->saved_wolopts = __rtl8169_get_wol(tp);
- __rtl8169_set_wol(tp, WAKE_ANY);
- spin_unlock_irq(&tp->lock);
+ /*
+ * Pretend we are using VLANs; This bypasses a nasty bug where
+ * Interrupts stop flowing on high load on 8110SCd controllers.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ tp->cp_cmd |= RxVlan;
- rtl8169_net_suspend(dev);
+ rtl_init_mdio_ops(tp);
+ rtl_init_pll_power_ops(tp);
+ rtl_init_jumbo_ops(tp);
- return 0;
- }
+ rtl8169_print_mac_version(tp);
- static int rtl8169_runtime_resume(struct device *device)
- {
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
+ chipset = tp->mac_version;
+ tp->txd_version = rtl_chip_infos[chipset].txd_version;
- if (!tp->TxDescArray)
- return 0;
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
+ RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ tp->features |= rtl_try_msi(tp, cfg);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
- spin_lock_irq(&tp->lock);
- __rtl8169_set_wol(tp, tp->saved_wolopts);
- tp->saved_wolopts = 0;
- spin_unlock_irq(&tp->lock);
+ if (rtl_tbi_enabled(tp)) {
+ tp->set_speed = rtl8169_set_speed_tbi;
+ tp->get_settings = rtl8169_gset_tbi;
+ tp->phy_reset_enable = rtl8169_tbi_reset_enable;
+ tp->phy_reset_pending = rtl8169_tbi_reset_pending;
+ tp->link_ok = rtl8169_tbi_link_ok;
+ tp->do_ioctl = rtl_tbi_ioctl;
+ } else {
+ tp->set_speed = rtl8169_set_speed_xmii;
+ tp->get_settings = rtl8169_gset_xmii;
+ tp->phy_reset_enable = rtl8169_xmii_reset_enable;
+ tp->phy_reset_pending = rtl8169_xmii_reset_pending;
+ tp->link_ok = rtl8169_xmii_link_ok;
+ tp->do_ioctl = rtl_xmii_ioctl;
+ }
- rtl8169_init_phy(dev, tp);
+ mutex_init(&tp->wk.mutex);
- __rtl8169_resume(dev);
+ /* Get MAC address */
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = RTL_R8(MAC0 + i);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- return 0;
- }
+ SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
+ dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
- static int rtl8169_runtime_idle(struct device *device)
- {
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
+ netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
- return tp->TxDescArray ? -EBUSY : 0;
- }
+ /* don't enable SG, IP_CSUM and TSO by default - they might not work
+ * properly for all devices */
+ dev->features |= NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- static const struct dev_pm_ops rtl8169_pm_ops = {
- .suspend = rtl8169_suspend,
- .resume = rtl8169_resume,
- .freeze = rtl8169_suspend,
- .thaw = rtl8169_resume,
- .poweroff = rtl8169_suspend,
- .restore = rtl8169_resume,
- .runtime_suspend = rtl8169_runtime_suspend,
- .runtime_resume = rtl8169_runtime_resume,
- .runtime_idle = rtl8169_runtime_idle,
- };
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
- #define RTL8169_PM_OPS (&rtl8169_pm_ops)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ /* 8110SCd requires hardware Rx VLAN - disallow toggling */
+ dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
- #else /* !CONFIG_PM */
+ dev->hw_features |= NETIF_F_RXALL;
+ dev->hw_features |= NETIF_F_RXFCS;
- #define RTL8169_PM_OPS NULL
+ tp->hw_start = cfg->hw_start;
+ tp->event_slow = cfg->event_slow;
- #endif /* !CONFIG_PM */
+ tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
+ ~(RxBOVF | RxFOVF) : ~0;
- static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
- {
- void __iomem *ioaddr = tp->mmio_addr;
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long) dev;
+ tp->timer.function = rtl8169_phy_timer;
- /* WoL fails with 8168b when the receiver is disabled. */
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- pci_clear_master(tp->pci_dev);
+ tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
- RTL_W8(ChipCmd, CmdRxEnb);
- /* PCI commit */
- RTL_R8(ChipCmd);
- break;
- default:
- break;
- }
- }
+ rc = register_netdev(dev);
+ if (rc < 0)
+ goto err_out_msi_4;
- static void rtl_shutdown(struct pci_dev *pdev)
- {
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
+ pci_set_drvdata(pdev, dev);
- rtl8169_net_suspend(dev);
+ netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
+ rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
+ (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
+ if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
+ netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
+ "tx checksumming: %s]\n",
+ rtl_chip_infos[chipset].jumbo_max,
+ rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
+ }
- /* Restore original MAC address */
- rtl_rar_set(tp, dev->perm_addr);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ rtl8168_driver_start(tp);
+ }
- spin_lock_irq(&tp->lock);
+ device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
- rtl8169_hw_reset(tp);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
- spin_unlock_irq(&tp->lock);
+ netif_carrier_off(dev);
- if (system_state == SYSTEM_POWER_OFF) {
- if (__rtl8169_get_wol(tp) & WAKE_ANY) {
- rtl_wol_suspend_quirk(tp);
- rtl_wol_shutdown_quirk(tp);
- }
+ out:
+ return rc;
- pci_wake_from_d3(pdev, true);
- pci_set_power_state(pdev, PCI_D3hot);
- }
+ err_out_msi_4:
+ rtl_disable_msi(pdev, tp);
+ iounmap(ioaddr);
+ err_out_free_res_3:
+ pci_release_regions(pdev);
+ err_out_mwi_2:
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+ err_out_free_dev_1:
+ free_netdev(dev);
+ goto out;
}
static struct pci_driver rtl8169_pci_driver = {
.name = MODULENAME,
.id_table = rtl8169_pci_tbl,
- .probe = rtl8169_init_one,
- .remove = __devexit_p(rtl8169_remove_one),
+ .probe = rtl_init_one,
+ .remove = __devexit_p(rtl_remove_one),
.shutdown = rtl_shutdown,
.driver.pm = RTL8169_PM_OPS,
};
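For context, a pci_driver like the one above is handed to the PCI core from the module's init/exit paths, which this hunk does not show. A generic sketch only (function names are illustrative, and it assumes the usual <linux/module.h> and <linux/pci.h> includes):

static int __init rtl8169_init_module(void)
{
	/* register the driver so the PCI core can bind it to matching devices */
	return pci_register_driver(&rtl8169_pci_driver);
}

static void __exit rtl8169_exit_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}

module_init(rtl8169_init_module);
module_exit(rtl8169_exit_module);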
#define BQ27500_REG_SOC 0x2C
#define BQ27500_REG_DCAP 0x3C /* Design capacity */
- #define BQ27500_FLAG_DSG BIT(0) /* Discharging */
+ #define BQ27500_FLAG_DSC BIT(0)
#define BQ27500_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */
#define BQ27500_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */
- #define BQ27500_FLAG_CHG BIT(8) /* Charging */
- #define BQ27500_FLAG_FC BIT(9) /* Fully charged */
+ #define BQ27500_FLAG_FC BIT(9)
#define BQ27000_RS 20 /* Resistor sense */
struct bq27x00_reg_cache cache = {0, };
bool is_bq27500 = di->chip == BQ27500;
- cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500);
+ cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, !is_bq27500);
if (cache.flags >= 0) {
if (!is_bq27500 && (cache.flags & BQ27000_FLAG_CI)) {
dev_info(di->dev, "battery is not calibrated! ignoring capacity values\n");
if (di->chip == BQ27500) {
if (di->cache.flags & BQ27500_FLAG_FC)
status = POWER_SUPPLY_STATUS_FULL;
- else if (di->cache.flags & BQ27500_FLAG_DSG)
+ else if (di->cache.flags & BQ27500_FLAG_DSC)
status = POWER_SUPPLY_STATUS_DISCHARGING;
- else if (di->cache.flags & BQ27500_FLAG_CHG)
- status = POWER_SUPPLY_STATUS_CHARGING;
- else if (power_supply_am_i_supplied(&di->bat))
- status = POWER_SUPPLY_STATUS_NOT_CHARGING;
else
- status = POWER_SUPPLY_STATUS_UNKNOWN;
+ status = POWER_SUPPLY_STATUS_CHARGING;
} else {
if (di->cache.flags & BQ27000_FLAG_FC)
status = POWER_SUPPLY_STATUS_FULL;
}
/*
- * Return the battery Voltage in milivolts
+ * Return the battery Voltage in millivolts
* Or < 0 if something fails.
*/
static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
#include <linux/mfd/tps65910.h>
#define TPS65910_SUPPLY_STATE_ENABLED 0x1
+ #define EXT_SLEEP_CONTROL (TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1 | \
+ TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2 | \
+ TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 | \
+ TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
-/* supported VIO voltages in milivolts */
+/* supported VIO voltages in millivolts */
static const u16 VIO_VSEL_table[] = {
1500, 1800, 2500, 3300,
};
/* VSEL tables for TPS65910 specific LDOs and dcdc's */
-/* supported VDD3 voltages in milivolts */
+/* supported VDD3 voltages in millivolts */
static const u16 VDD3_VSEL_table[] = {
5000,
};
-/* supported VDIG1 voltages in milivolts */
+/* supported VDIG1 voltages in millivolts */
static const u16 VDIG1_VSEL_table[] = {
1200, 1500, 1800, 2700,
};
-/* supported VDIG2 voltages in milivolts */
+/* supported VDIG2 voltages in millivolts */
static const u16 VDIG2_VSEL_table[] = {
1000, 1100, 1200, 1800,
};
-/* supported VPLL voltages in milivolts */
+/* supported VPLL voltages in millivolts */
static const u16 VPLL_VSEL_table[] = {
1000, 1100, 1800, 2500,
};
-/* supported VDAC voltages in milivolts */
+/* supported VDAC voltages in millivolts */
static const u16 VDAC_VSEL_table[] = {
1800, 2600, 2800, 2850,
};
-/* supported VAUX1 voltages in milivolts */
+/* supported VAUX1 voltages in millivolts */
static const u16 VAUX1_VSEL_table[] = {
1800, 2500, 2800, 2850,
};
-/* supported VAUX2 voltages in milivolts */
+/* supported VAUX2 voltages in millivolts */
static const u16 VAUX2_VSEL_table[] = {
1800, 2800, 2900, 3300,
};
-/* supported VAUX33 voltages in milivolts */
+/* supported VAUX33 voltages in millivolts */
static const u16 VAUX33_VSEL_table[] = {
1800, 2000, 2800, 3300,
};
-/* supported VMMC voltages in milivolts */
+/* supported VMMC voltages in millivolts */
static const u16 VMMC_VSEL_table[] = {
1800, 2800, 3000, 3300,
};
const char *name;
unsigned min_uV;
unsigned max_uV;
- u8 table_len;
- const u16 *table;
+ u8 n_voltages;
+ const u16 *voltage_table;
+ int enable_time_us;
};
static struct tps_info tps65910_regs[] = {
{
.name = "VRTC",
+ .enable_time_us = 2200,
},
{
.name = "VIO",
.min_uV = 1500000,
.max_uV = 3300000,
- .table_len = ARRAY_SIZE(VIO_VSEL_table),
- .table = VIO_VSEL_table,
+ .n_voltages = ARRAY_SIZE(VIO_VSEL_table),
+ .voltage_table = VIO_VSEL_table,
+ .enable_time_us = 350,
},
{
.name = "VDD1",
.min_uV = 600000,
.max_uV = 4500000,
+ .enable_time_us = 350,
},
{
.name = "VDD2",
.min_uV = 600000,
.max_uV = 4500000,
+ .enable_time_us = 350,
},
{
.name = "VDD3",
.min_uV = 5000000,
.max_uV = 5000000,
- .table_len = ARRAY_SIZE(VDD3_VSEL_table),
- .table = VDD3_VSEL_table,
+ .n_voltages = ARRAY_SIZE(VDD3_VSEL_table),
+ .voltage_table = VDD3_VSEL_table,
+ .enable_time_us = 200,
},
{
.name = "VDIG1",
.min_uV = 1200000,
.max_uV = 2700000,
- .table_len = ARRAY_SIZE(VDIG1_VSEL_table),
- .table = VDIG1_VSEL_table,
+ .n_voltages = ARRAY_SIZE(VDIG1_VSEL_table),
+ .voltage_table = VDIG1_VSEL_table,
+ .enable_time_us = 100,
},
{
.name = "VDIG2",
.min_uV = 1000000,
.max_uV = 1800000,
- .table_len = ARRAY_SIZE(VDIG2_VSEL_table),
- .table = VDIG2_VSEL_table,
+ .n_voltages = ARRAY_SIZE(VDIG2_VSEL_table),
+ .voltage_table = VDIG2_VSEL_table,
+ .enable_time_us = 100,
},
{
.name = "VPLL",
.min_uV = 1000000,
.max_uV = 2500000,
- .table_len = ARRAY_SIZE(VPLL_VSEL_table),
- .table = VPLL_VSEL_table,
+ .n_voltages = ARRAY_SIZE(VPLL_VSEL_table),
+ .voltage_table = VPLL_VSEL_table,
+ .enable_time_us = 100,
},
{
.name = "VDAC",
.min_uV = 1800000,
.max_uV = 2850000,
- .table_len = ARRAY_SIZE(VDAC_VSEL_table),
- .table = VDAC_VSEL_table,
+ .n_voltages = ARRAY_SIZE(VDAC_VSEL_table),
+ .voltage_table = VDAC_VSEL_table,
+ .enable_time_us = 100,
},
{
.name = "VAUX1",
.min_uV = 1800000,
.max_uV = 2850000,
- .table_len = ARRAY_SIZE(VAUX1_VSEL_table),
- .table = VAUX1_VSEL_table,
+ .n_voltages = ARRAY_SIZE(VAUX1_VSEL_table),
+ .voltage_table = VAUX1_VSEL_table,
+ .enable_time_us = 100,
},
{
.name = "VAUX2",
.min_uV = 1800000,
.max_uV = 3300000,
- .table_len = ARRAY_SIZE(VAUX2_VSEL_table),
- .table = VAUX2_VSEL_table,
+ .n_voltages = ARRAY_SIZE(VAUX2_VSEL_table),
+ .voltage_table = VAUX2_VSEL_table,
+ .enable_time_us = 100,
},
{
.name = "VAUX33",
.min_uV = 1800000,
.max_uV = 3300000,
- .table_len = ARRAY_SIZE(VAUX33_VSEL_table),
- .table = VAUX33_VSEL_table,
+ .n_voltages = ARRAY_SIZE(VAUX33_VSEL_table),
+ .voltage_table = VAUX33_VSEL_table,
+ .enable_time_us = 100,
},
{
.name = "VMMC",
.min_uV = 1800000,
.max_uV = 3300000,
- .table_len = ARRAY_SIZE(VMMC_VSEL_table),
- .table = VMMC_VSEL_table,
+ .n_voltages = ARRAY_SIZE(VMMC_VSEL_table),
+ .voltage_table = VMMC_VSEL_table,
+ .enable_time_us = 100,
},
};
static struct tps_info tps65911_regs[] = {
+ {
+ .name = "VRTC",
+ .enable_time_us = 2200,
+ },
{
.name = "VIO",
.min_uV = 1500000,
.max_uV = 3300000,
- .table_len = ARRAY_SIZE(VIO_VSEL_table),
- .table = VIO_VSEL_table,
+ .n_voltages = ARRAY_SIZE(VIO_VSEL_table),
+ .voltage_table = VIO_VSEL_table,
+ .enable_time_us = 350,
},
{
.name = "VDD1",
.min_uV = 600000,
.max_uV = 4500000,
+ .n_voltages = 73,
+ .enable_time_us = 350,
},
{
.name = "VDD2",
.min_uV = 600000,
.max_uV = 4500000,
+ .n_voltages = 73,
+ .enable_time_us = 350,
},
{
.name = "VDDCTRL",
.min_uV = 600000,
.max_uV = 1400000,
+ .n_voltages = 65,
+ .enable_time_us = 900,
},
{
.name = "LDO1",
.min_uV = 1000000,
.max_uV = 3300000,
+ .n_voltages = 47,
+ .enable_time_us = 420,
},
{
.name = "LDO2",
.min_uV = 1000000,
.max_uV = 3300000,
+ .n_voltages = 47,
+ .enable_time_us = 420,
},
{
.name = "LDO3",
.min_uV = 1000000,
.max_uV = 3300000,
+ .n_voltages = 24,
+ .enable_time_us = 230,
},
{
.name = "LDO4",
.min_uV = 1000000,
.max_uV = 3300000,
+ .n_voltages = 47,
+ .enable_time_us = 230,
},
{
.name = "LDO5",
.min_uV = 1000000,
.max_uV = 3300000,
+ .n_voltages = 24,
+ .enable_time_us = 230,
},
{
.name = "LDO6",
.min_uV = 1000000,
.max_uV = 3300000,
+ .n_voltages = 24,
+ .enable_time_us = 230,
},
{
.name = "LDO7",
.min_uV = 1000000,
.max_uV = 3300000,
+ .n_voltages = 24,
+ .enable_time_us = 230,
},
{
.name = "LDO8",
.min_uV = 1000000,
.max_uV = 3300000,
+ .n_voltages = 24,
+ .enable_time_us = 230,
},
};
+ #define EXT_CONTROL_REG_BITS(id, regs_offs, bits) (((regs_offs) << 8) | (bits))
+ static unsigned int tps65910_ext_sleep_control[] = {
+ 0,
+ EXT_CONTROL_REG_BITS(VIO, 1, 0),
+ EXT_CONTROL_REG_BITS(VDD1, 1, 1),
+ EXT_CONTROL_REG_BITS(VDD2, 1, 2),
+ EXT_CONTROL_REG_BITS(VDD3, 1, 3),
+ EXT_CONTROL_REG_BITS(VDIG1, 0, 1),
+ EXT_CONTROL_REG_BITS(VDIG2, 0, 2),
+ EXT_CONTROL_REG_BITS(VPLL, 0, 6),
+ EXT_CONTROL_REG_BITS(VDAC, 0, 7),
+ EXT_CONTROL_REG_BITS(VAUX1, 0, 3),
+ EXT_CONTROL_REG_BITS(VAUX2, 0, 4),
+ EXT_CONTROL_REG_BITS(VAUX33, 0, 5),
+ EXT_CONTROL_REG_BITS(VMMC, 0, 0),
+ };
+
+ static unsigned int tps65911_ext_sleep_control[] = {
+ 0,
+ EXT_CONTROL_REG_BITS(VIO, 1, 0),
+ EXT_CONTROL_REG_BITS(VDD1, 1, 1),
+ EXT_CONTROL_REG_BITS(VDD2, 1, 2),
+ EXT_CONTROL_REG_BITS(VDDCTRL, 1, 3),
+ EXT_CONTROL_REG_BITS(LDO1, 0, 1),
+ EXT_CONTROL_REG_BITS(LDO2, 0, 2),
+ EXT_CONTROL_REG_BITS(LDO3, 0, 7),
+ EXT_CONTROL_REG_BITS(LDO4, 0, 6),
+ EXT_CONTROL_REG_BITS(LDO5, 0, 3),
+ EXT_CONTROL_REG_BITS(LDO6, 0, 0),
+ EXT_CONTROL_REG_BITS(LDO7, 0, 5),
+ EXT_CONTROL_REG_BITS(LDO8, 0, 4),
+ };
+
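The tables above pack each rail's sleep-control location into one value: the high byte is the register offset added to TPS65910_EN1_LDO_ASS (and the EN2/EN3 equivalents), and the low byte is the bit position within that register. A minimal decode sketch (the helper is illustrative, not part of the driver); note that the low byte must be masked off before shifting:

/* decode an EXT_CONTROL_REG_BITS() value */
static inline void ext_ctrl_decode(unsigned int packed, u8 *regoffs, u8 *bit_mask)
{
	*regoffs = (packed >> 8) & 0xFF;	/* register offset within the bank */
	*bit_mask = 1 << (packed & 0xFF);	/* single-bit mask for this rail */
}

/*
 * Example: EXT_CONTROL_REG_BITS(VDD2, 1, 2) == 0x0102,
 * so regoffs == 1 and bit_mask == 1 << 2 == 0x04.
 */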
struct tps65910_reg {
struct regulator_desc *desc;
struct tps65910 *mfd;
int num_regulators;
int mode;
int (*get_ctrl_reg)(int);
+ unsigned int *ext_sleep_control;
+ unsigned int board_ext_control[TPS65910_NUM_REGS];
};
static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
return tps65910_clear_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
}
+ static int tps65910_enable_time(struct regulator_dev *dev)
+ {
+ struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ int id = rdev_get_id(dev);
+ return pmic->info[id]->enable_time_us;
+ }
static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
{
if (value < 0)
return value;
- if (value & LDO_ST_ON_BIT)
+ if (!(value & LDO_ST_ON_BIT))
return REGULATOR_MODE_STANDBY;
else if (value & LDO_ST_MODE_BIT)
return REGULATOR_MODE_IDLE;
return REGULATOR_MODE_NORMAL;
}
- static int tps65910_get_voltage_dcdc(struct regulator_dev *dev)
+ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int id = rdev_get_id(dev), voltage = 0;
+ int id = rdev_get_id(dev);
int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0;
switch (id) {
srvsel = 3;
if (srvsel > vselmax)
srvsel = vselmax;
- srvsel -= 3;
-
- voltage = (srvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
+ return srvsel - 3;
} else {
/* normalise to valid range*/
opvsel = 3;
if (opvsel > vselmax)
opvsel = vselmax;
- opvsel -= 3;
-
- voltage = (opvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
+ return opvsel - 3;
}
-
- voltage *= mult;
-
- return voltage;
+ return -EINVAL;
}
static int tps65910_get_voltage(struct regulator_dev *dev)
return -EINVAL;
}
- voltage = pmic->info[id]->table[value] * 1000;
+ voltage = pmic->info[id]->voltage_table[value] * 1000;
return voltage;
}
step_mv = 100;
break;
case TPS65910_REG_VIO:
- return pmic->info[id]->table[value] * 1000;
- break;
+ value &= LDO_SEL_MASK;
+ value >>= LDO_SEL_SHIFT;
+ return pmic->info[id]->voltage_table[value] * 1000;
default:
return -EINVAL;
}
return (LDO_MIN_VOLT + value * step_mv) * 1000;
}
- static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
- unsigned selector)
+ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
+ unsigned selector)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
int id = rdev_get_id(dev), vsel;
tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
break;
case TPS65911_REG_VDDCTRL:
- vsel = selector;
+ vsel = selector + 3;
tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
}
return 0;
}
- static int tps65910_set_voltage(struct regulator_dev *dev, unsigned selector)
+ static int tps65910_set_voltage_sel(struct regulator_dev *dev,
+ unsigned selector)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
int reg, id = rdev_get_id(dev);
return -EINVAL;
}
- static int tps65911_set_voltage(struct regulator_dev *dev, unsigned selector)
+ static int tps65911_set_voltage_sel(struct regulator_dev *dev,
+ unsigned selector)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
int reg, id = rdev_get_id(dev);
case TPS65911_REG_LDO6:
case TPS65911_REG_LDO7:
case TPS65911_REG_LDO8:
- case TPS65910_REG_VIO:
return tps65910_modify_bits(pmic, reg,
(selector << LDO_SEL_SHIFT), LDO3_SEL_MASK);
+ case TPS65910_REG_VIO:
+ return tps65910_modify_bits(pmic, reg,
+ (selector << LDO_SEL_SHIFT), LDO_SEL_MASK);
}
return -EINVAL;
if (id < TPS65910_REG_VIO || id > TPS65910_REG_VMMC)
return -EINVAL;
- if (selector >= pmic->info[id]->table_len)
+ if (selector >= pmic->info[id]->n_voltages)
return -EINVAL;
else
- voltage = pmic->info[id]->table[selector] * 1000;
+ voltage = pmic->info[id]->voltage_table[selector] * 1000;
return voltage;
}
step_mv = 100;
break;
case TPS65910_REG_VIO:
- return pmic->info[id]->table[selector] * 1000;
+ return pmic->info[id]->voltage_table[selector] * 1000;
default:
return -EINVAL;
}
return (LDO_MIN_VOLT + selector * step_mv) * 1000;
}
+ static int tps65910_set_voltage_dcdc_time_sel(struct regulator_dev *dev,
+ unsigned int old_selector, unsigned int new_selector)
+ {
+ int id = rdev_get_id(dev);
+ int old_volt, new_volt;
+
+ old_volt = tps65910_list_voltage_dcdc(dev, old_selector);
+ if (old_volt < 0)
+ return old_volt;
+
+ new_volt = tps65910_list_voltage_dcdc(dev, new_selector);
+ if (new_volt < 0)
+ return new_volt;
+
+ /* VDD1 and VDD2 are 12.5mV/us, VDDCTRL is 100mV/20us */
+ switch (id) {
+ case TPS65910_REG_VDD1:
+ case TPS65910_REG_VDD2:
+ return DIV_ROUND_UP(abs(old_volt - new_volt), 12500);
+ case TPS65911_REG_VDDCTRL:
+ return DIV_ROUND_UP(abs(old_volt - new_volt), 5000);
+ }
+ return -EINVAL;
+ }
+
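As a worked example of the ramp arithmetic above (the voltages are hypothetical): stepping VDD1 from 1100000 uV to 1200000 uV is a 100000 uV change, and at 12.5 mV/us (12500 uV/us) that gives DIV_ROUND_UP(100000, 12500) = 8 us, whereas VDDCTRL at 100 mV per 20 us (5000 uV/us) would need 20 us for the same change.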
/* Regulator ops (except VRTC) */
static struct regulator_ops tps65910_ops_dcdc = {
.is_enabled = tps65910_is_enabled,
.enable = tps65910_enable,
.disable = tps65910_disable,
+ .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
- .get_voltage = tps65910_get_voltage_dcdc,
- .set_voltage_sel = tps65910_set_voltage_dcdc,
+ .get_voltage_sel = tps65910_get_voltage_dcdc_sel,
+ .set_voltage_sel = tps65910_set_voltage_dcdc_sel,
+ .set_voltage_time_sel = tps65910_set_voltage_dcdc_time_sel,
.list_voltage = tps65910_list_voltage_dcdc,
};
.is_enabled = tps65910_is_enabled,
.enable = tps65910_enable,
.disable = tps65910_disable,
+ .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
.get_voltage = tps65910_get_voltage_vdd3,
.is_enabled = tps65910_is_enabled,
.enable = tps65910_enable,
.disable = tps65910_disable,
+ .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
.get_voltage = tps65910_get_voltage,
- .set_voltage_sel = tps65910_set_voltage,
+ .set_voltage_sel = tps65910_set_voltage_sel,
.list_voltage = tps65910_list_voltage,
};
.is_enabled = tps65910_is_enabled,
.enable = tps65910_enable,
.disable = tps65910_disable,
+ .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
.get_voltage = tps65911_get_voltage,
- .set_voltage_sel = tps65911_set_voltage,
+ .set_voltage_sel = tps65911_set_voltage_sel,
.list_voltage = tps65911_list_voltage,
};
+ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
+ int id, int ext_sleep_config)
+ {
+ struct tps65910 *mfd = pmic->mfd;
+ u8 regoffs = (pmic->ext_sleep_control[id] >> 8) & 0xFF;
+ u8 bit_pos = 1 << (pmic->ext_sleep_control[id] & 0xFF);
+ int ret;
+
+ /*
+ * A regulator cannot be controlled from the external inputs EN1, EN2
+ * and EN3 at the same time.
+ */
+ if (ext_sleep_config & EXT_SLEEP_CONTROL) {
+ int en_count;
+ en_count = ((ext_sleep_config &
+ TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1) != 0);
+ en_count += ((ext_sleep_config &
+ TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2) != 0);
+ en_count += ((ext_sleep_config &
+ TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3) != 0);
+ en_count += ((ext_sleep_config &
+ TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP) != 0);
+ if (en_count > 1) {
+ dev_err(mfd->dev,
+ "External sleep control flag is not proper\n");
+ return -EINVAL;
+ }
+ }
+
+ pmic->board_ext_control[id] = ext_sleep_config;
+
+ /* External EN1 control */
+ if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1)
+ ret = tps65910_set_bits(mfd,
+ TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
+ else
+ ret = tps65910_clear_bits(mfd,
+ TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
+ if (ret < 0) {
+ dev_err(mfd->dev,
+ "Error in configuring external control EN1\n");
+ return ret;
+ }
+
+ /* External EN2 control */
+ if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2)
+ ret = tps65910_set_bits(mfd,
+ TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
+ else
+ ret = tps65910_clear_bits(mfd,
+ TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
+ if (ret < 0) {
+ dev_err(mfd->dev,
+ "Error in configuring external control EN2\n");
+ return ret;
+ }
+
+ /* External EN3 control for TPS65910 LDO only */
+ if ((tps65910_chip_id(mfd) == TPS65910) &&
+ (id >= TPS65910_REG_VDIG1)) {
+ if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3)
+ ret = tps65910_set_bits(mfd,
+ TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
+ else
+ ret = tps65910_clear_bits(mfd,
+ TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
+ if (ret < 0) {
+ dev_err(mfd->dev,
+ "Error in configuring external control EN3\n");
+ return ret;
+ }
+ }
+
+ /* Return if no external control is selected */
+ if (!(ext_sleep_config & EXT_SLEEP_CONTROL)) {
+ /* Clear all sleep controls */
+ ret = tps65910_clear_bits(mfd,
+ TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
+ if (!ret)
+ ret = tps65910_clear_bits(mfd,
+ TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
+ if (ret < 0)
+ dev_err(mfd->dev,
+ "Error in configuring SLEEP register\n");
+ return ret;
+ }
+
+ /*
+ * For regulators that have separate operational and sleep registers,
+ * make sure the operational register is used and clear the sleep
+ * register so the regulator turns off when external control is inactive
+ */
+ if ((id == TPS65910_REG_VDD1) ||
+ (id == TPS65910_REG_VDD2) ||
+ ((id == TPS65911_REG_VDDCTRL) &&
+ (tps65910_chip_id(mfd) == TPS65911))) {
+ int op_reg_add = pmic->get_ctrl_reg(id) + 1;
+ int sr_reg_add = pmic->get_ctrl_reg(id) + 2;
+ int opvsel = tps65910_reg_read(pmic, op_reg_add);
+ int srvsel = tps65910_reg_read(pmic, sr_reg_add);
+ if (opvsel & VDD1_OP_CMD_MASK) {
+ u8 reg_val = srvsel & VDD1_OP_SEL_MASK;
+ ret = tps65910_reg_write(pmic, op_reg_add, reg_val);
+ if (ret < 0) {
+ dev_err(mfd->dev,
+ "Error in configuring op register\n");
+ return ret;
+ }
+ }
+ ret = tps65910_reg_write(pmic, sr_reg_add, 0);
+ if (ret < 0) {
+ dev_err(mfd->dev, "Error in settting sr register\n");
+ return ret;
+ }
+ }
+
+ ret = tps65910_clear_bits(mfd,
+ TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
+ if (!ret) {
+ if (ext_sleep_config & TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
+ ret = tps65910_set_bits(mfd,
+ TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
+ else
+ ret = tps65910_clear_bits(mfd,
+ TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
+ }
+ if (ret < 0)
+ dev_err(mfd->dev,
+ "Error in configuring SLEEP register\n");
+
+ return ret;
+ }
+
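The ext_sleep_config value handled above normally arrives per rail through platform data; the probe code below reads it from pmic_plat_data->regulator_ext_sleep_control[i]. A minimal board-file sketch, assuming the board data type is the tps65910_board struct from the MFD header (everything else here is illustrative):

#include <linux/mfd/tps65910.h>

/* hypothetical board data: let external input EN1 gate VDD1 during sleep */
static struct tps65910_board board_tps65910_data = {
	/* irq, gpio and regulator init data omitted */
	.regulator_ext_sleep_control = {
		[TPS65910_REG_VDD1] = TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1,
	},
};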
static __devinit int tps65910_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
case TPS65910:
pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
+ pmic->ext_sleep_control = tps65910_ext_sleep_control;
info = tps65910_regs;
break;
case TPS65911:
pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
pmic->num_regulators = ARRAY_SIZE(tps65911_regs);
+ pmic->ext_sleep_control = tps65911_ext_sleep_control;
info = tps65911_regs;
break;
default:
pmic->desc[i].name = info->name;
pmic->desc[i].id = i;
- pmic->desc[i].n_voltages = info->table_len;
+ pmic->desc[i].n_voltages = info->n_voltages;
if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
pmic->desc[i].ops = &tps65910_ops_dcdc;
pmic->desc[i].ops = &tps65911_ops;
}
+ err = tps65910_set_ext_sleep_config(pmic, i,
+ pmic_plat_data->regulator_ext_sleep_control[i]);
+ /*
+ * Failing to configure external control for a regulator is
+ * not a serious issue, just throw a warning.
+ */
+ if (err < 0)
+ dev_warn(tps65910->dev,
+ "Failed to initialise ext control config\n");
+
pmic->desc[i].type = REGULATOR_VOLTAGE;
pmic->desc[i].owner = THIS_MODULE;
return 0;
}
+ static void tps65910_shutdown(struct platform_device *pdev)
+ {
+ struct tps65910_reg *pmic = platform_get_drvdata(pdev);
+ int i;
+
+ /*
+ * Before the bootloader jumps to the kernel, it makes sure that the
+ * required external control signals are in the desired state so that
+ * the given rails can be configured accordingly.
+ * If rails are configured to be controlled by external signals then,
+ * before shutting down/rebooting the system, the external control
+ * configuration needs to be removed from those rails so that their
+ * output follows the register programming even after the external
+ * controls go away. This is required when the POR value of the control
+ * signals is not the active state and the system needs the rail output
+ * to be active for booting before the bootloader reinitializes it.
+ */
+ for (i = 0; i < pmic->num_regulators; i++) {
+ int err;
+ if (!pmic->rdev[i])
+ continue;
+
+ err = tps65910_set_ext_sleep_config(pmic, i, 0);
+ if (err < 0)
+ dev_err(&pdev->dev,
+ "Error in clearing external control\n");
+ }
+ }
+
static struct platform_driver tps65910_driver = {
.driver = {
.name = "tps65910-pmic",
},
.probe = tps65910_probe,
.remove = __devexit_p(tps65910_remove),
+ .shutdown = tps65910_shutdown,
};
static int __init tps65910_init(void)
module_exit(tps65910_cleanup);
- MODULE_DESCRIPTION("TPS6507x voltage regulator driver");
+ MODULE_DESCRIPTION("TPS65910/TPS65911 voltage regulator driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tps65910-pmic");
struct gs_port *port;
int status;
- if (port_num < 0 || port_num >= n_ports)
- return -ENXIO;
-
do {
mutex_lock(&ports[port_num].lock);
port = ports[port_num].port;
unsigned long flags;
int status;
- pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
+ pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %pf\n",
port->port_num, tty, ch, __builtin_return_address(0));
spin_lock_irqsave(&port->port_lock, flags);
if (!gs_tty_driver)
return -ENOMEM;
- gs_tty_driver->owner = THIS_MODULE;
gs_tty_driver->driver_name = "g_serial";
gs_tty_driver->name = PREFIX;
/* uses dynamically assigned dev_t values */
u32 sys_clksrc;
/* Allocate new device private */
- fbdev = kzalloc(sizeof(struct au1100fb_device), GFP_KERNEL);
+ fbdev = devm_kzalloc(&dev->dev, sizeof(struct au1100fb_device),
+ GFP_KERNEL);
if (!fbdev) {
print_err("fail to allocate device private record");
return -ENOMEM;
au1100fb_fix.mmio_start = regs_res->start;
au1100fb_fix.mmio_len = resource_size(regs_res);
- if (!request_mem_region(au1100fb_fix.mmio_start, au1100fb_fix.mmio_len,
- DRIVER_NAME)) {
+ if (!devm_request_mem_region(&dev->dev, au1100fb_fix.mmio_start,
+ au1100fb_fix.mmio_len,
+ DRIVER_NAME)) {
print_err("fail to lock memory region at 0x%08lx",
au1100fb_fix.mmio_start);
return -EBUSY;
fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres *
(fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS;
- fbdev->fb_mem = dma_alloc_coherent(&dev->dev, PAGE_ALIGN(fbdev->fb_len),
- &fbdev->fb_phys, GFP_KERNEL);
+ fbdev->fb_mem = dmam_alloc_coherent(&dev->dev,
+ PAGE_ALIGN(fbdev->fb_len),
+ &fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
for (page = (unsigned long)fbdev->fb_mem;
page < PAGE_ALIGN((unsigned long)fbdev->fb_mem + fbdev->fb_len);
page += PAGE_SIZE) {
-#if CONFIG_DMA_NONCOHERENT
+#ifdef CONFIG_DMA_NONCOHERENT
SetPageReserved(virt_to_page(CAC_ADDR((void *)page)));
#else
SetPageReserved(virt_to_page(page));
fbdev->info.fbops = &au1100fb_ops;
fbdev->info.fix = au1100fb_fix;
- if (!(fbdev->info.pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL))) {
+ fbdev->info.pseudo_palette =
+ devm_kzalloc(&dev->dev, sizeof(u32) * 16, GFP_KERNEL);
+ if (!fbdev->info.pseudo_palette)
return -ENOMEM;
- }
if (fb_alloc_cmap(&fbdev->info.cmap, AU1100_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
print_err("Fail to allocate colormap (%d entries)",
AU1100_LCD_NBR_PALETTE_ENTRIES);
- kfree(fbdev->info.pseudo_palette);
return -EFAULT;
}
return 0;
failed:
- if (fbdev->regs) {
- release_mem_region(fbdev->regs_phys, fbdev->regs_len);
- }
if (fbdev->fb_mem) {
dma_free_noncoherent(&dev->dev, fbdev->fb_len, fbdev->fb_mem,
fbdev->fb_phys);
if (fbdev->info.cmap.len != 0) {
fb_dealloc_cmap(&fbdev->info.cmap);
}
- kfree(fbdev);
platform_set_drvdata(dev, NULL);
- return 0;
+ return -ENODEV;
}
int au1100fb_drv_remove(struct platform_device *dev)
/* Clean up all probe data */
unregister_framebuffer(&fbdev->info);
- release_mem_region(fbdev->regs_phys, fbdev->regs_len);
-
- dma_free_coherent(&dev->dev, PAGE_ALIGN(fbdev->fb_len), fbdev->fb_mem,
- fbdev->fb_phys);
-
fb_dealloc_cmap(&fbdev->info.cmap);
- kfree(fbdev->info.pseudo_palette);
- kfree((void*)fbdev);
return 0;
}
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
- static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only);
- static int btrfs_destroy_ordered_operations(struct btrfs_root *root);
- static int btrfs_destroy_ordered_extents(struct btrfs_root *root);
+ static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
+ static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_root *root);
- static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
- static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
+ static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
+ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages,
int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
struct extent_io_tree *pinned_extents);
- static int btrfs_cleanup_transaction(struct btrfs_root *root);
/*
* end_io_wq structs are used to do processing in task context when an IO is
*/
u64 bio_offset;
struct btrfs_work work;
+ int error;
};
/*
return 0;
lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
- 0, &cached_state, GFP_NOFS);
- if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
+ 0, &cached_state);
+ if (extent_buffer_uptodate(eb) &&
btrfs_header_generation(eb) == parent_transid) {
ret = 0;
goto out;
(unsigned long long)parent_transid,
(unsigned long long)btrfs_header_generation(eb));
ret = 1;
- clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
+ clear_extent_buffer_uptodate(eb);
out:
unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
&cached_state, GFP_NOFS);
u64 start, u64 parent_transid)
{
struct extent_io_tree *io_tree;
+ int failed = 0;
int ret;
int num_copies = 0;
int mirror_num = 0;
+ int failed_mirror = 0;
clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
ret = read_extent_buffer_pages(io_tree, eb, start,
WAIT_COMPLETE,
btree_get_extent, mirror_num);
- if (!ret &&
- !verify_parent_transid(io_tree, eb, parent_transid))
- return ret;
+ if (!ret && !verify_parent_transid(io_tree, eb, parent_transid))
+ break;
/*
* This buffer's crc is fine, but its contents are corrupted, so
* any less wrong.
*/
if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
- return ret;
+ break;
+
+ if (!failed_mirror) {
+ failed = 1;
+ printk(KERN_ERR "failed mirror was %d\n", eb->failed_mirror);
+ failed_mirror = eb->failed_mirror;
+ }
num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
eb->start, eb->len);
if (num_copies == 1)
- return ret;
+ break;
mirror_num++;
+ if (mirror_num == failed_mirror)
+ mirror_num++;
+
if (mirror_num > num_copies)
- return ret;
+ break;
}
- return -EIO;
+
+ if (failed && !ret)
+ repair_eb_io_failure(root, eb, failed_mirror);
+
+ return ret;
}
/*
struct extent_io_tree *tree;
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 found_start;
- unsigned long len;
struct extent_buffer *eb;
- int ret;
tree = &BTRFS_I(page->mapping->host)->io_tree;
- if (page->private == EXTENT_PAGE_PRIVATE) {
- WARN_ON(1);
- goto out;
- }
- if (!page->private) {
- WARN_ON(1);
- goto out;
- }
- len = page->private >> 2;
- WARN_ON(len == 0);
-
- eb = alloc_extent_buffer(tree, start, len, page);
- if (eb == NULL) {
- WARN_ON(1);
- goto out;
- }
- ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
- btrfs_header_generation(eb));
- BUG_ON(ret);
- WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));
-
+ eb = (struct extent_buffer *)page->private;
+ if (page != eb->pages[0])
+ return 0;
found_start = btrfs_header_bytenr(eb);
if (found_start != start) {
WARN_ON(1);
- goto err;
+ return 0;
}
- if (eb->first_page != page) {
+ if (eb->pages[0] != page) {
WARN_ON(1);
- goto err;
+ return 0;
}
if (!PageUptodate(page)) {
WARN_ON(1);
- goto err;
+ return 0;
}
csum_tree_block(root, eb, 0);
- err:
- free_extent_buffer(eb);
- out:
return 0;
}
return 0;
}
+ struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
+ struct page *page, int max_walk)
+ {
+ struct extent_buffer *eb;
+ u64 start = page_offset(page);
+ u64 target = start;
+ u64 min_start;
+
+ if (start < max_walk)
+ min_start = 0;
+ else
+ min_start = start - max_walk;
+
+ while (start >= min_start) {
+ eb = find_extent_buffer(tree, start, 0);
+ if (eb) {
+ /*
+ * we found an extent buffer and it contains our page
+ * hooray!
+ */
+ if (eb->start <= target &&
+ eb->start + eb->len > target)
+ return eb;
+
+ /* we found an extent buffer that wasn't for us */
+ free_extent_buffer(eb);
+ return NULL;
+ }
+ if (start == 0)
+ break;
+ start -= PAGE_CACHE_SIZE;
+ }
+ return NULL;
+ }
+
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state)
{
struct extent_io_tree *tree;
u64 found_start;
int found_level;
- unsigned long len;
struct extent_buffer *eb;
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
int ret = 0;
+ int reads_done;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- if (page->private == EXTENT_PAGE_PRIVATE)
- goto out;
if (!page->private)
goto out;
- len = page->private >> 2;
- WARN_ON(len == 0);
+ tree = &BTRFS_I(page->mapping->host)->io_tree;
+ eb = (struct extent_buffer *)page->private;
+
+ /* the pending IO might have been the only thing that kept this buffer
+ * in memory. Make sure we have a ref for all these other checks
+ */
+ extent_buffer_get(eb);
+
+ reads_done = atomic_dec_and_test(&eb->io_pages);
+ if (!reads_done)
+ goto err;
- eb = alloc_extent_buffer(tree, start, len, page);
- if (eb == NULL) {
+ if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
ret = -EIO;
- goto out;
+ goto err;
}
found_start = btrfs_header_bytenr(eb);
- if (found_start != start) {
+ if (found_start != eb->start) {
printk_ratelimited(KERN_INFO "btrfs bad tree block start "
"%llu %llu\n",
(unsigned long long)found_start,
ret = -EIO;
goto err;
}
- if (eb->first_page != page) {
- printk(KERN_INFO "btrfs bad first page %lu %lu\n",
- eb->first_page->index, page->index);
- WARN_ON(1);
- ret = -EIO;
- goto err;
- }
if (check_tree_block_fsid(root, eb)) {
printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
(unsigned long long)eb->start);
ret = -EIO;
}
- end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
- end = eb->start + end - 1;
+ if (!ret)
+ set_extent_buffer_uptodate(eb);
err:
if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
btree_readahead_hook(root, eb, eb->start, ret);
}
+ if (ret)
+ clear_extent_buffer_uptodate(eb);
free_extent_buffer(eb);
out:
return ret;
}
- static int btree_io_failed_hook(struct bio *failed_bio,
- struct page *page, u64 start, u64 end,
- int mirror_num, struct extent_state *state)
+ static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
- struct extent_io_tree *tree;
- unsigned long len;
struct extent_buffer *eb;
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- if (page->private == EXTENT_PAGE_PRIVATE)
- goto out;
- if (!page->private)
- goto out;
-
- len = page->private >> 2;
- WARN_ON(len == 0);
-
- eb = alloc_extent_buffer(tree, start, len, page);
- if (eb == NULL)
- goto out;
-
- if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
- clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+ eb = (struct extent_buffer *)page->private;
+ set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ eb->failed_mirror = failed_mirror;
+ if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
btree_readahead_hook(root, eb, eb->start, -EIO);
- }
- free_extent_buffer(eb);
-
- out:
return -EIO; /* we fixed nothing */
}
static void run_one_async_start(struct btrfs_work *work)
{
struct async_submit_bio *async;
+ int ret;
async = container_of(work, struct async_submit_bio, work);
- async->submit_bio_start(async->inode, async->rw, async->bio,
- async->mirror_num, async->bio_flags,
- async->bio_offset);
+ ret = async->submit_bio_start(async->inode, async->rw, async->bio,
+ async->mirror_num, async->bio_flags,
+ async->bio_offset);
+ if (ret)
+ async->error = ret;
}
static void run_one_async_done(struct btrfs_work *work)
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait);
+ /* If an error occurred we just want to clean up the bio and move on */
+ if (async->error) {
+ bio_endio(async->bio, async->error);
+ return;
+ }
+
async->submit_bio_done(async->inode, async->rw, async->bio,
async->mirror_num, async->bio_flags,
async->bio_offset);
async->bio_flags = bio_flags;
async->bio_offset = bio_offset;
+ async->error = 0;
+
atomic_inc(&fs_info->nr_async_submits);
if (rw & REQ_SYNC)
struct bio_vec *bvec = bio->bi_io_vec;
int bio_index = 0;
struct btrfs_root *root;
+ int ret = 0;
WARN_ON(bio->bi_vcnt <= 0);
while (bio_index < bio->bi_vcnt) {
root = BTRFS_I(bvec->bv_page->mapping->host)->root;
- csum_dirty_buffer(root, bvec->bv_page);
+ ret = csum_dirty_buffer(root, bvec->bv_page);
+ if (ret)
+ break;
bio_index++;
bvec++;
}
- return 0;
+ return ret;
}
static int __btree_submit_bio_start(struct inode *inode, int rw,
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
*/
- btree_csum_one_bio(bio);
- return 0;
+ return btree_csum_one_bio(bio);
}
static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
{
int ret;
- ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
- bio, 1);
- BUG_ON(ret);
-
if (!(rw & REQ_WRITE)) {
+
/*
* called for a read, do the setup so that checksum validation
* can happen in the async kernel threads
*/
+ ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
+ bio, 1);
+ if (ret)
+ return ret;
return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
mirror_num, 0);
}
}
#endif
- static int btree_writepage(struct page *page, struct writeback_control *wbc)
- {
- struct extent_io_tree *tree;
- struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
- struct extent_buffer *eb;
- int was_dirty;
-
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- if (!(current->flags & PF_MEMALLOC)) {
- return extent_write_full_page(tree, page,
- btree_get_extent, wbc);
- }
-
- redirty_page_for_writepage(wbc, page);
- eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
- WARN_ON(!eb);
-
- was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
- if (!was_dirty) {
- spin_lock(&root->fs_info->delalloc_lock);
- root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
- spin_unlock(&root->fs_info->delalloc_lock);
- }
- free_extent_buffer(eb);
-
- unlock_page(page);
- return 0;
- }
static int btree_writepages(struct address_space *mapping,
struct writeback_control *wbc)
if (num_dirty < thresh)
return 0;
}
- return extent_writepages(tree, mapping, btree_get_extent, wbc);
+ return btree_write_cache_pages(mapping, wbc);
}
static int btree_readpage(struct file *file, struct page *page)
static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
- struct extent_io_tree *tree;
- struct extent_map_tree *map;
- int ret;
-
if (PageWriteback(page) || PageDirty(page))
return 0;
-
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- map = &BTRFS_I(page->mapping->host)->extent_tree;
-
/*
* We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
* slab allocation from alloc_extent_state down the callchain where
*/
gfp_flags &= ~GFP_SLAB_BUG_MASK;
- ret = try_release_extent_state(map, tree, page, gfp_flags);
- if (!ret)
- return 0;
-
- ret = try_release_extent_buffer(tree, page);
- if (ret == 1) {
- ClearPagePrivate(page);
- set_page_private(page, 0);
- page_cache_release(page);
- }
-
- return ret;
+ return try_release_extent_buffer(page, gfp_flags);
}
static void btree_invalidatepage(struct page *page, unsigned long offset)
}
}
+ static int btree_set_page_dirty(struct page *page)
+ {
+ struct extent_buffer *eb;
+
+ BUG_ON(!PagePrivate(page));
+ eb = (struct extent_buffer *)page->private;
+ BUG_ON(!eb);
+ BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+ BUG_ON(!atomic_read(&eb->refs));
+ btrfs_assert_tree_locked(eb);
+ return __set_page_dirty_nobuffers(page);
+ }
+
static const struct address_space_operations btree_aops = {
.readpage = btree_readpage,
- .writepage = btree_writepage,
.writepages = btree_writepages,
.releasepage = btree_releasepage,
.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
.migratepage = btree_migratepage,
#endif
+ .set_page_dirty = btree_set_page_dirty,
};
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
free_extent_buffer(buf);
return -EIO;
- } else if (extent_buffer_uptodate(io_tree, buf, NULL)) {
+ } else if (extent_buffer_uptodate(buf)) {
*eb = buf;
} else {
free_extent_buffer(buf);
struct extent_buffer *eb;
eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
- bytenr, blocksize, NULL);
+ bytenr, blocksize);
return eb;
}
int btrfs_write_tree_block(struct extent_buffer *buf)
{
- return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
+ return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
buf->start + buf->len - 1);
}
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
- return filemap_fdatawait_range(buf->first_page->mapping,
+ return filemap_fdatawait_range(buf->pages[0]->mapping,
buf->start, buf->start + buf->len - 1);
}
return NULL;
ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
-
- if (ret == 0)
- set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
return buf;
}
- int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf)
+ void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct extent_buffer *buf)
{
- struct inode *btree_inode = root->fs_info->btree_inode;
if (btrfs_header_generation(buf) ==
root->fs_info->running_transaction->transid) {
btrfs_assert_tree_locked(buf);
spin_lock(&root->fs_info->delalloc_lock);
if (root->fs_info->dirty_metadata_bytes >= buf->len)
root->fs_info->dirty_metadata_bytes -= buf->len;
- else
- WARN_ON(1);
+ else {
+ spin_unlock(&root->fs_info->delalloc_lock);
+ btrfs_panic(root->fs_info, -EOVERFLOW,
+ "Can't clear %lu bytes from "
+ " dirty_mdatadata_bytes (%lu)",
+ buf->len,
+ root->fs_info->dirty_metadata_bytes);
+ }
spin_unlock(&root->fs_info->delalloc_lock);
}
/* ugh, clear_extent_buffer_dirty needs to lock the page */
btrfs_set_lock_blocking(buf);
- clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
- buf);
+ clear_extent_buffer_dirty(buf);
}
- return 0;
}
- static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
- u32 stripesize, struct btrfs_root *root,
- struct btrfs_fs_info *fs_info,
- u64 objectid)
+ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
+ u32 stripesize, struct btrfs_root *root,
+ struct btrfs_fs_info *fs_info,
+ u64 objectid)
{
root->node = NULL;
root->commit_root = NULL;
root->defrag_running = 0;
root->root_key.objectid = objectid;
root->anon_dev = 0;
- return 0;
}
- static int find_and_setup_root(struct btrfs_root *tree_root,
- struct btrfs_fs_info *fs_info,
- u64 objectid,
- struct btrfs_root *root)
+ static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
+ struct btrfs_fs_info *fs_info,
+ u64 objectid,
+ struct btrfs_root *root)
{
int ret;
u32 blocksize;
&root->root_item, &root->root_key);
if (ret > 0)
return -ENOENT;
- BUG_ON(ret);
+ else if (ret < 0)
+ return ret;
generation = btrfs_root_generation(&root->root_item);
blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
blocksize, generation);
root->commit_root = btrfs_root_node(root);
- BUG_ON(!root->node);
+ BUG_ON(!root->node); /* -ENOMEM */
out:
if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
root->ref_cows = 1;
return 0;
}
- static int bio_ready_for_csum(struct bio *bio)
- {
- u64 length = 0;
- u64 buf_len = 0;
- u64 start = 0;
- struct page *page;
- struct extent_io_tree *io_tree = NULL;
- struct bio_vec *bvec;
- int i;
- int ret;
-
- bio_for_each_segment(bvec, bio, i) {
- page = bvec->bv_page;
- if (page->private == EXTENT_PAGE_PRIVATE) {
- length += bvec->bv_len;
- continue;
- }
- if (!page->private) {
- length += bvec->bv_len;
- continue;
- }
- length = bvec->bv_len;
- buf_len = page->private >> 2;
- start = page_offset(page) + bvec->bv_offset;
- io_tree = &BTRFS_I(page->mapping->host)->io_tree;
- }
- /* are we fully contained in this bio? */
- if (buf_len <= length)
- return 1;
-
- ret = extent_range_uptodate(io_tree, start + length,
- start + buf_len - 1);
- return ret;
- }
-
/*
* called by the kthread helper functions to finally call the bio end_io
* functions. This is where read checksum verification actually happens
bio = end_io_wq->bio;
fs_info = end_io_wq->info;
- /* metadata bio reads are special because the whole tree block must
- * be checksummed at once. This makes sure the entire block is in
- * ram and up to date before trying to verify things. For
- * blocksize <= pagesize, it is basically a noop
- */
- if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
- !bio_ready_for_csum(bio)) {
- btrfs_queue_worker(&fs_info->endio_meta_workers,
- &end_io_wq->work);
- return;
- }
error = end_io_wq->error;
bio->bi_private = end_io_wq->private;
bio->bi_end_io = end_io_wq->end_io;
u64 transid;
unsigned long now;
unsigned long delay;
- int ret;
+ bool cannot_commit;
do {
+ cannot_commit = false;
delay = HZ * 30;
vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
mutex_lock(&root->fs_info->transaction_kthread_mutex);
transid = cur->transid;
spin_unlock(&root->fs_info->trans_lock);
+ /* If the file system is aborted, this will always fail. */
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ cannot_commit = true;
+ goto sleep;
+ }
if (transid == trans->transid) {
- ret = btrfs_commit_transaction(trans, root);
- BUG_ON(ret);
+ btrfs_commit_transaction(trans, root);
} else {
btrfs_end_transaction(trans, root);
}
if (!try_to_freeze()) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
- !btrfs_transaction_blocked(root->fs_info))
+ (!btrfs_transaction_blocked(root->fs_info) ||
+ cannot_commit))
schedule_timeout(delay);
__set_current_state(TASK_RUNNING);
}
RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
fs_info->btree_inode->i_mapping);
+ BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
__setup_root(4096, 4096, 4096, 4096, tree_root,
fs_info, BTRFS_ROOT_TREE_OBJECTID);
+ invalidate_bdev(fs_devices->latest_bdev);
bh = btrfs_read_dev_super(fs_devices->latest_bdev);
if (!bh) {
err = -EINVAL;
/* check FS state, whether FS is broken. */
fs_info->fs_state |= btrfs_super_flags(disk_super);
- btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+ ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+ if (ret) {
+ printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
+ err = ret;
+ goto fail_alloc;
+ }
/*
* run through our array of backup supers and setup
goto fail_alloc;
}
+ if (btrfs_super_leafsize(disk_super) !=
+ btrfs_super_nodesize(disk_super)) {
+ printk(KERN_ERR "BTRFS: couldn't mount because metadata "
+ "blocksizes don't match. node %d leaf %d\n",
+ btrfs_super_nodesize(disk_super),
+ btrfs_super_leafsize(disk_super));
+ err = -EINVAL;
+ goto fail_alloc;
+ }
+ if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
+ printk(KERN_ERR "BTRFS: couldn't mount because metadata "
+ "blocksize (%d) was too large\n",
+ btrfs_super_leafsize(disk_super));
+ err = -EINVAL;
+ goto fail_alloc;
+ }
+
features = btrfs_super_incompat_flags(disk_super);
features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+
+ /*
+ * flag our filesystem as having big metadata blocks if
+ * they are bigger than the page size
+ */
+ if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
+ if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
+ printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
+ features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
+ }
+
+ nodesize = btrfs_super_nodesize(disk_super);
+ leafsize = btrfs_super_leafsize(disk_super);
+ sectorsize = btrfs_super_sectorsize(disk_super);
+ stripesize = btrfs_super_stripesize(disk_super);
+
+ /*
+ * mixed block groups end up with duplicate but slightly offset
+ * extent buffers for the same range. It leads to corruptions
+ */
+ if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
+ (sectorsize != leafsize)) {
+ printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
+ "are not allowed for mixed block groups on %s\n",
+ sb->s_id);
+ goto fail_alloc;
+ }
+
btrfs_set_super_incompat_flags(disk_super, features);
features = btrfs_super_compat_ro_flags(disk_super) &
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
4 * 1024 * 1024 / PAGE_CACHE_SIZE);
- nodesize = btrfs_super_nodesize(disk_super);
- leafsize = btrfs_super_leafsize(disk_super);
- sectorsize = btrfs_super_sectorsize(disk_super);
- stripesize = btrfs_super_stripesize(disk_super);
tree_root->nodesize = nodesize;
tree_root->leafsize = leafsize;
tree_root->sectorsize = sectorsize;
goto fail_sb_buffer;
}
+ if (sectorsize < PAGE_SIZE) {
+ printk(KERN_WARNING "btrfs: Incompatible sector size "
+ "found on %s\n", sb->s_id);
+ goto fail_sb_buffer;
+ }
+
mutex_lock(&fs_info->chunk_mutex);
ret = btrfs_read_sys_array(tree_root);
mutex_unlock(&fs_info->chunk_mutex);
chunk_root->node = read_tree_block(chunk_root,
btrfs_super_chunk_root(disk_super),
blocksize, generation);
- BUG_ON(!chunk_root->node);
+ BUG_ON(!chunk_root->node); /* -ENOMEM */
if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
sb->s_id);
btrfs_close_extra_devices(fs_devices);
+ if (!fs_devices->latest_bdev) {
+ printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
+ sb->s_id);
+ goto fail_tree_roots;
+ }
+
retry_root_backup:
blocksize = btrfs_level_size(tree_root,
btrfs_super_root_level(disk_super));
log_tree_root->node = read_tree_block(tree_root, bytenr,
blocksize,
generation + 1);
+ /* returns with log_tree_root freed on success */
ret = btrfs_recover_log_trees(log_tree_root);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_error(tree_root->fs_info, ret,
+ "Failed to recover log tree");
+ free_extent_buffer(log_tree_root->node);
+ kfree(log_tree_root);
+ goto fail_trans_kthread;
+ }
if (sb->s_flags & MS_RDONLY) {
- ret = btrfs_commit_super(tree_root);
- BUG_ON(ret);
+ ret = btrfs_commit_super(tree_root);
+ if (ret)
+ goto fail_trans_kthread;
}
}
ret = btrfs_find_orphan_roots(tree_root);
- BUG_ON(ret);
+ if (ret)
+ goto fail_trans_kthread;
if (!(sb->s_flags & MS_RDONLY)) {
ret = btrfs_cleanup_fs_roots(fs_info);
- BUG_ON(ret);
+ if (ret) {
+ }
ret = btrfs_recover_relocation(tree_root);
if (ret < 0) {
* one reference for us, and we leave it for the
* caller
*/
- device->flush_bio = NULL;;
+ device->flush_bio = NULL;
bio = bio_alloc(GFP_NOFS, 0);
if (!bio)
return -ENOMEM;
if (total_errors > max_errors) {
printk(KERN_ERR "btrfs: %d errors while writing supers\n",
total_errors);
+
+ /* This shouldn't happen. FUA is masked off if unsupported */
BUG();
}
}
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
if (total_errors > max_errors) {
- printk(KERN_ERR "btrfs: %d errors while writing supers\n",
- total_errors);
- BUG();
+ btrfs_error(root->fs_info, -EIO,
+ "%d errors while writing supers", total_errors);
+ return -EIO;
}
return 0;
}
return ret;
}
- int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
+ /* Kill all outstanding I/O */
+ void btrfs_abort_devices(struct btrfs_root *root)
+ {
+ struct list_head *head;
+ struct btrfs_device *dev;
+ mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ head = &root->fs_info->fs_devices->devices;
+ list_for_each_entry_rcu(dev, head, dev_list) {
+ blk_abort_queue(dev->bdev->bd_disk->queue);
+ }
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ }
+
+ void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
spin_lock(&fs_info->fs_roots_radix_lock);
radix_tree_delete(&fs_info->fs_roots_radix,
__btrfs_remove_free_space_cache(root->free_ino_pinned);
__btrfs_remove_free_space_cache(root->free_ino_ctl);
free_fs_root(root);
- return 0;
}
static void free_fs_root(struct btrfs_root *root)
kfree(root);
}
- static int del_fs_roots(struct btrfs_fs_info *fs_info)
+ static void del_fs_roots(struct btrfs_fs_info *fs_info)
{
int ret;
struct btrfs_root *gang[8];
for (i = 0; i < ret; i++)
btrfs_free_fs_root(fs_info, gang[i]);
}
- return 0;
}
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_commit_transaction(trans, root);
- BUG_ON(ret);
+ if (ret)
+ return ret;
/* run commit again to drop the original snapshot */
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans, root);
+ if (ret)
+ return ret;
ret = btrfs_write_and_wait_transaction(NULL, root);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_error(root->fs_info, ret,
+ "Failed to sync btree inode to disk.");
+ return ret;
+ }
ret = write_ctree_super(NULL, root, 0);
return ret;
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
int ret;
- struct inode *btree_inode = buf->first_page->mapping->host;
+ struct inode *btree_inode = buf->pages[0]->mapping->host;
- ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
- NULL);
+ ret = extent_buffer_uptodate(buf);
if (!ret)
return ret;
int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
- struct inode *btree_inode = buf->first_page->mapping->host;
- return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
- buf);
+ return set_extent_buffer_uptodate(buf);
}
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
- struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+ struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
u64 transid = btrfs_header_generation(buf);
- struct inode *btree_inode = root->fs_info->btree_inode;
int was_dirty;
btrfs_assert_tree_locked(buf);
(unsigned long long)root->fs_info->generation);
WARN_ON(1);
}
- was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
- buf);
+ was_dirty = set_extent_buffer_dirty(buf);
if (!was_dirty) {
spin_lock(&root->fs_info->delalloc_lock);
root->fs_info->dirty_metadata_bytes += buf->len;
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
- struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
- int ret;
- ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
- if (ret == 0)
- set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
- return ret;
+ struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+ return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}
static int btree_lock_page_hook(struct page *page, void *data,
{
struct inode *inode = page->mapping->host;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_buffer *eb;
- unsigned long len;
- u64 bytenr = page_offset(page);
- if (page->private == EXTENT_PAGE_PRIVATE)
+ /*
+ * We culled this eb but the page is still hanging out on the mapping,
+ * carry on.
+ */
+ if (!PagePrivate(page))
goto out;
- len = page->private >> 2;
- eb = find_extent_buffer(io_tree, bytenr, len);
- if (!eb)
+ eb = (struct extent_buffer *)page->private;
+ if (!eb) {
+ WARN_ON(1);
+ goto out;
+ }
+ if (page != eb->pages[0])
goto out;
if (!btrfs_try_tree_write_lock(eb)) {
}
btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
out:
if (!trylock_page(page)) {
flush_fn(data);
return 0;
}
- static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only)
{
+ if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) {
+ printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
+ return -EINVAL;
+ }
+
if (read_only)
- return;
+ return 0;
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+ if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
printk(KERN_WARNING "warning: mount fs with errors, "
"running btrfsck is recommended\n");
+ }
+
+ return 0;
}
int btrfs_error_commit_super(struct btrfs_root *root)
return ret;
}
- static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
+ static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
{
struct btrfs_inode *btrfs_inode;
struct list_head splice;
spin_unlock(&root->fs_info->ordered_extent_lock);
mutex_unlock(&root->fs_info->ordered_operations_mutex);
-
- return 0;
}
- static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
+ static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
struct list_head splice;
struct btrfs_ordered_extent *ordered;
}
spin_unlock(&root->fs_info->ordered_extent_lock);
-
- return 0;
}
- static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_root *root)
+ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ struct btrfs_root *root)
{
struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
delayed_refs = &trans->delayed_refs;
+ again:
spin_lock(&delayed_refs->lock);
if (delayed_refs->num_entries == 0) {
spin_unlock(&delayed_refs->lock);
struct btrfs_delayed_ref_head *head;
head = btrfs_delayed_node_to_head(ref);
+ spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
kfree(head->extent_op);
delayed_refs->num_heads--;
delayed_refs->num_heads_ready--;
list_del_init(&head->cluster);
mutex_unlock(&head->mutex);
+ btrfs_put_delayed_ref(ref);
+ goto again;
}
-
spin_unlock(&delayed_refs->lock);
btrfs_put_delayed_ref(ref);
return ret;
}
- static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
+ static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
{
struct btrfs_pending_snapshot *snapshot;
struct list_head splice;
kfree(snapshot);
}
-
- return 0;
}
- static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
+ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
struct btrfs_inode *btrfs_inode;
struct list_head splice;
}
spin_unlock(&root->fs_info->delalloc_lock);
-
- return 0;
}
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
return 0;
}
- static int btrfs_cleanup_transaction(struct btrfs_root *root)
+ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ struct btrfs_root *root)
+ {
+ btrfs_destroy_delayed_refs(cur_trans, root);
+ btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
+ cur_trans->dirty_pages.dirty_bytes);
+
+ /* FIXME: cleanup wait for commit */
+ cur_trans->in_commit = 1;
+ cur_trans->blocked = 1;
+ if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
+ wake_up(&root->fs_info->transaction_blocked_wait);
+
+ cur_trans->blocked = 0;
+ if (waitqueue_active(&root->fs_info->transaction_wait))
+ wake_up(&root->fs_info->transaction_wait);
+
+ cur_trans->commit_done = 1;
+ if (waitqueue_active(&cur_trans->commit_wait))
+ wake_up(&cur_trans->commit_wait);
+
+ btrfs_destroy_pending_snapshots(cur_trans);
+
+ btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
+ EXTENT_DIRTY);
+
+ /*
+ memset(cur_trans, 0, sizeof(*cur_trans));
+ kmem_cache_free(btrfs_transaction_cachep, cur_trans);
+ */
+ }
+
+ int btrfs_cleanup_transaction(struct btrfs_root *root)
{
struct btrfs_transaction *t;
LIST_HEAD(list);
- WARN_ON(1);
-
mutex_lock(&root->fs_info->transaction_kthread_mutex);
spin_lock(&root->fs_info->trans_lock);
return 0;
}
+ static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page,
+ u64 start, u64 end,
+ struct extent_state *state)
+ {
+ struct super_block *sb = page->mapping->host->i_sb;
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ btrfs_error(fs_info, -EIO,
+ "Error occured while writing out btree at %llu", start);
+ return -EIO;
+ }
+
static struct extent_io_ops btree_extent_io_ops = {
.write_cache_pages_lock_hook = btree_lock_page_hook,
.readpage_end_io_hook = btree_readpage_end_io_hook,
.submit_bio_hook = btree_submit_bio_hook,
/* note we're sharing with inode.c for the merge bio hook */
.merge_bio_hook = btrfs_merge_bio_hook,
+ .writepage_io_failed_hook = btree_writepage_io_failed_hook,
};
- * Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
+ * Jonathan Woithe <jwoithe@just42.net>
*
* This driver is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
ALC_AUTOMUTE_MIXER, /* mute/unmute mixer widget AMP */
};
+ #define MAX_VOL_NIDS 0x40
+
struct alc_spec {
/* codec parameterization */
const struct snd_kcontrol_new *mixers[5]; /* mixer arrays */
const hda_nid_t *capsrc_nids;
hda_nid_t dig_in_nid; /* digital-in NID; optional */
hda_nid_t mixer_nid; /* analog-mixer NID */
- DECLARE_BITMAP(vol_ctls, 0x20 << 1);
- DECLARE_BITMAP(sw_ctls, 0x20 << 1);
+ DECLARE_BITMAP(vol_ctls, MAX_VOL_NIDS << 1);
+ DECLARE_BITMAP(sw_ctls, MAX_VOL_NIDS << 1);
/* capture setup for dynamic dual-adc switch */
hda_nid_t cur_adc;
/* for virtual master */
hda_nid_t vmaster_nid;
+ struct hda_vmaster_mute_hook vmaster_mute;
#ifdef CONFIG_SND_HDA_POWER_SAVE
struct hda_loopback_check loopback;
+ int num_loopbacks;
+ struct hda_amp_list loopback_list[8];
#endif
/* for PLL fix */
struct snd_array bind_ctls;
};
- #define ALC_MODEL_AUTO 0 /* common for all chips */
-
static bool check_amp_caps(struct hda_codec *codec, hda_nid_t nid,
int dir, unsigned int bits)
{
int i, type, num_conns;
hda_nid_t nid;
+ if (!spec->input_mux)
+ return 0;
+
mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
imux = &spec->input_mux[mux_idx];
if (!imux->num_items && mux_idx > 0)
snd_hda_jack_report_sync(codec);
}
+ /* update the master volume per volume-knob's unsol event */
+ static void alc_update_knob_master(struct hda_codec *codec, hda_nid_t nid)
+ {
+ unsigned int val;
+ struct snd_kcontrol *kctl;
+ struct snd_ctl_elem_value *uctl;
+
+ kctl = snd_hda_find_mixer_ctl(codec, "Master Playback Volume");
+ if (!kctl)
+ return;
+ uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
+ if (!uctl)
+ return;
+ val = snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
+ val &= HDA_AMP_VOLMASK;
+ uctl->value.integer.value[0] = val;
+ uctl->value.integer.value[1] = val;
+ kctl->put(kctl, uctl);
+ kfree(uctl);
+ }
+
/* unsolicited event for HP jack sensing */
static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
{
+ int action;
+
if (codec->vendor_id == 0x10ec0880)
res >>= 28;
else
res >>= 26;
- res = snd_hda_jack_get_action(codec, res);
- alc_exec_unsol_event(codec, res);
+ action = snd_hda_jack_get_action(codec, res);
+ if (action == ALC_DCVOL_EVENT) {
+ /* Execute the dc-vol event here as it requires the NID
+ * but we don't pass NID to alc_exec_unsol_event().
+ * Once we convert all static quirks to the auto-parser,
+ * this can be integrated there.
+ */
+ struct hda_jack_tbl *jack;
+ jack = snd_hda_jack_tbl_get_from_tag(codec, res);
+ if (jack)
+ alc_update_knob_master(codec, jack->nid);
+ return;
+ }
+ alc_exec_unsol_event(codec, action);
}
/* call init functions of standard auto-mute helpers */
"Disabled", "Enabled"
};
static const char * const texts3[] = {
- "Disabled", "Speaker Only", "Line-Out+Speaker"
+ "Disabled", "Speaker Only", "Line Out+Speaker"
};
const char * const *texts;
return true;
}
- /* rebuild imux for matching with the given auto-mic pins (if not yet) */
- static bool alc_rebuild_imux_for_auto_mic(struct hda_codec *codec)
- {
- struct alc_spec *spec = codec->spec;
- struct hda_input_mux *imux;
- static char * const texts[3] = {
- "Mic", "Internal Mic", "Dock Mic"
- };
- int i;
-
- if (!spec->auto_mic)
- return false;
- imux = &spec->private_imux[0];
- if (spec->input_mux == imux)
- return true;
- spec->imux_pins[0] = spec->ext_mic_pin;
- spec->imux_pins[1] = spec->int_mic_pin;
- spec->imux_pins[2] = spec->dock_mic_pin;
- for (i = 0; i < 3; i++) {
- strcpy(imux->items[i].label, texts[i]);
- if (spec->imux_pins[i]) {
- hda_nid_t pin = spec->imux_pins[i];
- int c;
- for (c = 0; c < spec->num_adc_nids; c++) {
- hda_nid_t cap = get_capsrc(spec, c);
- int idx = get_connection_index(codec, cap, pin);
- if (idx >= 0) {
- imux->items[i].index = idx;
- break;
- }
- }
- imux->num_items = i + 1;
- }
- }
- spec->num_mux_defs = 1;
- spec->input_mux = imux;
- return true;
- }
-
/* check whether all auto-mic pins are valid; setup indices if OK */
static bool alc_auto_mic_check_imux(struct hda_codec *codec)
{
ALC_FIXUP_ACT_PRE_PROBE,
ALC_FIXUP_ACT_PROBE,
ALC_FIXUP_ACT_INIT,
+ ALC_FIXUP_ACT_BUILD,
};
static void alc_apply_fixup(struct hda_codec *codec, int action)
int id = -1;
const char *name = NULL;
+ /* when model=nofixup is given, don't pick up any fixups */
+ if (codec->modelname && !strcmp(codec->modelname, "nofixup")) {
+ spec->fixup_list = NULL;
+ spec->fixup_id = -1;
+ return;
+ }
+
if (codec->modelname && models) {
while (models->name) {
if (!strcmp(codec->modelname, models->name)) {
/*
* slave controls for virtual master
*/
- static const char * const alc_slave_vols[] = {
- "Front Playback Volume",
- "Surround Playback Volume",
- "Center Playback Volume",
- "LFE Playback Volume",
- "Side Playback Volume",
- "Headphone Playback Volume",
- "Speaker Playback Volume",
- "Mono Playback Volume",
- "Line-Out Playback Volume",
- "PCM Playback Volume",
- NULL,
- };
-
- static const char * const alc_slave_sws[] = {
- "Front Playback Switch",
- "Surround Playback Switch",
- "Center Playback Switch",
- "LFE Playback Switch",
- "Side Playback Switch",
- "Headphone Playback Switch",
- "Speaker Playback Switch",
- "Mono Playback Switch",
- "IEC958 Playback Switch",
- "Line-Out Playback Switch",
- "PCM Playback Switch",
+ static const char * const alc_slave_pfxs[] = {
+ "Front", "Surround", "Center", "LFE", "Side",
+ "Headphone", "Speaker", "Mono", "Line Out",
+ "CLFE", "Bass Speaker", "PCM",
NULL,
};
snd_hda_set_vmaster_tlv(codec, spec->vmaster_nid,
HDA_OUTPUT, vmaster_tlv);
err = snd_hda_add_vmaster(codec, "Master Playback Volume",
- vmaster_tlv, alc_slave_vols);
+ vmaster_tlv, alc_slave_pfxs,
+ "Playback Volume");
if (err < 0)
return err;
}
if (!spec->no_analog &&
!snd_hda_find_mixer_ctl(codec, "Master Playback Switch")) {
- err = snd_hda_add_vmaster(codec, "Master Playback Switch",
- NULL, alc_slave_sws);
+ err = __snd_hda_add_vmaster(codec, "Master Playback Switch",
+ NULL, alc_slave_pfxs,
+ "Playback Switch",
+ true, &spec->vmaster_mute.sw_kctl);
if (err < 0)
return err;
}
int err = __alc_build_controls(codec);
if (err < 0)
return err;
- return snd_hda_jack_add_kctls(codec, &spec->autocfg);
+ err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
+ if (err < 0)
+ return err;
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_BUILD);
+ return 0;
}
*/
static void alc_init_special_input_src(struct hda_codec *codec);
+ static void alc_auto_init_std(struct hda_codec *codec);
static int alc_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
unsigned int i;
+ if (spec->init_hook)
+ spec->init_hook(codec);
+
alc_fix_pll(codec);
alc_auto_init_amp(codec, spec->init_amp);
for (i = 0; i < spec->num_init_verbs; i++)
snd_hda_sequence_write(codec, spec->init_verbs[i]);
alc_init_special_input_src(codec);
-
- if (spec->init_hook)
- spec->init_hook(codec);
+ alc_auto_init_std(codec);
alc_apply_fixup(codec, ALC_FIXUP_ACT_INIT);
"%s Analog", codec->chip_name);
info->name = spec->stream_name_analog;
- if (spec->multiout.dac_nids > 0) {
+ if (spec->multiout.num_dacs > 0) {
p = spec->stream_analog_playback;
if (!p)
p = &alc_pcm_analog_playback;
return channel_name[ch];
}
+ #ifdef CONFIG_SND_HDA_POWER_SAVE
+ /* add the powersave loopback-list entry */
+ static void add_loopback_list(struct alc_spec *spec, hda_nid_t mix, int idx)
+ {
+ struct hda_amp_list *list;
+
+ if (spec->num_loopbacks >= ARRAY_SIZE(spec->loopback_list) - 1)
+ return;
+ list = spec->loopback_list + spec->num_loopbacks;
+ list->nid = mix;
+ list->dir = HDA_INPUT;
+ list->idx = idx;
+ spec->num_loopbacks++;
+ spec->loopback.amplist = spec->loopback_list;
+ }
+ #else
+ #define add_loopback_list(spec, mix, idx) /* NOP */
+ #endif
+
/* create input playback/capture controls for the given pin */
static int new_analog_input(struct alc_spec *spec, hda_nid_t pin,
const char *ctlname, int ctlidx,
HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT));
if (err < 0)
return err;
+ add_loopback_list(spec, mix_nid, idx);
return 0;
}
int max_nums = ARRAY_SIZE(spec->private_adc_nids);
int i, nums = 0;
- if (spec->shared_mic_hp)
- max_nums = 1; /* no multi streams with the shared HP/mic */
-
nid = codec->start_nid;
for (i = 0; i < codec->num_nodes; i++, nid++) {
hda_nid_t src;
return 0;
}
+ static bool alc_is_dac_already_used(struct hda_codec *codec, hda_nid_t nid)
+ {
+ struct alc_spec *spec = codec->spec;
+ int i;
+ if (found_in_nid_list(nid, spec->multiout.dac_nids,
+ ARRAY_SIZE(spec->private_dac_nids)) ||
+ found_in_nid_list(nid, spec->multiout.hp_out_nid,
+ ARRAY_SIZE(spec->multiout.hp_out_nid)) ||
+ found_in_nid_list(nid, spec->multiout.extra_out_nid,
+ ARRAY_SIZE(spec->multiout.extra_out_nid)))
+ return true;
+ for (i = 0; i < spec->multi_ios; i++) {
+ if (spec->multi_io[i].dac == nid)
+ return true;
+ }
+ return false;
+ }
+
/* look for an empty DAC slot */
static hda_nid_t alc_auto_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
{
- struct alc_spec *spec = codec->spec;
hda_nid_t srcs[5];
int i, num;
hda_nid_t nid = alc_auto_mix_to_dac(codec, srcs[i]);
if (!nid)
continue;
- if (found_in_nid_list(nid, spec->multiout.dac_nids,
- ARRAY_SIZE(spec->private_dac_nids)))
- continue;
- if (found_in_nid_list(nid, spec->multiout.hp_out_nid,
- ARRAY_SIZE(spec->multiout.hp_out_nid)))
- continue;
- if (found_in_nid_list(nid, spec->multiout.extra_out_nid,
- ARRAY_SIZE(spec->multiout.extra_out_nid)))
- continue;
- return nid;
+ if (!alc_is_dac_already_used(codec, nid))
+ return nid;
}
return 0;
}
hda_nid_t srcs[5];
int i, num;
+ if (!pin || !dac)
+ return false;
pin = alc_go_down_to_selector(codec, pin);
num = snd_hda_get_connections(codec, pin, srcs, ARRAY_SIZE(srcs));
for (i = 0; i < num; i++) {
static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin)
{
+ struct alc_spec *spec = codec->spec;
hda_nid_t sel = alc_go_down_to_selector(codec, pin);
- if (snd_hda_get_conn_list(codec, sel, NULL) == 1)
+ hda_nid_t nid, nid_found, srcs[5];
+ int i, num = snd_hda_get_connections(codec, sel, srcs,
+ ARRAY_SIZE(srcs));
+ if (num == 1)
return alc_auto_look_for_dac(codec, pin);
- return 0;
+ nid_found = 0;
+ for (i = 0; i < num; i++) {
+ if (srcs[i] == spec->mixer_nid)
+ continue;
+ nid = alc_auto_mix_to_dac(codec, srcs[i]);
+ if (nid && !alc_is_dac_already_used(codec, nid)) {
+ if (nid_found)
+ return 0;
+ nid_found = nid;
+ }
+ }
+ return nid_found;
}
- /* return 0 if no possible DAC is found, 1 if one or more found */
- static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
- const hda_nid_t *pins, hda_nid_t *dacs)
+ /* mark up volume and mute control NIDs: used during badness parsing and
+ * when creating the actual controls
+ */
+ static inline unsigned int get_ctl_pos(unsigned int data)
{
- int i;
+ hda_nid_t nid = get_amp_nid_(data);
+ unsigned int dir;
+ if (snd_BUG_ON(nid >= MAX_VOL_NIDS))
+ return 0;
+ dir = get_amp_direction_(data);
+ return (nid << 1) | dir;
+ }
- if (num_outs && !dacs[0]) {
- dacs[0] = alc_auto_look_for_dac(codec, pins[0]);
- if (!dacs[0])
- return 0;
- }
+ #define is_ctl_used(bits, data) \
+ test_bit(get_ctl_pos(data), bits)
+ #define mark_ctl_usage(bits, data) \
+ set_bit(get_ctl_pos(data), bits)
- for (i = 1; i < num_outs; i++)
- dacs[i] = get_dac_if_single(codec, pins[i]);
- for (i = 1; i < num_outs; i++) {
+ static void clear_vol_marks(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+ memset(spec->vol_ctls, 0, sizeof(spec->vol_ctls));
+ memset(spec->sw_ctls, 0, sizeof(spec->sw_ctls));
+ }
+
+ /* badness definition */
+ enum {
+ /* No primary DAC is found for the main output */
+ BAD_NO_PRIMARY_DAC = 0x10000,
+ /* No DAC is found for the extra output */
+ BAD_NO_DAC = 0x4000,
+ /* No possible multi-ios */
+ BAD_MULTI_IO = 0x103,
+ /* No individual DAC for extra output */
+ BAD_NO_EXTRA_DAC = 0x102,
+ /* No individual DAC for extra surrounds */
+ BAD_NO_EXTRA_SURR_DAC = 0x101,
+ /* Primary DAC shared with main surrounds */
+ BAD_SHARED_SURROUND = 0x100,
+ /* Primary DAC shared with main CLFE */
+ BAD_SHARED_CLFE = 0x10,
+ /* Primary DAC shared with extra surrounds */
+ BAD_SHARED_EXTRA_SURROUND = 0x10,
+ /* Volume widget is shared */
+ BAD_SHARED_VOL = 0x10,
+ };
+
+ static hda_nid_t alc_look_for_out_mute_nid(struct hda_codec *codec,
+ hda_nid_t pin, hda_nid_t dac);
+ static hda_nid_t alc_look_for_out_vol_nid(struct hda_codec *codec,
+ hda_nid_t pin, hda_nid_t dac);
+
+ static int eval_shared_vol_badness(struct hda_codec *codec, hda_nid_t pin,
+ hda_nid_t dac)
+ {
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t nid;
+ unsigned int val;
+ int badness = 0;
+
+ nid = alc_look_for_out_vol_nid(codec, pin, dac);
+ if (nid) {
+ val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
+ if (is_ctl_used(spec->vol_ctls, nid))
+ badness += BAD_SHARED_VOL;
+ else
+ mark_ctl_usage(spec->vol_ctls, val);
+ } else
+ badness += BAD_SHARED_VOL;
+ nid = alc_look_for_out_mute_nid(codec, pin, dac);
+ if (nid) {
+ unsigned int wid_type = get_wcaps_type(get_wcaps(codec, nid));
+ if (wid_type == AC_WID_PIN || wid_type == AC_WID_AUD_OUT)
+ val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
+ else
+ val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT);
+ if (is_ctl_used(spec->sw_ctls, val))
+ badness += BAD_SHARED_VOL;
+ else
+ mark_ctl_usage(spec->sw_ctls, val);
+ } else
+ badness += BAD_SHARED_VOL;
+ return badness;
+ }
+
+ struct badness_table {
+ int no_primary_dac; /* no primary DAC */
+ int no_dac; /* no secondary DACs */
+ int shared_primary; /* primary DAC is shared with main output */
+ int shared_surr; /* secondary DAC shared with main or primary */
+ int shared_clfe; /* third DAC shared with main or primary */
+ int shared_surr_main; /* secondary DAC shared with main/DAC0 */
+ };
+
+ static struct badness_table main_out_badness = {
+ .no_primary_dac = BAD_NO_PRIMARY_DAC,
+ .no_dac = BAD_NO_DAC,
+ .shared_primary = BAD_NO_PRIMARY_DAC,
+ .shared_surr = BAD_SHARED_SURROUND,
+ .shared_clfe = BAD_SHARED_CLFE,
+ .shared_surr_main = BAD_SHARED_SURROUND,
+ };
+
+ static struct badness_table extra_out_badness = {
+ .no_primary_dac = BAD_NO_DAC,
+ .no_dac = BAD_NO_DAC,
+ .shared_primary = BAD_NO_EXTRA_DAC,
+ .shared_surr = BAD_SHARED_EXTRA_SURROUND,
+ .shared_clfe = BAD_SHARED_EXTRA_SURROUND,
+ .shared_surr_main = BAD_NO_EXTRA_SURR_DAC,
+ };
+
+ /* try to assign DACs to pins and return the resultant badness */
+ static int alc_auto_fill_dacs(struct hda_codec *codec, int num_outs,
+ const hda_nid_t *pins, hda_nid_t *dacs,
+ const struct badness_table *bad)
+ {
+ struct alc_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ int i, j;
+ int badness = 0;
+ hda_nid_t dac;
+
+ if (!num_outs)
+ return 0;
+
+ for (i = 0; i < num_outs; i++) {
+ hda_nid_t pin = pins[i];
if (!dacs[i])
- dacs[i] = alc_auto_look_for_dac(codec, pins[i]);
+ dacs[i] = alc_auto_look_for_dac(codec, pin);
+ if (!dacs[i] && !i) {
+ for (j = 1; j < num_outs; j++) {
+ if (alc_auto_is_dac_reachable(codec, pin, dacs[j])) {
+ dacs[0] = dacs[j];
+ dacs[j] = 0;
+ break;
+ }
+ }
+ }
+ dac = dacs[i];
+ if (!dac) {
+ if (alc_auto_is_dac_reachable(codec, pin, dacs[0]))
+ dac = dacs[0];
+ else if (cfg->line_outs > i &&
+ alc_auto_is_dac_reachable(codec, pin,
+ spec->private_dac_nids[i]))
+ dac = spec->private_dac_nids[i];
+ if (dac) {
+ if (!i)
+ badness += bad->shared_primary;
+ else if (i == 1)
+ badness += bad->shared_surr;
+ else
+ badness += bad->shared_clfe;
+ } else if (alc_auto_is_dac_reachable(codec, pin,
+ spec->private_dac_nids[0])) {
+ dac = spec->private_dac_nids[0];
+ badness += bad->shared_surr_main;
+ } else if (!i)
+ badness += bad->no_primary_dac;
+ else
+ badness += bad->no_dac;
+ }
+ if (dac)
+ badness += eval_shared_vol_badness(codec, pin, dac);
}
- return 1;
+
+ return badness;
}
static int alc_auto_fill_multi_ios(struct hda_codec *codec,
- unsigned int location, int offset);
- static hda_nid_t alc_look_for_out_vol_nid(struct hda_codec *codec,
- hda_nid_t pin, hda_nid_t dac);
+ hda_nid_t reference_pin,
+ bool hardwired, int offset);
+
+ static bool alc_map_singles(struct hda_codec *codec, int outs,
+ const hda_nid_t *pins, hda_nid_t *dacs)
+ {
+ int i;
+ bool found = false;
+ for (i = 0; i < outs; i++) {
+ if (dacs[i])
+ continue;
+ dacs[i] = get_dac_if_single(codec, pins[i]);
+ if (dacs[i])
+ found = true;
+ }
+ return found;
+ }
/* fill in the dac_nids table from the parsed pin configuration */
- static int alc_auto_fill_dac_nids(struct hda_codec *codec)
+ static int fill_and_eval_dacs(struct hda_codec *codec,
+ bool fill_hardwired,
+ bool fill_mio_first)
{
struct alc_spec *spec = codec->spec;
struct auto_pin_cfg *cfg = &spec->autocfg;
- unsigned int location, defcfg;
- int num_pins;
- bool redone = false;
- int i;
+ int i, err, badness;
- again:
/* set num_dacs once to full for alc_auto_look_for_dac() */
spec->multiout.num_dacs = cfg->line_outs;
- spec->multiout.hp_out_nid[0] = 0;
- spec->multiout.extra_out_nid[0] = 0;
- memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
spec->multiout.dac_nids = spec->private_dac_nids;
+ memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
+ memset(spec->multiout.hp_out_nid, 0, sizeof(spec->multiout.hp_out_nid));
+ memset(spec->multiout.extra_out_nid, 0, sizeof(spec->multiout.extra_out_nid));
spec->multi_ios = 0;
+ clear_vol_marks(codec);
+ badness = 0;
/* fill hard-wired DACs first */
- if (!redone) {
- for (i = 0; i < cfg->line_outs; i++)
- spec->private_dac_nids[i] =
- get_dac_if_single(codec, cfg->line_out_pins[i]);
- if (cfg->hp_outs)
- spec->multiout.hp_out_nid[0] =
- get_dac_if_single(codec, cfg->hp_pins[0]);
- if (cfg->speaker_outs)
- spec->multiout.extra_out_nid[0] =
- get_dac_if_single(codec, cfg->speaker_pins[0]);
+ if (fill_hardwired) {
+ bool mapped;
+ do {
+ mapped = alc_map_singles(codec, cfg->line_outs,
+ cfg->line_out_pins,
+ spec->private_dac_nids);
+ mapped |= alc_map_singles(codec, cfg->hp_outs,
+ cfg->hp_pins,
+ spec->multiout.hp_out_nid);
+ mapped |= alc_map_singles(codec, cfg->speaker_outs,
+ cfg->speaker_pins,
+ spec->multiout.extra_out_nid);
+ if (fill_mio_first && cfg->line_outs == 1 &&
+ cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+ err = alc_auto_fill_multi_ios(codec, cfg->line_out_pins[0], true, 0);
+ if (!err)
+ mapped = true;
+ }
+ } while (mapped);
}
- for (i = 0; i < cfg->line_outs; i++) {
- hda_nid_t pin = cfg->line_out_pins[i];
- if (spec->private_dac_nids[i])
- continue;
- spec->private_dac_nids[i] = alc_auto_look_for_dac(codec, pin);
- if (!spec->private_dac_nids[i] && !redone) {
- /* if we can't find primary DACs, re-probe without
- * checking the hard-wired DACs
- */
- redone = true;
- goto again;
- }
- }
+ badness += alc_auto_fill_dacs(codec, cfg->line_outs, cfg->line_out_pins,
+ spec->private_dac_nids,
+ &main_out_badness);
/* re-count num_dacs and squash invalid entries */
spec->multiout.num_dacs = 0;
}
}
- if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+ if (fill_mio_first &&
+ cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
/* try to fill multi-io first */
- defcfg = snd_hda_codec_get_pincfg(codec, cfg->line_out_pins[0]);
- location = get_defcfg_location(defcfg);
-
- num_pins = alc_auto_fill_multi_ios(codec, location, 0);
- if (num_pins > 0) {
- spec->multi_ios = num_pins;
- spec->ext_channel_count = 2;
- spec->multiout.num_dacs = num_pins + 1;
- }
+ err = alc_auto_fill_multi_ios(codec, cfg->line_out_pins[0], false, 0);
+ if (err < 0)
+ return err;
+ /* we don't count badness at this stage yet */
}
- if (cfg->line_out_type != AUTO_PIN_HP_OUT)
- alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins,
- spec->multiout.hp_out_nid);
+ if (cfg->line_out_type != AUTO_PIN_HP_OUT) {
+ err = alc_auto_fill_dacs(codec, cfg->hp_outs, cfg->hp_pins,
+ spec->multiout.hp_out_nid,
+ &extra_out_badness);
+ if (err < 0)
+ return err;
+ badness += err;
+ }
if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
- int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs,
- cfg->speaker_pins,
- spec->multiout.extra_out_nid);
- /* if no speaker volume is assigned, try again as the primary
- * output
- */
- if (!err && cfg->speaker_outs > 0 &&
+ err = alc_auto_fill_dacs(codec, cfg->speaker_outs,
+ cfg->speaker_pins,
+ spec->multiout.extra_out_nid,
+ &extra_out_badness);
+ if (err < 0)
+ return err;
+ badness += err;
+ }
+ if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+ err = alc_auto_fill_multi_ios(codec, cfg->line_out_pins[0], false, 0);
+ if (err < 0)
+ return err;
+ badness += err;
+ }
+ if (cfg->hp_outs && cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
+ /* try multi-ios with HP + inputs */
+ int offset = 0;
+ if (cfg->line_outs >= 3)
+ offset = 1;
+ err = alc_auto_fill_multi_ios(codec, cfg->hp_pins[0], false,
+ offset);
+ if (err < 0)
+ return err;
+ badness += err;
+ }
+
+ if (spec->multi_ios == 2) {
+ for (i = 0; i < 2; i++)
+ spec->private_dac_nids[spec->multiout.num_dacs++] =
+ spec->multi_io[i].dac;
+ spec->ext_channel_count = 2;
+ } else if (spec->multi_ios) {
+ spec->multi_ios = 0;
+ badness += BAD_MULTI_IO;
+ }
+
+ return badness;
+ }
+
+ #define DEBUG_BADNESS
+
+ #ifdef DEBUG_BADNESS
+ #define debug_badness snd_printdd
+ #else
+ #define debug_badness(...)
+ #endif
+
+ static void debug_show_configs(struct alc_spec *spec, struct auto_pin_cfg *cfg)
+ {
+ debug_badness("multi_outs = %x/%x/%x/%x : %x/%x/%x/%x\n",
+ cfg->line_out_pins[0], cfg->line_out_pins[1],
+ cfg->line_out_pins[2], cfg->line_out_pins[3],
+ spec->multiout.dac_nids[0],
+ spec->multiout.dac_nids[1],
+ spec->multiout.dac_nids[2],
+ spec->multiout.dac_nids[3]);
+ if (spec->multi_ios > 0)
+ debug_badness("multi_ios(%d) = %x/%x : %x/%x\n",
+ spec->multi_ios,
+ spec->multi_io[0].pin, spec->multi_io[1].pin,
+ spec->multi_io[0].dac, spec->multi_io[1].dac);
+ debug_badness("hp_outs = %x/%x/%x/%x : %x/%x/%x/%x\n",
+ cfg->hp_pins[0], cfg->hp_pins[1],
+ cfg->hp_pins[2], cfg->hp_pins[3],
+ spec->multiout.hp_out_nid[0],
+ spec->multiout.hp_out_nid[1],
+ spec->multiout.hp_out_nid[2],
+ spec->multiout.hp_out_nid[3]);
+ debug_badness("spk_outs = %x/%x/%x/%x : %x/%x/%x/%x\n",
+ cfg->speaker_pins[0], cfg->speaker_pins[1],
+ cfg->speaker_pins[2], cfg->speaker_pins[3],
+ spec->multiout.extra_out_nid[0],
+ spec->multiout.extra_out_nid[1],
+ spec->multiout.extra_out_nid[2],
+ spec->multiout.extra_out_nid[3]);
+ }
+
+ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ struct auto_pin_cfg *best_cfg;
+ int best_badness = INT_MAX;
+ int badness;
+ bool fill_hardwired = true, fill_mio_first = true;
+ bool best_wired = true, best_mio = true;
+ bool hp_spk_swapped = false;
+
+ best_cfg = kmalloc(sizeof(*best_cfg), GFP_KERNEL);
+ if (!best_cfg)
+ return -ENOMEM;
+ *best_cfg = *cfg;
+
+ for (;;) {
+ badness = fill_and_eval_dacs(codec, fill_hardwired,
+ fill_mio_first);
+ if (badness < 0)
+ return badness;
+ debug_badness("==> lo_type=%d, wired=%d, mio=%d, badness=0x%x\n",
+ cfg->line_out_type, fill_hardwired, fill_mio_first,
+ badness);
+ debug_show_configs(spec, cfg);
+ if (badness < best_badness) {
+ best_badness = badness;
+ *best_cfg = *cfg;
+ best_wired = fill_hardwired;
+ best_mio = fill_mio_first;
+ }
+ if (!badness)
+ break;
+ fill_mio_first = !fill_mio_first;
+ if (!fill_mio_first)
+ continue;
+ fill_hardwired = !fill_hardwired;
+ if (!fill_hardwired)
+ continue;
+ if (hp_spk_swapped)
+ break;
+ hp_spk_swapped = true;
+ if (cfg->speaker_outs > 0 &&
cfg->line_out_type == AUTO_PIN_HP_OUT) {
cfg->hp_outs = cfg->line_outs;
memcpy(cfg->hp_pins, cfg->line_out_pins,
cfg->speaker_outs = 0;
memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
- redone = false;
- goto again;
- }
+ fill_hardwired = true;
+ continue;
+ }
+ if (cfg->hp_outs > 0 &&
+ cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
+ cfg->speaker_outs = cfg->line_outs;
+ memcpy(cfg->speaker_pins, cfg->line_out_pins,
+ sizeof(cfg->speaker_pins));
+ cfg->line_outs = cfg->hp_outs;
+ memcpy(cfg->line_out_pins, cfg->hp_pins,
+ sizeof(cfg->hp_pins));
+ cfg->hp_outs = 0;
+ memset(cfg->hp_pins, 0, sizeof(cfg->hp_pins));
+ cfg->line_out_type = AUTO_PIN_HP_OUT;
+ fill_hardwired = true;
+ continue;
+ }
+ break;
}
- if (!spec->multi_ios &&
- cfg->line_out_type == AUTO_PIN_SPEAKER_OUT &&
- cfg->hp_outs) {
- /* try multi-ios with HP + inputs */
- defcfg = snd_hda_codec_get_pincfg(codec, cfg->hp_pins[0]);
- location = get_defcfg_location(defcfg);
-
- num_pins = alc_auto_fill_multi_ios(codec, location, 1);
- if (num_pins > 0) {
- spec->multi_ios = num_pins;
- spec->ext_channel_count = 2;
- spec->multiout.num_dacs = num_pins + 1;
- }
+ if (badness) {
+ *cfg = *best_cfg;
+ fill_and_eval_dacs(codec, best_wired, best_mio);
}
+ debug_badness("==> Best config: lo_type=%d, wired=%d, mio=%d\n",
+ cfg->line_out_type, best_wired, best_mio);
+ debug_show_configs(spec, cfg);
if (cfg->line_out_pins[0])
spec->vmaster_nid =
alc_look_for_out_vol_nid(codec, cfg->line_out_pins[0],
spec->multiout.dac_nids[0]);
- return 0;
- }
- static inline unsigned int get_ctl_pos(unsigned int data)
- {
- hda_nid_t nid = get_amp_nid_(data);
- unsigned int dir = get_amp_direction_(data);
- return (nid << 1) | dir;
+ /* clear the bitmap flags for creating controls */
+ clear_vol_marks(codec);
+ kfree(best_cfg);
+ return 0;
}
- #define is_ctl_used(bits, data) \
- test_bit(get_ctl_pos(data), bits)
- #define mark_ctl_usage(bits, data) \
- set_bit(get_ctl_pos(data), bits)
-
static int alc_auto_add_vol_ctl(struct hda_codec *codec,
const char *pfx, int cidx,
hda_nid_t nid, unsigned int chs)
dac = spec->multiout.dac_nids[i];
if (!dac)
continue;
- if (i >= cfg->line_outs)
+ if (i >= cfg->line_outs) {
pin = spec->multi_io[i - 1].pin;
- else
+ index = 0;
+ name = channel_name[i];
+ } else {
pin = cfg->line_out_pins[i];
+ name = alc_get_line_out_pfx(spec, i, true, &index);
+ }
sw = alc_look_for_out_mute_nid(codec, pin, dac);
vol = alc_look_for_out_vol_nid(codec, pin, dac);
- name = alc_get_line_out_pfx(spec, i, true, &index);
if (!name || !strcmp(name, "CLFE")) {
/* Center/LFE */
err = alc_auto_add_vol_ctl(codec, "Center", 0, vol, 1);
return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0);
}
- if (dacs[num_pins - 1]) {
- /* OK, we have a multi-output system with individual volumes */
- for (i = 0; i < num_pins; i++) {
- if (num_pins >= 3) {
- snprintf(name, sizeof(name), "%s %s",
- pfx, channel_name[i]);
- err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
- name, 0);
- } else {
- err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
- pfx, i);
- }
- if (err < 0)
- return err;
- }
- return 0;
- }
-
- /* Let's create a bind-controls */
- ctl = new_bind_ctl(codec, num_pins, &snd_hda_bind_sw);
- if (!ctl)
- return -ENOMEM;
- n = 0;
for (i = 0; i < num_pins; i++) {
- if (get_wcaps(codec, pins[i]) & AC_WCAP_OUT_AMP)
- ctl->values[n++] =
- HDA_COMPOSE_AMP_VAL(pins[i], 3, 0, HDA_OUTPUT);
- }
- if (n) {
- snprintf(name, sizeof(name), "%s Playback Switch", pfx);
- err = add_control(spec, ALC_CTL_BIND_SW, name, 0, (long)ctl);
+ hda_nid_t dac;
+ if (dacs[num_pins - 1])
+ dac = dacs[i]; /* with individual volumes */
+ else
+ dac = 0;
+ if (num_pins == 2 && i == 1 && !strcmp(pfx, "Speaker")) {
+ err = alc_auto_create_extra_out(codec, pins[i], dac,
+ "Bass Speaker", 0);
+ } else if (num_pins >= 3) {
+ snprintf(name, sizeof(name), "%s %s",
+ pfx, channel_name[i]);
+ err = alc_auto_create_extra_out(codec, pins[i], dac,
+ name, 0);
+ } else {
+ err = alc_auto_create_extra_out(codec, pins[i], dac,
+ pfx, i);
+ }
if (err < 0)
return err;
}
+ if (dacs[num_pins - 1])
+ return 0;
+ /* Let's create a bind-controls for volumes */
ctl = new_bind_ctl(codec, num_pins, &snd_hda_bind_vol);
if (!ctl)
return -ENOMEM;
}
}
+ /* check whether the given pin can be a multi-io pin */
+ static bool can_be_multiio_pin(struct hda_codec *codec,
+ unsigned int location, hda_nid_t nid)
+ {
+ unsigned int defcfg, caps;
+
+ defcfg = snd_hda_codec_get_pincfg(codec, nid);
+ if (get_defcfg_connect(defcfg) != AC_JACK_PORT_COMPLEX)
+ return false;
+ if (location && get_defcfg_location(defcfg) != location)
+ return false;
+ caps = snd_hda_query_pin_caps(codec, nid);
+ if (!(caps & AC_PINCAP_OUT))
+ return false;
+ return true;
+ }
+
/*
* multi-io helper
+ *
+ * When hardwired is set, try to fill only hard-wired pins, and return
+ * zero if any pins are filled, non-zero if nothing is found.
+ * When hardwired is off, try to fill possible input pins, and return
+ * the badness value.
*/
static int alc_auto_fill_multi_ios(struct hda_codec *codec,
- unsigned int location,
- int offset)
+ hda_nid_t reference_pin,
+ bool hardwired, int offset)
{
struct alc_spec *spec = codec->spec;
struct auto_pin_cfg *cfg = &spec->autocfg;
- hda_nid_t prime_dac = spec->private_dac_nids[0];
- int type, i, dacs, num_pins = 0;
+ int type, i, j, dacs, num_pins, old_pins;
+ unsigned int defcfg = snd_hda_codec_get_pincfg(codec, reference_pin);
+ unsigned int location = get_defcfg_location(defcfg);
+ int badness = 0;
- dacs = spec->multiout.num_dacs;
+ old_pins = spec->multi_ios;
+ if (old_pins >= 2)
+ goto end_fill;
+
+ num_pins = 0;
for (type = AUTO_PIN_LINE_IN; type >= AUTO_PIN_MIC; type--) {
for (i = 0; i < cfg->num_inputs; i++) {
- hda_nid_t nid = cfg->inputs[i].pin;
- hda_nid_t dac = 0;
- unsigned int defcfg, caps;
if (cfg->inputs[i].type != type)
continue;
- defcfg = snd_hda_codec_get_pincfg(codec, nid);
- if (get_defcfg_connect(defcfg) != AC_JACK_PORT_COMPLEX)
- continue;
- if (location && get_defcfg_location(defcfg) != location)
- continue;
- caps = snd_hda_query_pin_caps(codec, nid);
- if (!(caps & AC_PINCAP_OUT))
- continue;
- if (offset && offset + num_pins < dacs) {
- dac = spec->private_dac_nids[offset + num_pins];
+ if (can_be_multiio_pin(codec, location,
+ cfg->inputs[i].pin))
+ num_pins++;
+ }
+ }
+ if (num_pins < 2)
+ goto end_fill;
+
+ dacs = spec->multiout.num_dacs;
+ for (type = AUTO_PIN_LINE_IN; type >= AUTO_PIN_MIC; type--) {
+ for (i = 0; i < cfg->num_inputs; i++) {
+ hda_nid_t nid = cfg->inputs[i].pin;
+ hda_nid_t dac = 0;
+
+ if (cfg->inputs[i].type != type)
+ continue;
+ if (!can_be_multiio_pin(codec, location, nid))
+ continue;
+ for (j = 0; j < spec->multi_ios; j++) {
+ if (nid == spec->multi_io[j].pin)
+ break;
+ }
+ if (j < spec->multi_ios)
+ continue;
+
+ if (offset && offset + spec->multi_ios < dacs) {
+ dac = spec->private_dac_nids[offset + spec->multi_ios];
if (!alc_auto_is_dac_reachable(codec, nid, dac))
dac = 0;
}
- if (!dac)
+ if (hardwired)
+ dac = get_dac_if_single(codec, nid);
+ else if (!dac)
dac = alc_auto_look_for_dac(codec, nid);
- if (!dac)
+ if (!dac) {
+ badness++;
continue;
- spec->multi_io[num_pins].pin = nid;
- spec->multi_io[num_pins].dac = dac;
- num_pins++;
- spec->private_dac_nids[spec->multiout.num_dacs++] = dac;
+ }
+ spec->multi_io[spec->multi_ios].pin = nid;
+ spec->multi_io[spec->multi_ios].dac = dac;
+ spec->multi_ios++;
+ if (spec->multi_ios >= 2)
+ break;
}
}
- spec->multiout.num_dacs = dacs;
- if (num_pins < 2) {
- /* clear up again */
- memset(spec->private_dac_nids + dacs, 0,
- sizeof(hda_nid_t) * (AUTO_CFG_MAX_OUTS - dacs));
- spec->private_dac_nids[0] = prime_dac;
- return 0;
+ end_fill:
+ if (badness)
+ badness = BAD_MULTI_IO;
+ if (old_pins == spec->multi_ios) {
+ if (hardwired)
+ return 1; /* nothing found */
+ else
+ return badness; /* no badness if nothing found */
+ }
+ if (!hardwired && spec->multi_ios < 2) {
+ spec->multi_ios = old_pins;
+ return badness;
}
- return num_pins;
+
+ return 0;
}
static int alc_auto_ch_mode_info(struct snd_kcontrol *kcontrol,
if (spec->dyn_adc_switch)
return;
+ again:
nums = 0;
for (n = 0; n < spec->num_adc_nids; n++) {
hda_nid_t cap = spec->private_capsrc_nids[n];
if (!nums) {
/* check whether ADC-switch is possible */
if (!alc_check_dyn_adc_switch(codec)) {
+ if (spec->shared_mic_hp) {
+ spec->shared_mic_hp = 0;
+ spec->private_imux[0].num_items = 1;
+ goto again;
+ }
printk(KERN_WARNING "hda_codec: %s: no valid ADC found;"
" using fallback 0x%x\n",
codec->chip_name, spec->private_adc_nids[0]);
if (spec->auto_mic)
alc_auto_mic_check_imux(codec); /* check auto-mic setups */
- else if (spec->input_mux->num_items == 1)
+ else if (spec->input_mux->num_items == 1 || spec->shared_mic_hp)
spec->num_adc_nids = 1; /* reduce to a single ADC */
}
else
nums = spec->num_adc_nids;
for (c = 0; c < nums; c++)
- alc_mux_select(codec, 0, spec->cur_mux[c], true);
+ alc_mux_select(codec, c, spec->cur_mux[c], true);
}
/* add mic boosts if needed */
SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1),
SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1),
+ SND_PCI_QUIRK(0x1458, 0xa002, "GA-MA790X", 1),
SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
{}
};
if (spec->kctls.list)
add_mixer(spec, spec->kctls.list);
+ if (!spec->no_analog && !spec->cap_mixer)
+ set_capture_mixer(codec);
+
return 1;
}
return alc_parse_auto_config(codec, alc880_ignore, alc880_ssids);
}
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- static const struct hda_amp_list alc880_loopbacks[] = {
- { 0x0b, HDA_INPUT, 0 },
- { 0x0b, HDA_INPUT, 1 },
- { 0x0b, HDA_INPUT, 2 },
- { 0x0b, HDA_INPUT, 3 },
- { 0x0b, HDA_INPUT, 4 },
- { } /* end */
- };
- #endif
-
/*
* ALC880 fix-ups
*/
enum {
+ ALC880_FIXUP_GPIO1,
ALC880_FIXUP_GPIO2,
ALC880_FIXUP_MEDION_RIM,
+ ALC880_FIXUP_LG,
+ ALC880_FIXUP_W810,
+ ALC880_FIXUP_EAPD_COEF,
+ ALC880_FIXUP_TCL_S700,
+ ALC880_FIXUP_VOL_KNOB,
+ ALC880_FIXUP_FUJITSU,
+ ALC880_FIXUP_F1734,
+ ALC880_FIXUP_UNIWILL,
+ ALC880_FIXUP_UNIWILL_DIG,
+ ALC880_FIXUP_Z71V,
+ ALC880_FIXUP_3ST_BASE,
+ ALC880_FIXUP_3ST,
+ ALC880_FIXUP_3ST_DIG,
+ ALC880_FIXUP_5ST_BASE,
+ ALC880_FIXUP_5ST,
+ ALC880_FIXUP_5ST_DIG,
+ ALC880_FIXUP_6ST_BASE,
+ ALC880_FIXUP_6ST,
+ ALC880_FIXUP_6ST_DIG,
};
+ /* enable the volume-knob widget support on NID 0x21 */
+ static void alc880_fixup_vol_knob(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+ if (action == ALC_FIXUP_ACT_PROBE)
+ snd_hda_jack_detect_enable(codec, 0x21, ALC_DCVOL_EVENT);
+ }
+
static const struct alc_fixup alc880_fixups[] = {
+ [ALC880_FIXUP_GPIO1] = {
+ .type = ALC_FIXUP_VERBS,
+ .v.verbs = alc_gpio1_init_verbs,
+ },
[ALC880_FIXUP_GPIO2] = {
.type = ALC_FIXUP_VERBS,
.v.verbs = alc_gpio2_init_verbs,
.chained = true,
.chain_id = ALC880_FIXUP_GPIO2,
},
+ [ALC880_FIXUP_LG] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ /* disable bogus unused pins */
+ { 0x16, 0x411111f0 },
+ { 0x18, 0x411111f0 },
+ { 0x1a, 0x411111f0 },
+ { }
+ }
+ },
+ [ALC880_FIXUP_W810] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ /* disable bogus unused pins */
+ { 0x17, 0x411111f0 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC880_FIXUP_GPIO2,
+ },
+ [ALC880_FIXUP_EAPD_COEF] = {
+ .type = ALC_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* change to EAPD mode */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x3060 },
+ {}
+ },
+ },
+ [ALC880_FIXUP_TCL_S700] = {
+ .type = ALC_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* change to EAPD mode */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x3070 },
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC880_FIXUP_GPIO2,
+ },
+ [ALC880_FIXUP_VOL_KNOB] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc880_fixup_vol_knob,
+ },
+ [ALC880_FIXUP_FUJITSU] = {
+ /* override all pins as BIOS on old Amilo is broken */
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x14, 0x0121411f }, /* HP */
+ { 0x15, 0x99030120 }, /* speaker */
+ { 0x16, 0x99030130 }, /* bass speaker */
+ { 0x17, 0x411111f0 }, /* N/A */
+ { 0x18, 0x411111f0 }, /* N/A */
+ { 0x19, 0x01a19950 }, /* mic-in */
+ { 0x1a, 0x411111f0 }, /* N/A */
+ { 0x1b, 0x411111f0 }, /* N/A */
+ { 0x1c, 0x411111f0 }, /* N/A */
+ { 0x1d, 0x411111f0 }, /* N/A */
+ { 0x1e, 0x01454140 }, /* SPDIF out */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC880_FIXUP_VOL_KNOB,
+ },
+ [ALC880_FIXUP_F1734] = {
+ /* almost compatible with FUJITSU, but no bass and SPDIF */
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x14, 0x0121411f }, /* HP */
+ { 0x15, 0x99030120 }, /* speaker */
+ { 0x16, 0x411111f0 }, /* N/A */
+ { 0x17, 0x411111f0 }, /* N/A */
+ { 0x18, 0x411111f0 }, /* N/A */
+ { 0x19, 0x01a19950 }, /* mic-in */
+ { 0x1a, 0x411111f0 }, /* N/A */
+ { 0x1b, 0x411111f0 }, /* N/A */
+ { 0x1c, 0x411111f0 }, /* N/A */
+ { 0x1d, 0x411111f0 }, /* N/A */
+ { 0x1e, 0x411111f0 }, /* N/A */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC880_FIXUP_VOL_KNOB,
+ },
+ [ALC880_FIXUP_UNIWILL] = {
+ /* need to fix HP and speaker pins to be parsed correctly */
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x14, 0x0121411f }, /* HP */
+ { 0x15, 0x99030120 }, /* speaker */
+ { 0x16, 0x99030130 }, /* bass speaker */
+ { }
+ },
+ },
+ [ALC880_FIXUP_UNIWILL_DIG] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ /* disable bogus unused pins */
+ { 0x17, 0x411111f0 },
+ { 0x19, 0x411111f0 },
+ { 0x1b, 0x411111f0 },
+ { 0x1f, 0x411111f0 },
+ { }
+ }
+ },
+ [ALC880_FIXUP_Z71V] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ /* set up all the pins as the BIOS is utterly broken */
+ { 0x14, 0x99030120 }, /* speaker */
+ { 0x15, 0x0121411f }, /* HP */
+ { 0x16, 0x411111f0 }, /* N/A */
+ { 0x17, 0x411111f0 }, /* N/A */
+ { 0x18, 0x01a19950 }, /* mic-in */
+ { 0x19, 0x411111f0 }, /* N/A */
+ { 0x1a, 0x01813031 }, /* line-in */
+ { 0x1b, 0x411111f0 }, /* N/A */
+ { 0x1c, 0x411111f0 }, /* N/A */
+ { 0x1d, 0x411111f0 }, /* N/A */
+ { 0x1e, 0x0144111e }, /* SPDIF */
+ { }
+ }
+ },
+ [ALC880_FIXUP_3ST_BASE] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x14, 0x01014010 }, /* line-out */
+ { 0x15, 0x411111f0 }, /* N/A */
+ { 0x16, 0x411111f0 }, /* N/A */
+ { 0x17, 0x411111f0 }, /* N/A */
+ { 0x18, 0x01a19c30 }, /* mic-in */
+ { 0x19, 0x0121411f }, /* HP */
+ { 0x1a, 0x01813031 }, /* line-in */
+ { 0x1b, 0x02a19c40 }, /* front-mic */
+ { 0x1c, 0x411111f0 }, /* N/A */
+ { 0x1d, 0x411111f0 }, /* N/A */
+ /* 0x1e is filled in below */
+ { 0x1f, 0x411111f0 }, /* N/A */
+ { }
+ }
+ },
+ [ALC880_FIXUP_3ST] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x1e, 0x411111f0 }, /* N/A */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC880_FIXUP_3ST_BASE,
+ },
+ [ALC880_FIXUP_3ST_DIG] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x1e, 0x0144111e }, /* SPDIF */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC880_FIXUP_3ST_BASE,
+ },
+ [ALC880_FIXUP_5ST_BASE] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x14, 0x01014010 }, /* front */
+ { 0x15, 0x411111f0 }, /* N/A */
+ { 0x16, 0x01011411 }, /* CLFE */
+ { 0x17, 0x01016412 }, /* surr */
+ { 0x18, 0x01a19c30 }, /* mic-in */
+ { 0x19, 0x0121411f }, /* HP */
+ { 0x1a, 0x01813031 }, /* line-in */
+ { 0x1b, 0x02a19c40 }, /* front-mic */
+ { 0x1c, 0x411111f0 }, /* N/A */
+ { 0x1d, 0x411111f0 }, /* N/A */
+ /* 0x1e is filled in below */
+ { 0x1f, 0x411111f0 }, /* N/A */
+ { }
+ }
+ },
+ [ALC880_FIXUP_5ST] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x1e, 0x411111f0 }, /* N/A */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC880_FIXUP_5ST_BASE,
+ },
+ [ALC880_FIXUP_5ST_DIG] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x1e, 0x0144111e }, /* SPDIF */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC880_FIXUP_5ST_BASE,
+ },
+ [ALC880_FIXUP_6ST_BASE] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x14, 0x01014010 }, /* front */
+ { 0x15, 0x01016412 }, /* surr */
+ { 0x16, 0x01011411 }, /* CLFE */
+ { 0x17, 0x01012414 }, /* side */
+ { 0x18, 0x01a19c30 }, /* mic-in */
+ { 0x19, 0x02a19c40 }, /* front-mic */
+ { 0x1a, 0x01813031 }, /* line-in */
+ { 0x1b, 0x0121411f }, /* HP */
+ { 0x1c, 0x411111f0 }, /* N/A */
+ { 0x1d, 0x411111f0 }, /* N/A */
+ /* 0x1e is filled in below */
+ { 0x1f, 0x411111f0 }, /* N/A */
+ { }
+ }
+ },
+ [ALC880_FIXUP_6ST] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x1e, 0x411111f0 }, /* N/A */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC880_FIXUP_6ST_BASE,
+ },
+ [ALC880_FIXUP_6ST_DIG] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x1e, 0x0144111e }, /* SPDIF */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC880_FIXUP_6ST_BASE,
+ },
};
static const struct snd_pci_quirk alc880_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1019, 0x0f69, "Coeus G610P", ALC880_FIXUP_W810),
+ SND_PCI_QUIRK(0x1043, 0x1964, "ASUS Z71V", ALC880_FIXUP_Z71V),
+ SND_PCI_QUIRK_VENDOR(0x1043, "ASUS", ALC880_FIXUP_GPIO1),
+ SND_PCI_QUIRK(0x1558, 0x5401, "Clevo GPIO2", ALC880_FIXUP_GPIO2),
+ SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", ALC880_FIXUP_EAPD_COEF),
+ SND_PCI_QUIRK(0x1584, 0x9050, "Uniwill", ALC880_FIXUP_UNIWILL_DIG),
+ SND_PCI_QUIRK(0x1584, 0x9054, "Uniwill", ALC880_FIXUP_F1734),
+ SND_PCI_QUIRK(0x1584, 0x9070, "Uniwill", ALC880_FIXUP_UNIWILL),
+ SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_FIXUP_VOL_KNOB),
+ SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
+ SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
+ SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
+ SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
+ SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU),
+ SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG),
+ SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG),
+ SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG),
+ SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700),
+
+ /* Below are the entries copied from alc880_quirks.c.
+ * It's not certain whether the BIOS sets the correct pin-config table
+ * on these machines, so they are kept to stay compatible with
+ * the old static quirks. Once they are confirmed to work without
+ * these overrides, it would be better to remove them.
+ */
+ SND_PCI_QUIRK(0x1019, 0xa880, "ECS", ALC880_FIXUP_5ST_DIG),
+ SND_PCI_QUIRK(0x1019, 0xa884, "Acer APFV", ALC880_FIXUP_6ST),
+ SND_PCI_QUIRK(0x1025, 0x0070, "ULI", ALC880_FIXUP_3ST_DIG),
+ SND_PCI_QUIRK(0x1025, 0x0077, "ULI", ALC880_FIXUP_6ST_DIG),
+ SND_PCI_QUIRK(0x1025, 0x0078, "ULI", ALC880_FIXUP_6ST_DIG),
+ SND_PCI_QUIRK(0x1025, 0x0087, "ULI", ALC880_FIXUP_6ST_DIG),
+ SND_PCI_QUIRK(0x1025, 0xe309, "ULI", ALC880_FIXUP_3ST_DIG),
+ SND_PCI_QUIRK(0x1025, 0xe310, "ULI", ALC880_FIXUP_3ST),
+ SND_PCI_QUIRK(0x1039, 0x1234, NULL, ALC880_FIXUP_6ST_DIG),
+ SND_PCI_QUIRK(0x104d, 0x81a0, "Sony", ALC880_FIXUP_3ST),
+ SND_PCI_QUIRK(0x104d, 0x81d6, "Sony", ALC880_FIXUP_3ST),
+ SND_PCI_QUIRK(0x107b, 0x3032, "Gateway", ALC880_FIXUP_5ST),
+ SND_PCI_QUIRK(0x107b, 0x3033, "Gateway", ALC880_FIXUP_5ST),
+ SND_PCI_QUIRK(0x107b, 0x4039, "Gateway", ALC880_FIXUP_5ST),
+ SND_PCI_QUIRK(0x1297, 0xc790, "Shuttle ST20G5", ALC880_FIXUP_6ST_DIG),
+ SND_PCI_QUIRK(0x1458, 0xa102, "Gigabyte K8", ALC880_FIXUP_6ST_DIG),
+ SND_PCI_QUIRK(0x1462, 0x1150, "MSI", ALC880_FIXUP_6ST_DIG),
+ SND_PCI_QUIRK(0x1509, 0x925d, "FIC P4M", ALC880_FIXUP_6ST_DIG),
+ SND_PCI_QUIRK(0x1565, 0x8202, "Biostar", ALC880_FIXUP_5ST_DIG),
+ SND_PCI_QUIRK(0x1695, 0x400d, "EPoX", ALC880_FIXUP_5ST_DIG),
+ SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_FIXUP_5ST_DIG),
+ SND_PCI_QUIRK(0x2668, 0x8086, NULL, ALC880_FIXUP_6ST_DIG), /* broken BIOS */
+ SND_PCI_QUIRK(0x8086, 0x2668, NULL, ALC880_FIXUP_6ST_DIG),
+ SND_PCI_QUIRK(0x8086, 0xa100, "Intel mobo", ALC880_FIXUP_5ST_DIG),
+ SND_PCI_QUIRK(0x8086, 0xd400, "Intel mobo", ALC880_FIXUP_5ST_DIG),
+ SND_PCI_QUIRK(0x8086, 0xd401, "Intel mobo", ALC880_FIXUP_5ST_DIG),
+ SND_PCI_QUIRK(0x8086, 0xd402, "Intel mobo", ALC880_FIXUP_3ST_DIG),
+ SND_PCI_QUIRK(0x8086, 0xe224, "Intel mobo", ALC880_FIXUP_5ST_DIG),
+ SND_PCI_QUIRK(0x8086, 0xe305, "Intel mobo", ALC880_FIXUP_3ST_DIG),
+ SND_PCI_QUIRK(0x8086, 0xe308, "Intel mobo", ALC880_FIXUP_3ST_DIG),
+ SND_PCI_QUIRK(0x8086, 0xe400, "Intel mobo", ALC880_FIXUP_5ST_DIG),
+ SND_PCI_QUIRK(0x8086, 0xe401, "Intel mobo", ALC880_FIXUP_5ST_DIG),
+ SND_PCI_QUIRK(0x8086, 0xe402, "Intel mobo", ALC880_FIXUP_5ST_DIG),
+ /* default Intel */
+ SND_PCI_QUIRK_VENDOR(0x8086, "Intel mobo", ALC880_FIXUP_3ST),
+ SND_PCI_QUIRK(0xa0a0, 0x0560, "AOpen i915GMm-HFS", ALC880_FIXUP_5ST_DIG),
+ SND_PCI_QUIRK(0xe803, 0x1019, NULL, ALC880_FIXUP_6ST_DIG),
{}
};
+ static const struct alc_model_fixup alc880_fixup_models[] = {
+ {.id = ALC880_FIXUP_3ST, .name = "3stack"},
+ {.id = ALC880_FIXUP_3ST_DIG, .name = "3stack-digout"},
+ {.id = ALC880_FIXUP_5ST, .name = "5stack"},
+ {.id = ALC880_FIXUP_5ST_DIG, .name = "5stack-digout"},
+ {.id = ALC880_FIXUP_6ST, .name = "6stack"},
+ {.id = ALC880_FIXUP_6ST_DIG, .name = "6stack-digout"},
+ {}
+ };
- /*
- * board setups
- */
- #ifdef CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS
- #define alc_board_config \
- snd_hda_check_board_config
- #define alc_board_codec_sid_config \
- snd_hda_check_board_codec_sid_config
- #include "alc_quirks.c"
- #else
- #define alc_board_config(codec, nums, models, tbl) -1
- #define alc_board_codec_sid_config(codec, nums, models, tbl) -1
- #define setup_preset(codec, x) /* NOP */
- #endif
/*
* OK, here we have finally the patch for ALC880
*/
- #ifdef CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS
- #include "alc880_quirks.c"
- #endif
-
static int patch_alc880(struct hda_codec *codec)
{
struct alc_spec *spec;
- int board_config;
int err;
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
spec->mixer_nid = 0x0b;
spec->need_dac_fix = 1;
- board_config = alc_board_config(codec, ALC880_MODEL_LAST,
- alc880_models, alc880_cfg_tbl);
- if (board_config < 0) {
- printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
- codec->chip_name);
- board_config = ALC_MODEL_AUTO;
- }
-
- if (board_config == ALC_MODEL_AUTO) {
- alc_pick_fixup(codec, NULL, alc880_fixup_tbl, alc880_fixups);
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
- }
-
- if (board_config == ALC_MODEL_AUTO) {
- /* automatic parse from the BIOS config */
- err = alc880_parse_auto_config(codec);
- if (err < 0)
- goto error;
- #ifdef CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS
- else if (!err) {
- printk(KERN_INFO
- "hda_codec: Cannot set up configuration "
- "from BIOS. Using 3-stack mode...\n");
- board_config = ALC880_3ST;
- }
- #endif
- }
-
- if (board_config != ALC_MODEL_AUTO) {
- spec->vmaster_nid = 0x0c;
- setup_preset(codec, &alc880_presets[board_config]);
- }
-
- if (!spec->no_analog && !spec->adc_nids) {
- alc_auto_fill_adc_caps(codec);
- alc_rebuild_imux_for_auto_mic(codec);
- alc_remove_invalid_adc_nids(codec);
- }
+ alc_pick_fixup(codec, alc880_fixup_models, alc880_fixup_tbl,
+ alc880_fixups);
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
- if (!spec->no_analog && !spec->cap_mixer)
- set_capture_mixer(codec);
+ /* automatic parse from the BIOS config */
+ err = alc880_parse_auto_config(codec);
+ if (err < 0)
+ goto error;
if (!spec->no_analog) {
err = snd_hda_attach_beep_device(codec, 0x1);
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
}
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
-
codec->patch_ops = alc_patch_ops;
- if (board_config == ALC_MODEL_AUTO)
- spec->init_hook = alc_auto_init_std;
- else
- codec->patch_ops.build_controls = __alc_build_controls;
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- if (!spec->loopback.amplist)
- spec->loopback.amplist = alc880_loopbacks;
- #endif
+
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
return 0;
return alc_parse_auto_config(codec, alc260_ignore, alc260_ssids);
}
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- static const struct hda_amp_list alc260_loopbacks[] = {
- { 0x07, HDA_INPUT, 0 },
- { 0x07, HDA_INPUT, 1 },
- { 0x07, HDA_INPUT, 2 },
- { 0x07, HDA_INPUT, 3 },
- { 0x07, HDA_INPUT, 4 },
- { } /* end */
- };
- #endif
-
/*
* Pin config fixes
*/
enum {
- PINFIX_HP_DC5750,
+ ALC260_FIXUP_HP_DC5750,
+ ALC260_FIXUP_HP_PIN_0F,
+ ALC260_FIXUP_COEF,
+ ALC260_FIXUP_GPIO1,
+ ALC260_FIXUP_GPIO1_TOGGLE,
+ ALC260_FIXUP_REPLACER,
+ ALC260_FIXUP_HP_B1900,
};
+ static void alc260_gpio1_automute(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+ snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+ spec->hp_jack_present);
+ }
+
+ static void alc260_fixup_gpio1_toggle(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+ struct alc_spec *spec = codec->spec;
+ if (action == ALC_FIXUP_ACT_PROBE) {
+ /* although the machine has only one output pin, we need to
+ * toggle GPIO1 according to the jack state
+ */
+ spec->automute_hook = alc260_gpio1_automute;
+ spec->detect_hp = 1;
+ spec->automute_speaker = 1;
+ spec->autocfg.hp_pins[0] = 0x0f; /* copy it for automute */
+ snd_hda_jack_detect_enable(codec, 0x0f, ALC_HP_EVENT);
+ spec->unsol_event = alc_sku_unsol_event;
+ add_verb(codec->spec, alc_gpio1_init_verbs);
+ }
+ }
+
static const struct alc_fixup alc260_fixups[] = {
- [PINFIX_HP_DC5750] = {
+ [ALC260_FIXUP_HP_DC5750] = {
.type = ALC_FIXUP_PINS,
.v.pins = (const struct alc_pincfg[]) {
{ 0x11, 0x90130110 }, /* speaker */
{ }
}
},
+ [ALC260_FIXUP_HP_PIN_0F] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x0f, 0x01214000 }, /* HP */
+ { }
+ }
+ },
+ [ALC260_FIXUP_COEF] = {
+ .type = ALC_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x3040 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC260_FIXUP_HP_PIN_0F,
+ },
+ [ALC260_FIXUP_GPIO1] = {
+ .type = ALC_FIXUP_VERBS,
+ .v.verbs = alc_gpio1_init_verbs,
+ },
+ [ALC260_FIXUP_GPIO1_TOGGLE] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc260_fixup_gpio1_toggle,
+ .chained = true,
+ .chain_id = ALC260_FIXUP_HP_PIN_0F,
+ },
+ [ALC260_FIXUP_REPLACER] = {
+ .type = ALC_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x3050 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC260_FIXUP_GPIO1_TOGGLE,
+ },
+ [ALC260_FIXUP_HP_B1900] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc260_fixup_gpio1_toggle,
+ .chained = true,
+ .chain_id = ALC260_FIXUP_COEF,
+ }
};
static const struct snd_pci_quirk alc260_fixup_tbl[] = {
- SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", PINFIX_HP_DC5750),
+ SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_FIXUP_GPIO1),
+ SND_PCI_QUIRK(0x1025, 0x007f, "Acer Aspire 9500", ALC260_FIXUP_COEF),
+ SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_FIXUP_GPIO1),
+ SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750),
+ SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900),
+ SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1),
+ SND_PCI_QUIRK(0x161f, 0x2057, "Replacer 672V", ALC260_FIXUP_REPLACER),
+ SND_PCI_QUIRK(0x1631, 0xc017, "PB V7900", ALC260_FIXUP_COEF),
{}
};
/*
*/
- #ifdef CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS
- #include "alc260_quirks.c"
- #endif
-
static int patch_alc260(struct hda_codec *codec)
{
struct alc_spec *spec;
- int err, board_config;
+ int err;
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (spec == NULL)
spec->mixer_nid = 0x07;
- board_config = alc_board_config(codec, ALC260_MODEL_LAST,
- alc260_models, alc260_cfg_tbl);
- if (board_config < 0) {
- snd_printd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
- codec->chip_name);
- board_config = ALC_MODEL_AUTO;
- }
-
- if (board_config == ALC_MODEL_AUTO) {
- alc_pick_fixup(codec, NULL, alc260_fixup_tbl, alc260_fixups);
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
- }
-
- if (board_config == ALC_MODEL_AUTO) {
- /* automatic parse from the BIOS config */
- err = alc260_parse_auto_config(codec);
- if (err < 0)
- goto error;
- #ifdef CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS
- else if (!err) {
- printk(KERN_INFO
- "hda_codec: Cannot set up configuration "
- "from BIOS. Using base mode...\n");
- board_config = ALC260_BASIC;
- }
- #endif
- }
-
- if (board_config != ALC_MODEL_AUTO) {
- setup_preset(codec, &alc260_presets[board_config]);
- spec->vmaster_nid = 0x08;
- }
-
- if (!spec->no_analog && !spec->adc_nids) {
- alc_auto_fill_adc_caps(codec);
- alc_rebuild_imux_for_auto_mic(codec);
- alc_remove_invalid_adc_nids(codec);
- }
+ alc_pick_fixup(codec, NULL, alc260_fixup_tbl, alc260_fixups);
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
- if (!spec->no_analog && !spec->cap_mixer)
- set_capture_mixer(codec);
+ /* automatic parse from the BIOS config */
+ err = alc260_parse_auto_config(codec);
+ if (err < 0)
+ goto error;
if (!spec->no_analog) {
err = snd_hda_attach_beep_device(codec, 0x1);
set_beep_amp(spec, 0x07, 0x05, HDA_INPUT);
}
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
-
codec->patch_ops = alc_patch_ops;
- if (board_config == ALC_MODEL_AUTO)
- spec->init_hook = alc_auto_init_std;
- else
- codec->patch_ops.build_controls = __alc_build_controls;
spec->shutup = alc_eapd_shutup;
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- if (!spec->loopback.amplist)
- spec->loopback.amplist = alc260_loopbacks;
- #endif
+
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
return 0;
* In addition, an independent DAC for the multi-playback (not used in this
* driver yet).
*/
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- #define alc882_loopbacks alc880_loopbacks
- #endif
/*
* Pin config fixes
ALC882_FIXUP_PB_M5210,
ALC882_FIXUP_ACER_ASPIRE_7736,
ALC882_FIXUP_ASUS_W90V,
+ ALC889_FIXUP_CD,
ALC889_FIXUP_VAIO_TT,
ALC888_FIXUP_EEE1601,
ALC882_FIXUP_EAPD,
ALC883_FIXUP_EAPD,
ALC883_FIXUP_ACER_EAPD,
+ ALC882_FIXUP_GPIO1,
+ ALC882_FIXUP_GPIO2,
ALC882_FIXUP_GPIO3,
ALC889_FIXUP_COEF,
ALC882_FIXUP_ASUS_W2JC,
ALC882_FIXUP_ACER_ASPIRE_8930G,
ALC882_FIXUP_ASPIRE_8930G_VERBS,
ALC885_FIXUP_MACPRO_GPIO,
+ ALC889_FIXUP_DAC_ROUTE,
+ ALC889_FIXUP_MBP_VREF,
+ ALC889_FIXUP_IMAC91_VREF,
};
static void alc889_fixup_coef(struct hda_codec *codec,
alc882_gpio_mute(codec, 1, 0);
}
+ /* Fix the connection of some pins for ALC889:
+ * at least the Acer Aspire 5935 shows that the connections to DAC3/4
+ * don't work correctly (bko#42740)
+ */
+ static void alc889_fixup_dac_route(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+ if (action == ALC_FIXUP_ACT_PRE_PROBE) {
+ /* fake the connections while parsing the tree */
+ hda_nid_t conn1[2] = { 0x0c, 0x0d };
+ hda_nid_t conn2[2] = { 0x0e, 0x0f };
+ snd_hda_override_conn_list(codec, 0x14, 2, conn1);
+ snd_hda_override_conn_list(codec, 0x15, 2, conn1);
+ snd_hda_override_conn_list(codec, 0x18, 2, conn2);
+ snd_hda_override_conn_list(codec, 0x1a, 2, conn2);
+ } else if (action == ALC_FIXUP_ACT_PROBE) {
+ /* restore the connections */
+ hda_nid_t conn[5] = { 0x0c, 0x0d, 0x0e, 0x0f, 0x26 };
+ snd_hda_override_conn_list(codec, 0x14, 5, conn);
+ snd_hda_override_conn_list(codec, 0x15, 5, conn);
+ snd_hda_override_conn_list(codec, 0x18, 5, conn);
+ snd_hda_override_conn_list(codec, 0x1a, 5, conn);
+ }
+ }
+
+ /* Set VREF on HP pin */
+ static void alc889_fixup_mbp_vref(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+ struct alc_spec *spec = codec->spec;
+ static hda_nid_t nids[2] = { 0x14, 0x15 };
+ int i;
+
+ if (action != ALC_FIXUP_ACT_INIT)
+ return;
+ for (i = 0; i < ARRAY_SIZE(nids); i++) {
+ unsigned int val = snd_hda_codec_get_pincfg(codec, nids[i]);
+ if (get_defcfg_device(val) != AC_JACK_HP_OUT)
+ continue;
+ val = snd_hda_codec_read(codec, nids[i], 0,
+ AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+ val |= AC_PINCTL_VREF_80;
+ snd_hda_codec_write(codec, nids[i], 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, val);
+ spec->keep_vref_in_automute = 1;
+ break;
+ }
+ }
+
+ /* Set VREF on speaker pins on imac91 */
+ static void alc889_fixup_imac91_vref(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+ struct alc_spec *spec = codec->spec;
+ static hda_nid_t nids[2] = { 0x18, 0x1a };
+ int i;
+
+ if (action != ALC_FIXUP_ACT_INIT)
+ return;
+ for (i = 0; i < ARRAY_SIZE(nids); i++) {
+ unsigned int val;
+ val = snd_hda_codec_read(codec, nids[i], 0,
+ AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+ val |= AC_PINCTL_VREF_50;
+ snd_hda_codec_write(codec, nids[i], 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, val);
+ }
+ spec->keep_vref_in_automute = 1;
+ }
+
static const struct alc_fixup alc882_fixups[] = {
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
.type = ALC_FIXUP_PINS,
{ }
}
},
+ [ALC889_FIXUP_CD] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x1c, 0x993301f0 }, /* CD */
+ { }
+ }
+ },
[ALC889_FIXUP_VAIO_TT] = {
.type = ALC_FIXUP_PINS,
.v.pins = (const struct alc_pincfg[]) {
{ }
}
},
+ [ALC882_FIXUP_GPIO1] = {
+ .type = ALC_FIXUP_VERBS,
+ .v.verbs = alc_gpio1_init_verbs,
+ },
+ [ALC882_FIXUP_GPIO2] = {
+ .type = ALC_FIXUP_VERBS,
+ .v.verbs = alc_gpio2_init_verbs,
+ },
[ALC882_FIXUP_GPIO3] = {
.type = ALC_FIXUP_VERBS,
.v.verbs = alc_gpio3_init_verbs,
.type = ALC_FIXUP_FUNC,
.v.func = alc885_fixup_macpro_gpio,
},
+ [ALC889_FIXUP_DAC_ROUTE] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc889_fixup_dac_route,
+ },
+ [ALC889_FIXUP_MBP_VREF] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc889_fixup_mbp_vref,
+ .chained = true,
+ .chain_id = ALC882_FIXUP_GPIO1,
+ },
+ [ALC889_FIXUP_IMAC91_VREF] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc889_fixup_imac91_vref,
+ .chained = true,
+ .chain_id = ALC882_FIXUP_GPIO1,
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
ALC882_FIXUP_ACER_ASPIRE_4930G),
SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
+ SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736),
SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
/* All Apple entries are in codec SSIDs */
+ SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_FIXUP_MACPRO_GPIO),
SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_FIXUP_MACPRO_GPIO),
SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_FIXUP_MACPRO_GPIO),
+ SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889_FIXUP_MBP_VREF),
SND_PCI_QUIRK(0x106b, 0x3200, "iMac 7,1 Aluminum", ALC882_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889_FIXUP_MBP_VREF),
+ SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_FIXUP_MACPRO_GPIO),
+ SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
+ SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
+ SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
+ SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
+ SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
+ SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3", ALC889_FIXUP_CD),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
/*
*/
- #ifdef CONFIG_SND_HDA_ENABLE_REALTEK_QUIRKS
- #include "alc882_quirks.c"
- #endif
-
static int patch_alc882(struct hda_codec *codec)
{
struct alc_spec *spec;
- int err, board_config;
+ int err;
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (spec == NULL)
if (err < 0)
goto error;
- board_config = alc_board_config(codec, ALC882_MODEL_LAST,
- alc882_models, NULL);
- if (board_config < 0)
- board_config = alc_board_codec_sid_config(codec,
- ALC882_MODEL_LAST, alc882_models, alc882_ssid_cfg_tbl);
-
- if (board_config < 0) {
- printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
- codec->chip_name);
- board_config = ALC_MODEL_AUTO;
- }
-
- if (board_config == ALC_MODEL_AUTO) {
- alc_pick_fixup(codec, NULL, alc882_fixup_tbl, alc882_fixups);
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
- }
+ alc_pick_fixup(codec, NULL, alc882_fixup_tbl, alc882_fixups);
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
alc_auto_parse_customize_define(codec);
- if (board_config == ALC_MODEL_AUTO) {
- /* automatic parse from the BIOS config */
- err = alc882_parse_auto_config(codec);
- if (err < 0)
- goto error;
- }
-
- if (board_config != ALC_MODEL_AUTO) {
- setup_preset(codec, &alc882_presets[board_config]);
- spec->vmaster_nid = 0x0c;
- }
-
- if (!spec->no_analog && !spec->adc_nids) {
- alc_auto_fill_adc_caps(codec);
- alc_rebuild_imux_for_auto_mic(codec);
- alc_remove_invalid_adc_nids(codec);
- }
-
- if (!spec->no_analog && !spec->cap_mixer)
- set_capture_mixer(codec);
+ /* automatic parse from the BIOS config */
+ err = alc882_parse_auto_config(codec);
+ if (err < 0)
+ goto error;
if (!spec->no_analog && has_cdefine_beep(codec)) {
err = snd_hda_attach_beep_device(codec, 0x1);
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
}
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
-
codec->patch_ops = alc_patch_ops;
- if (board_config == ALC_MODEL_AUTO)
- spec->init_hook = alc_auto_init_std;
- else
- codec->patch_ops.build_controls = __alc_build_controls;
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- if (!spec->loopback.amplist)
- spec->loopback.amplist = alc882_loopbacks;
- #endif
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
return 0;
};
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- #define alc262_loopbacks alc880_loopbacks
- #endif
-
/*
*/
static int patch_alc262(struct hda_codec *codec)
if (err < 0)
goto error;
- if (!spec->no_analog && !spec->adc_nids) {
- alc_auto_fill_adc_caps(codec);
- alc_rebuild_imux_for_auto_mic(codec);
- alc_remove_invalid_adc_nids(codec);
- }
-
- if (!spec->no_analog && !spec->cap_mixer)
- set_capture_mixer(codec);
-
if (!spec->no_analog && has_cdefine_beep(codec)) {
err = snd_hda_attach_beep_device(codec, 0x1);
if (err < 0)
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
}
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
-
codec->patch_ops = alc_patch_ops;
- spec->init_hook = alc_auto_init_std;
spec->shutup = alc_eapd_shutup;
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- if (!spec->loopback.amplist)
- spec->loopback.amplist = alc262_loopbacks;
- #endif
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
return 0;
(0 << AC_AMPCAP_MUTE_SHIFT));
}
- if (!spec->no_analog && !spec->adc_nids) {
- alc_auto_fill_adc_caps(codec);
- alc_rebuild_imux_for_auto_mic(codec);
- alc_remove_invalid_adc_nids(codec);
- }
-
- if (!spec->no_analog && !spec->cap_mixer)
- set_capture_mixer(codec);
-
codec->patch_ops = alc_patch_ops;
- spec->init_hook = alc_auto_init_std;
spec->shutup = alc_eapd_shutup;
return 0;
/*
* ALC269
*/
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- #define alc269_loopbacks alc880_loopbacks
- #endif
-
static const struct hda_pcm_stream alc269_44k_pcm_analog_playback = {
.substreams = 1,
.channels_min = 2,
/* NID is set in alc_build_pcms */
};
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- static int alc269_mic2_for_mute_led(struct hda_codec *codec)
- {
- switch (codec->subsystem_id) {
- case 0x103c1586:
- return 1;
- }
- return 0;
- }
-
- static int alc269_mic2_mute_check_ps(struct hda_codec *codec, hda_nid_t nid)
- {
- /* update mute-LED according to the speaker mute state */
- if (nid == 0x01 || nid == 0x14) {
- int pinval;
- if (snd_hda_codec_amp_read(codec, 0x14, 0, HDA_OUTPUT, 0) &
- HDA_AMP_MUTE)
- pinval = 0x24;
- else
- pinval = 0x20;
- /* mic2 vref pin is used for mute LED control */
- snd_hda_codec_update_cache(codec, 0x19, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL,
- pinval);
- }
- return alc_check_power_status(codec, nid);
- }
- #endif /* CONFIG_SND_HDA_POWER_SAVE */
-
/* different alc269-variants */
enum {
ALC269_TYPE_ALC269VA,
spec->automute_hook = alc269_quanta_automute;
}
+ /* update mute-LED according to the speaker mute state via mic2 VREF pin */
+ static void alc269_fixup_mic2_mute_hook(void *private_data, int enabled)
+ {
+ struct hda_codec *codec = private_data;
+ unsigned int pinval = enabled ? 0x20 : 0x24;
+ snd_hda_codec_update_cache(codec, 0x19, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL,
+ pinval);
+ }
+
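+ /* hook the vmaster mute switch so the mic2-VREF mute LED follows it,
+  * and sync the LED state at init time
+  */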
+ static void alc269_fixup_mic2_mute(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+ struct alc_spec *spec = codec->spec;
+ switch (action) {
+ case ALC_FIXUP_ACT_BUILD:
+ spec->vmaster_mute.hook = alc269_fixup_mic2_mute_hook;
+ snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute, true);
+ /* fallthru */
+ case ALC_FIXUP_ACT_INIT:
+ snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
+ break;
+ }
+ }
+
enum {
ALC269_FIXUP_SONY_VAIO,
ALC275_FIXUP_SONY_VAIO_GPIO2,
ALC269_FIXUP_DMIC,
ALC269VB_FIXUP_AMIC,
ALC269VB_FIXUP_DMIC,
+ ALC269_FIXUP_MIC2_MUTE_LED,
};
static const struct alc_fixup alc269_fixups[] = {
{ }
},
},
+ [ALC269_FIXUP_MIC2_MUTE_LED] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc269_fixup_mic2_mute,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
- #if 1
+ #if 0
/* Below is a quirk table taken from the old code.
* Basically the device should work as is without the fixup table.
* If BIOS doesn't give a proper info, enable the corresponding
};
- static int alc269_fill_coef(struct hda_codec *codec)
+ static void alc269_fill_coef(struct hda_codec *codec)
{
+ struct alc_spec *spec = codec->spec;
int val;
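+ /* the COEF setup below applies only to the ALC269VB variant */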
+ if (spec->codec_variant != ALC269_TYPE_ALC269VB)
+ return;
+
if ((alc_get_coef0(codec) & 0x00ff) < 0x015) {
alc_write_coef_idx(codec, 0xf, 0x960b);
alc_write_coef_idx(codec, 0xe, 0x8817);
val = alc_read_coef_idx(codec, 0x4); /* HP */
alc_write_coef_idx(codec, 0x4, val | (1<<11));
-
- return 0;
}
/*
}
if (err < 0)
goto error;
+ spec->init_hook = alc269_fill_coef;
alc269_fill_coef(codec);
}
if (err < 0)
goto error;
- if (!spec->no_analog && !spec->adc_nids) {
- alc_auto_fill_adc_caps(codec);
- alc_rebuild_imux_for_auto_mic(codec);
- alc_remove_invalid_adc_nids(codec);
- }
-
- if (!spec->no_analog && !spec->cap_mixer)
- set_capture_mixer(codec);
-
if (!spec->no_analog && has_cdefine_beep(codec)) {
err = snd_hda_attach_beep_device(codec, 0x1);
if (err < 0)
set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
}
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
-
codec->patch_ops = alc_patch_ops;
#ifdef CONFIG_PM
codec->patch_ops.resume = alc269_resume;
#endif
- spec->init_hook = alc_auto_init_std;
spec->shutup = alc269_shutup;
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- if (!spec->loopback.amplist)
- spec->loopback.amplist = alc269_loopbacks;
- if (alc269_mic2_for_mute_led(codec))
- codec->patch_ops.check_power_status = alc269_mic2_mute_check_ps;
- #endif
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
return 0;
return alc_parse_auto_config(codec, alc861_ignore, alc861_ssids);
}
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- static const struct hda_amp_list alc861_loopbacks[] = {
- { 0x15, HDA_INPUT, 0 },
- { 0x15, HDA_INPUT, 1 },
- { 0x15, HDA_INPUT, 2 },
- { 0x15, HDA_INPUT, 3 },
- { } /* end */
- };
- #endif
-
-
/* Pin config fixes */
enum {
- PINFIX_FSC_AMILO_PI1505,
- PINFIX_ASUS_A6RP,
+ ALC861_FIXUP_FSC_AMILO_PI1505,
+ ALC861_FIXUP_AMP_VREF_0F,
+ ALC861_FIXUP_NO_JACK_DETECT,
+ ALC861_FIXUP_ASUS_A6RP,
};
/* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */
spec->keep_vref_in_automute = 1;
}
+ /* suppress the jack-detection */
+ static void alc_fixup_no_jack_detect(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+ if (action == ALC_FIXUP_ACT_PRE_PROBE)
+ codec->no_jack_detect = 1;
+ }
+
static const struct alc_fixup alc861_fixups[] = {
- [PINFIX_FSC_AMILO_PI1505] = {
+ [ALC861_FIXUP_FSC_AMILO_PI1505] = {
.type = ALC_FIXUP_PINS,
.v.pins = (const struct alc_pincfg[]) {
{ 0x0b, 0x0221101f }, /* HP */
{ }
}
},
- [PINFIX_ASUS_A6RP] = {
+ [ALC861_FIXUP_AMP_VREF_0F] = {
.type = ALC_FIXUP_FUNC,
.v.func = alc861_fixup_asus_amp_vref_0f,
},
+ [ALC861_FIXUP_NO_JACK_DETECT] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc_fixup_no_jack_detect,
+ },
+ [ALC861_FIXUP_ASUS_A6RP] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc861_fixup_asus_amp_vref_0f,
+ .chained = true,
+ .chain_id = ALC861_FIXUP_NO_JACK_DETECT,
+ }
};
static const struct snd_pci_quirk alc861_fixup_tbl[] = {
- SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", PINFIX_ASUS_A6RP),
- SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", PINFIX_ASUS_A6RP),
- SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", PINFIX_FSC_AMILO_PI1505),
+ SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
+ SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
+ SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
+ SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F),
+ SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F),
+ SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505),
{}
};
if (err < 0)
goto error;
- if (!spec->no_analog && !spec->adc_nids) {
- alc_auto_fill_adc_caps(codec);
- alc_rebuild_imux_for_auto_mic(codec);
- alc_remove_invalid_adc_nids(codec);
- }
-
- if (!spec->no_analog && !spec->cap_mixer)
- set_capture_mixer(codec);
-
if (!spec->no_analog) {
err = snd_hda_attach_beep_device(codec, 0x23);
if (err < 0)
set_beep_amp(spec, 0x23, 0, HDA_OUTPUT);
}
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
-
codec->patch_ops = alc_patch_ops;
- spec->init_hook = alc_auto_init_std;
#ifdef CONFIG_SND_HDA_POWER_SAVE
spec->power_hook = alc_power_eapd;
- if (!spec->loopback.amplist)
- spec->loopback.amplist = alc861_loopbacks;
#endif
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
+
return 0;
error:
*
* In addition, an independent DAC
*/
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- #define alc861vd_loopbacks alc880_loopbacks
- #endif
-
static int alc861vd_parse_auto_config(struct hda_codec *codec)
{
static const hda_nid_t alc861vd_ignore[] = { 0x1d, 0 };
add_verb(spec, alc660vd_eapd_verbs);
}
- if (!spec->no_analog && !spec->adc_nids) {
- alc_auto_fill_adc_caps(codec);
- alc_rebuild_imux_for_auto_mic(codec);
- alc_remove_invalid_adc_nids(codec);
- }
-
- if (!spec->no_analog && !spec->cap_mixer)
- set_capture_mixer(codec);
-
if (!spec->no_analog) {
err = snd_hda_attach_beep_device(codec, 0x23);
if (err < 0)
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
}
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
-
codec->patch_ops = alc_patch_ops;
- spec->init_hook = alc_auto_init_std;
spec->shutup = alc_eapd_shutup;
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- if (!spec->loopback.amplist)
- spec->loopback.amplist = alc861vd_loopbacks;
- #endif
+
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
return 0;
* In addition, an independent DAC for the multi-playback (not used in this
* driver yet).
*/
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- #define alc662_loopbacks alc880_loopbacks
- #endif
/*
* BIOS auto configuration
ALC662_FIXUP_ASUS_MODE6,
ALC662_FIXUP_ASUS_MODE7,
ALC662_FIXUP_ASUS_MODE8,
+ ALC662_FIXUP_NO_JACK_DETECT,
};
static const struct alc_fixup alc662_fixups[] = {
.chained = true,
.chain_id = ALC662_FIXUP_SKU_IGNORE
},
+ [ALC662_FIXUP_NO_JACK_DETECT] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc_fixup_no_jack_detect,
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+ SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
if (err < 0)
goto error;
- if (!spec->no_analog && !spec->adc_nids) {
- alc_auto_fill_adc_caps(codec);
- alc_rebuild_imux_for_auto_mic(codec);
- alc_remove_invalid_adc_nids(codec);
- }
-
- if (!spec->no_analog && !spec->cap_mixer)
- set_capture_mixer(codec);
-
if (!spec->no_analog && has_cdefine_beep(codec)) {
err = snd_hda_attach_beep_device(codec, 0x1);
if (err < 0)
}
}
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
-
codec->patch_ops = alc_patch_ops;
- spec->init_hook = alc_auto_init_std;
spec->shutup = alc_eapd_shutup;
- #ifdef CONFIG_SND_HDA_POWER_SAVE
- if (!spec->loopback.amplist)
- spec->loopback.amplist = alc662_loopbacks;
- #endif
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
return 0;
return err;
}
- if (!spec->no_analog && !spec->cap_mixer)
- set_capture_mixer(codec);
-
codec->patch_ops = alc_patch_ops;
- spec->init_hook = alc_auto_init_std;
return 0;
}