Git Repo - linux.git/commitdiff
Merge tag 'block-6.10-20240607' of git://git.kernel.dk/linux
author Linus Torvalds <[email protected]>
Fri, 7 Jun 2024 23:45:48 +0000 (16:45 -0700)
committer Linus Torvalds <[email protected]>
Fri, 7 Jun 2024 23:45:48 +0000 (16:45 -0700)
Pull block fixes from Jens Axboe:

 - Fix for null_blk block size validation (Andreas)

 - NVMe pull request via Keith:
      - Use reserved tags for special fabrics operations (Chunguang)
      - Persistent Reservation status masking fix (Weiwen)

* tag 'block-6.10-20240607' of git://git.kernel.dk/linux:
  null_blk: fix validation of block size
  nvme: fix nvme_pr_* status code parsing
  nvme-fabrics: use reserved tag for reg read/write command

551 files changed:
.mailmap
Documentation/admin-guide/LSM/tomoyo.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/arch/riscv/uabi.rst
Documentation/cdrom/cdrom-standard.rst
Documentation/core-api/swiotlb.rst
Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
Documentation/devicetree/bindings/arm/sunxi.yaml
Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
Documentation/netlink/specs/netdev.yaml
Documentation/networking/af_xdp.rst
Documentation/process/maintainer-netdev.rst
MAINTAINERS
Makefile
arch/arc/net/bpf_jit.h
arch/arc/net/bpf_jit_arcv2.c
arch/arc/net/bpf_jit_core.c
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/io.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/include/asm/kvm_pkvm.h
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/emulate-nested.c
arch/arm64/kvm/fpsimd.c
arch/arm64/kvm/guest.c
arch/arm64/kvm/hyp/aarch32.c
arch/arm64/kvm/hyp/fpsimd.S
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/include/nvhe/pkvm.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/hyp/nvhe/setup.c
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/nested.c
arch/arm64/kvm/reset.c
arch/loongarch/boot/dts/loongson-2k0500-ref.dts
arch/loongarch/boot/dts/loongson-2k1000-ref.dts
arch/loongarch/boot/dts/loongson-2k2000-ref.dts
arch/loongarch/include/asm/numa.h
arch/loongarch/include/asm/stackframe.h
arch/loongarch/kernel/head.S
arch/loongarch/kernel/setup.c
arch/loongarch/kernel/smp.c
arch/loongarch/kernel/vmlinux.lds.S
arch/powerpc/Kconfig
arch/powerpc/include/asm/uaccess.h
arch/powerpc/net/bpf_jit_comp32.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/pseries/lparcfg.c
arch/riscv/Kconfig
arch/riscv/include/asm/cmpxchg.h
arch/riscv/kernel/cpu_ops_sbi.c
arch/riscv/kernel/cpu_ops_spinwait.c
arch/riscv/kvm/aia_device.c
arch/riscv/kvm/vcpu_onereg.c
arch/riscv/mm/fault.c
arch/riscv/mm/init.c
arch/s390/kernel/crash_dump.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/uncore.c
arch/x86/events/rapl.c
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/vmxfeatures.h
arch/x86/kernel/cpu/aperfmperf.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/topology_amd.c
arch/x86/kvm/Kconfig
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/spte.h
arch/x86/kvm/mmu/tdp_iter.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
drivers/acpi/ac.c
drivers/acpi/apei/einj-core.c
drivers/acpi/ec.c
drivers/acpi/sbs.c
drivers/ata/ahci.c
drivers/ata/libata-core.c
drivers/ata/pata_macio.c
drivers/base/regmap/regmap-i2c.c
drivers/char/hw_random/core.c
drivers/char/tpm/Kconfig
drivers/char/tpm/tpm-buf.c
drivers/char/tpm/tpm.h
drivers/char/tpm/tpm2-cmd.c
drivers/char/tpm/tpm2-sessions.c
drivers/char/tpm/tpm_tis_core.c
drivers/char/tpm/tpm_tis_core.h
drivers/char/tpm/tpm_tis_spi_main.c
drivers/cpufreq/amd-pstate-ut.c
drivers/cpufreq/amd-pstate.c
drivers/cpufreq/amd-pstate.h [new file with mode: 0644]
drivers/cpufreq/intel_pstate.c
drivers/cxl/core/region.c
drivers/dma-buf/st-dma-fence.c
drivers/dma-buf/sync_debug.c
drivers/firewire/packet-serdes-test.c
drivers/firewire/uapi-test.c
drivers/firmware/efi/efi-pstore.c
drivers/firmware/efi/libstub/loongarch.c
drivers/firmware/efi/libstub/zboot.lds
drivers/firmware/efi/runtime-wrappers.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/include/pptable.h
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
drivers/gpu/drm/drm_buddy.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/i915/display/intel_audio.c
drivers/gpu/drm/i915/display/intel_audio.h
drivers/gpu/drm/i915/display/intel_display_driver.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
drivers/gpu/drm/i915/gt/intel_gt_types.h
drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
drivers/gpu/drm/lima/lima_gem.c
drivers/gpu/drm/msm/registers/gen_header.py
drivers/gpu/drm/nouveau/nvif/object.c
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/panel-lg-sw43408.c
drivers/gpu/drm/panel/panel-sitronix-st7789v.c
drivers/gpu/drm/panfrost/panfrost_gem.c
drivers/gpu/drm/tests/drm_buddy_test.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
drivers/gpu/drm/xe/xe_guc_submit.c
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_pcode.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/intel-m10-bmc-hwmon.c
drivers/hwmon/ltc2992.c
drivers/hwmon/shtc1.c
drivers/i2c/busses/i2c-synquacer.c
drivers/input/touchscreen/silead.c
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/init.c
drivers/iommu/amd/iommu.c
drivers/iommu/amd/ppr.c
drivers/iommu/dma-iommu.c
drivers/mailbox/zynqmp-ipi-mailbox.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ice/devlink/devlink.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_nvm.c
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/idpf/idpf_lib.c
drivers/net/ethernet/intel/idpf/idpf_txrx.c
drivers/net/ethernet/intel/idpf/idpf_txrx.h
drivers/net/ethernet/intel/igc/igc_ethtool.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/ethernet/ti/icssg/icssg_classifier.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/netkit.c
drivers/net/phy/micrel.c
drivers/net/usb/smsc95xx.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vxlan/vxlan_core.c
drivers/net/wireless/ath/ath10k/Kconfig
drivers/net/wireless/ath/ath11k/core.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath11k/pcic.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/rs.h
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/mediatek/mt76/mt7615/main.c
drivers/net/wireless/microchip/wilc1000/cfg80211.c
drivers/net/wireless/microchip/wilc1000/hif.c
drivers/net/wireless/microchip/wilc1000/netdev.c
drivers/net/wireless/microchip/wilc1000/netdev.h
drivers/net/wireless/microchip/wilc1000/wlan.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/net/wwan/iosm/iosm_ipc_devlink.c
drivers/nfc/virtual_ncidev.c
drivers/of/irq.c
drivers/of/of_private.h
drivers/of/of_test.c
drivers/of/property.c
drivers/pci/access.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/platform/x86/Kconfig
drivers/platform/x86/amd/hsmp.c
drivers/platform/x86/dell/dell-smbios-base.c
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
drivers/platform/x86/touchscreen_dmi.c
drivers/platform/x86/x86-android-tablets/Kconfig
drivers/pmdomain/imx/gpcv2.c
drivers/pnp/base.h
drivers/pnp/driver.c
drivers/ptp/ptp_chardev.c
drivers/regulator/rtq2208-regulator.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/mpi3mr/mpi3mr_transport.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mpt3sas/mpt3sas_transport.c
drivers/scsi/qedf/qedf.h
drivers/scsi/qedf/qedf_main.c
drivers/scsi/scsi.c
drivers/scsi/sr.h
drivers/scsi/sr_ioctl.c
drivers/spi/spi-cadence-xspi.c
drivers/spi/spi-stm32.c
drivers/spi/spi.c
drivers/thermal/thermal_core.c
drivers/thermal/thermal_core.h
drivers/thermal/thermal_debugfs.c
drivers/thermal/thermal_trip.c
drivers/ufs/core/ufs-mcq.c
fs/9p/vfs_dentry.c
fs/9p/vfs_inode.c
fs/afs/inode.c
fs/afs/mntpt.c
fs/bcachefs/backpointers.c
fs/bcachefs/bcachefs.h
fs/bcachefs/bcachefs_format.h
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_gc.h
fs/bcachefs/btree_gc_types.h [new file with mode: 0644]
fs/bcachefs/btree_io.c
fs/bcachefs/btree_key_cache.c
fs/bcachefs/btree_locking.c
fs/bcachefs/buckets.c
fs/bcachefs/disk_groups_format.h [new file with mode: 0644]
fs/bcachefs/ec.c
fs/bcachefs/fs-io-buffered.c
fs/bcachefs/fs-io-direct.c
fs/bcachefs/fs.c
fs/bcachefs/fsck.c
fs/bcachefs/journal_seq_blacklist_format.h [new file with mode: 0644]
fs/bcachefs/mean_and_variance_test.c
fs/bcachefs/move.c
fs/bcachefs/replicas_format.h [new file with mode: 0644]
fs/bcachefs/sb-downgrade.c
fs/bcachefs/sb-downgrade_format.h [new file with mode: 0644]
fs/bcachefs/sb-errors_format.h [new file with mode: 0644]
fs/bcachefs/sb-errors_types.h
fs/bcachefs/sb-members_format.h [new file with mode: 0644]
fs/bcachefs/snapshot.c
fs/bcachefs/snapshot.h
fs/bcachefs/super-io.c
fs/bcachefs/super.c
fs/btrfs/btrfs_inode.h
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/ordered-data.c
fs/btrfs/tree-log.c
fs/dcache.c
fs/iomap/buffered-io.c
fs/netfs/buffered_write.c
fs/netfs/direct_write.c
fs/netfs/objects.c
fs/netfs/write_collect.c
fs/netfs/write_issue.c
fs/signalfd.c
fs/smb/client/cifsfs.c
fs/smb/client/cifspdu.h
fs/smb/client/inode.c
fs/smb/client/smb2ops.c
fs/smb/common/cifs_arc4.c
fs/smb/common/cifs_md4.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_attr.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/scrub/scrub.c
fs/xfs/scrub/xfarray.c
fs/xfs/xfs_attr_item.c
fs/xfs/xfs_handle.c
fs/xfs/xfs_iwalk.c
fs/xfs/xfs_reflink.c
include/drm/drm_buddy.h
include/linux/amd-pstate.h [deleted file]
include/linux/cdrom.h
include/linux/etherdevice.h
include/linux/i2c.h
include/linux/iommu.h
include/linux/lockdep.h
include/linux/mlx5/mlx5_ifc.h
include/linux/netfs.h
include/linux/pagemap.h
include/linux/pci.h
include/linux/pnp.h
include/linux/tpm.h
include/net/dst_ops.h
include/net/page_pool/types.h
include/net/request_sock.h
include/net/rtnetlink.h
include/net/sock.h
include/net/tcp_ao.h
include/sound/pcm.h
include/uapi/linux/cn_proc.h
include/uapi/linux/kd.h
include/uapi/linux/netdev.h
io_uring/io-wq.c
io_uring/io_uring.h
io_uring/memmap.c
io_uring/napi.c
io_uring/net.c
io_uring/opdef.c
io_uring/register.c
kernel/bpf/devmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/dma/map_benchmark.c
kernel/gen_kheaders.sh
kernel/power/swap.c
kernel/trace/bpf_trace.c
kernel/trace/trace_probe.c
kernel/trace/trace_uprobe.c
lib/fortify_kunit.c
lib/test_rhashtable.c
net/9p/client.c
net/ax25/af_ax25.c
net/ax25/ax25_dev.c
net/bpf/test_run.c
net/core/dev.c
net/core/dst_cache.c
net/core/rtnetlink.c
net/core/sock_map.c
net/ethernet/eth.c
net/ethtool/ioctl.c
net/ethtool/tsinfo.c
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/netfilter/nf_tproxy_ipv4.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_ao.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv6/ila/ila_lwt.c
net/ipv6/ioam6_iptunnel.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_offload.c
net/ipv6/route.c
net/ipv6/rpl_iptunnel.c
net/ipv6/seg6_iptunnel.c
net/ipv6/tcp_ipv6.c
net/mac80211/cfg.c
net/mac80211/he.c
net/mac80211/ieee80211_i.h
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh_pathtbl.c
net/mac80211/parse.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/util.c
net/mptcp/protocol.c
net/ncsi/internal.h
net/ncsi/ncsi-manage.c
net/ncsi/ncsi-rsp.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_fib.c
net/netfilter/nft_payload.c
net/sched/sch_multiq.c
net/sched/sch_taprio.c
net/smc/af_smc.c
net/sunrpc/auth_gss/svcauth_gss.c
net/unix/af_unix.c
net/unix/diag.c
net/wireless/core.c
net/wireless/pmsr.c
net/wireless/rdev-ops.h
net/wireless/scan.c
net/wireless/sysfs.c
net/wireless/util.c
net/xdp/xsk.c
net/xfrm/xfrm_policy.c
scripts/dtc/Makefile
scripts/gdb/linux/Makefile
scripts/kconfig/expr.c
scripts/kconfig/expr.h
scripts/kconfig/symbol.c
scripts/link-vmlinux.sh
scripts/make_fit.py
scripts/mksysmap
security/landlock/fs.c
security/tomoyo/Kconfig
security/tomoyo/common.c
sound/core/init.c
sound/core/jack.c
sound/core/seq/seq_ump_convert.c
sound/core/ump.c
sound/core/ump_convert.c
sound/hda/intel-dsp-config.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/cs42l43.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/boards/Kconfig
sound/soc/sof/amd/acp-common.c
sound/soc/sof/amd/acp.c
sound/soc/sof/amd/acp63.c
sound/soc/sof/amd/pci-acp63.c
sound/soc/sof/amd/pci-rmb.c
sound/soc/sof/amd/pci-rn.c
sound/soc/sof/amd/pci-vangogh.c
sound/soc/sof/amd/rembrandt.c
sound/soc/sof/amd/renoir.c
sound/soc/sof/amd/vangogh.c
sound/soc/sof/core.c
sound/soc/sof/imx/imx-common.c
sound/soc/sof/imx/imx8.c
sound/soc/sof/imx/imx8m.c
sound/soc/sof/imx/imx8ulp.c
sound/soc/sof/intel/atom.c
sound/soc/sof/intel/bdw.c
sound/soc/sof/intel/byt.c
sound/soc/sof/intel/hda-codec.c
sound/soc/sof/intel/hda-ctrl.c
sound/soc/sof/intel/hda-mlink.c
sound/soc/sof/intel/hda.c
sound/soc/sof/intel/pci-apl.c
sound/soc/sof/intel/pci-cnl.c
sound/soc/sof/intel/pci-icl.c
sound/soc/sof/intel/pci-lnl.c
sound/soc/sof/intel/pci-mtl.c
sound/soc/sof/intel/pci-skl.c
sound/soc/sof/intel/pci-tgl.c
sound/soc/sof/intel/pci-tng.c
sound/soc/sof/ipc4-pcm.c
sound/soc/sof/ipc4-topology.c
sound/soc/sof/ipc4-topology.h
sound/soc/sof/mediatek/mt8186/mt8186.c
sound/soc/sof/mediatek/mt8195/mt8195.c
sound/soc/sof/mediatek/mtk-adsp-common.c
sound/soc/sof/nocodec.c
sound/soc/sof/sof-acpi-dev.c
sound/soc/sof/sof-client-ipc-flood-test.c
sound/soc/sof/sof-client-ipc-kernel-injector.c
sound/soc/sof/sof-client-ipc-msg-injector.c
sound/soc/sof/sof-client-probes.c
sound/soc/sof/sof-of-dev.c
sound/soc/sof/sof-pci-dev.c
sound/soc/sof/sof-utils.c
sound/soc/sof/stream-ipc.c
sound/soc/sof/xtensa/core.c
tools/bpf/resolve_btfids/main.c
tools/include/uapi/linux/netdev.h
tools/lib/bpf/features.c
tools/power/cpupower/utils/helpers/amd.c
tools/testing/cxl/test/mem.c
tools/testing/selftests/alsa/Makefile
tools/testing/selftests/bpf/prog_tests/tc_netkit.c
tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
tools/testing/selftests/bpf/prog_tests/verifier.c
tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
tools/testing/selftests/bpf/progs/test_tc_link.c
tools/testing/selftests/bpf/progs/uprobe_multi.c
tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c [new file with mode: 0644]
tools/testing/selftests/cachestat/test_cachestat.c
tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
tools/testing/selftests/ftrace/config
tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
tools/testing/selftests/futex/Makefile
tools/testing/selftests/futex/functional/Makefile
tools/testing/selftests/futex/functional/futex_requeue_pi.c
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/s390x/shared_zeropage_test.c [new file with mode: 0644]
tools/testing/selftests/landlock/fs_test.c
tools/testing/selftests/net/hsr/config
tools/testing/selftests/net/hsr/hsr_ping.sh
tools/testing/selftests/net/lib.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/mptcp/mptcp_lib.sh
tools/testing/selftests/net/mptcp/simult_flows.sh
tools/testing/selftests/openat2/openat2_test.c
tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json

index 43cd2995dbc29ac1ac10fba7e0be76e4aec64116..efd9fa867a8eccd5488aac0c16822b0a2c9f2db4 100644 (file)
--- a/.mailmap
+++ b/.mailmap
 Karthikeyan Periyasamy <[email protected]> <[email protected]>
 Kay Sievers <[email protected]>
 Kenneth W Chen <[email protected]>
index 4bc9c2b4da6f3923db9cea93770e5f5a6da605bb..bdb2c2e2a1b26bf8119406b3404dc582b6322fb3 100644 (file)
@@ -9,8 +9,8 @@ TOMOYO is a name-based MAC extension (LSM module) for the Linux kernel.
 
 LiveCD-based tutorials are available at
 
-http://tomoyo.sourceforge.jp/1.8/ubuntu12.04-live.html
-http://tomoyo.sourceforge.jp/1.8/centos6-live.html
+https://tomoyo.sourceforge.net/1.8/ubuntu12.04-live.html
+https://tomoyo.sourceforge.net/1.8/centos6-live.html
 
 Though these tutorials use non-LSM version of TOMOYO, they are useful for you
 to know what TOMOYO is.
@@ -21,45 +21,32 @@ How to enable TOMOYO?
 Build the kernel with ``CONFIG_SECURITY_TOMOYO=y`` and pass ``security=tomoyo`` on
 kernel's command line.
 
-Please see http://tomoyo.osdn.jp/2.5/ for details.
+Please see https://tomoyo.sourceforge.net/2.6/ for details.
 
 Where is documentation?
 =======================
 
 User <-> Kernel interface documentation is available at
-https://tomoyo.osdn.jp/2.5/policy-specification/index.html .
+https://tomoyo.sourceforge.net/2.6/policy-specification/index.html .
 
 Materials we prepared for seminars and symposiums are available at
-https://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 .
+https://sourceforge.net/projects/tomoyo/files/docs/ .
 Below lists are chosen from three aspects.
 
 What is TOMOYO?
   TOMOYO Linux Overview
-    https://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf
+    https://sourceforge.net/projects/tomoyo/files/docs/lca2009-takeda.pdf
   TOMOYO Linux: pragmatic and manageable security for Linux
-    https://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf
+    https://sourceforge.net/projects/tomoyo/files/docs/freedomhectaipei-tomoyo.pdf
   TOMOYO Linux: A Practical Method to Understand and Protect Your Own Linux Box
-    https://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf
+    https://sourceforge.net/projects/tomoyo/files/docs/PacSec2007-en-no-demo.pdf
 
 What can TOMOYO do?
   Deep inside TOMOYO Linux
-    https://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf
+    https://sourceforge.net/projects/tomoyo/files/docs/lca2009-kumaneko.pdf
   The role of "pathname based access control" in security.
-    https://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf
+    https://sourceforge.net/projects/tomoyo/files/docs/lfj2008-bof.pdf
 
 History of TOMOYO?
   Realities of Mainlining
-    https://osdn.jp/projects/tomoyo/docs/lfj2008.pdf
-
-What is future plan?
-====================
-
-We believe that inode based security and name based security are complementary
-and both should be used together. But unfortunately, so far, we cannot enable
-multiple LSM modules at the same time. We feel sorry that you have to give up
-SELinux/SMACK/AppArmor etc. when you want to use TOMOYO.
-
-We hope that LSM becomes stackable in future. Meanwhile, you can use non-LSM
-version of TOMOYO, available at http://tomoyo.osdn.jp/1.8/ .
-LSM version of TOMOYO is a subset of non-LSM version of TOMOYO. We are planning
-to port non-LSM version's functionalities to LSM versions.
+    https://sourceforge.net/projects/tomoyo/files/docs/lfj2008.pdf
index 500cfa7762257cafc3bef374935759e1c7d9984c..b600df82669db000304571fd23c3af90df36190d 100644 (file)
                                Format:
                                <bus_id>,<clkrate>
 
+       i2c_touchscreen_props= [HW,ACPI,X86]
+                       Set device-properties for ACPI-enumerated I2C-attached
+                       touchscreen, to e.g. fix coordinates of upside-down
+                       mounted touchscreens. If you need this option please
+                       submit a drivers/platform/x86/touchscreen_dmi.c patch
+                       adding a DMI quirk for this.
+
+                       Format:
+                       <ACPI_HW_ID>:<prop_name>=<val>[:prop_name=val][:...]
+                       Where <val> is one of:
+                       Omit "=<val>" entirely  Set a boolean device-property
+                       Unsigned number         Set a u32 device-property
+                       Anything else           Set a string device-property
+
+                       Examples (split over multiple lines):
+                       i2c_touchscreen_props=GDIX1001:touchscreen-inverted-x:
+                       touchscreen-inverted-y
+
+                       i2c_touchscreen_props=MSSL1680:touchscreen-size-x=1920:
+                       touchscreen-size-y=1080:touchscreen-inverted-y:
+                       firmware-name=gsl1680-vendor-model.fw:silead,home-button
+
        i8042.debug     [HW] Toggle i8042 debug mode
        i8042.unmask_kbd_data
                        [HW] Enable printing of interrupt data from the KBD port
index 54d199dce78bf50525b0430dcfc5a6a71429bf68..2b420bab0527a75f09d76c0d7fe9904262830e16 100644 (file)
@@ -65,4 +65,6 @@ the extension, or may have deliberately removed it from the listing.
 Misaligned accesses
 -------------------
 
-Misaligned accesses are supported in userspace, but they may perform poorly.
+Misaligned scalar accesses are supported in userspace, but they may perform
+poorly.  Misaligned vector accesses are only supported if the Zicclsm extension
+is supported.
index 7964fe134277b8b8337d620e524de7d24a15354e..6c1303cff159e16daba2bf7c9a5fe8b72a3ebcad 100644 (file)
@@ -217,7 +217,7 @@ current *struct* is::
                int (*media_changed)(struct cdrom_device_info *, int);
                int (*tray_move)(struct cdrom_device_info *, int);
                int (*lock_door)(struct cdrom_device_info *, int);
-               int (*select_speed)(struct cdrom_device_info *, int);
+               int (*select_speed)(struct cdrom_device_info *, unsigned long);
                int (*get_last_session) (struct cdrom_device_info *,
                                         struct cdrom_multisession *);
                int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *);
@@ -396,7 +396,7 @@ action need be taken, and the return value should be 0.
 
 ::
 
-       int select_speed(struct cdrom_device_info *cdi, int speed)
+       int select_speed(struct cdrom_device_info *cdi, unsigned long speed)
 
 Some CD-ROM drives are capable of changing their head-speed. There
 are several reasons for changing the speed of a CD-ROM drive. Badly
index 5ad2c9ca85bcbb87549135ee6e93215f06fbeb94..cf06bae44ff85bae217607f381146af7f446b96d 100644 (file)
@@ -192,7 +192,7 @@ alignment larger than PAGE_SIZE.
 
 Dynamic swiotlb
 ---------------
-When CONFIG_DYNAMIC_SWIOTLB is enabled, swiotlb can do on-demand expansion of
+When CONFIG_SWIOTLB_DYNAMIC is enabled, swiotlb can do on-demand expansion of
 the amount of memory available for allocation as bounce buffers. If a bounce
 buffer request fails due to lack of available space, an asynchronous background
 task is kicked off to allocate memory from general system memory and turn it
index d2dce238ff5d683ea0ddbdf575f53677fe129a51..3e996346b2644806486757b1dd36bc4cb215a51e 100644 (file)
@@ -54,11 +54,10 @@ unevaluatedProperties: false
 
 examples:
   - |
-    mlahb: ahb@38000000 {
+    ahb {
       compatible = "st,mlahb", "simple-bus";
       #address-cells = <1>;
       #size-cells = <1>;
-      reg = <0x10000000 0x40000>;
       ranges;
       dma-ranges = <0x00000000 0x38000000 0x10000>,
                    <0x10000000 0x10000000 0x60000>,
index c6d0d8d81ed4deb9dd9a79d52a91c669c74e324d..c2a158b75e4979eaf6fc148970cc08767f6de264 100644 (file)
@@ -57,17 +57,17 @@ properties:
           - const: allwinner,sun8i-v3s
 
       - description: Anbernic RG35XX (2024)
-      - items:
+        items:
           - const: anbernic,rg35xx-2024
           - const: allwinner,sun50i-h700
 
       - description: Anbernic RG35XX Plus
-      - items:
+        items:
           - const: anbernic,rg35xx-plus
           - const: allwinner,sun50i-h700
 
       - description: Anbernic RG35XX H
-      - items:
+        items:
           - const: anbernic,rg35xx-h
           - const: allwinner,sun50i-h700
 
index 828439398fdf98a591d4fd3272fa8edfef31ae07..fd4244fceced9f80d8f6682b09d977439df877e6 100644 (file)
@@ -24,6 +24,7 @@ properties:
 
   managers:
     type: object
+    additionalProperties: false
     description:
       List of the PD69208T4/PD69204T4/PD69208M PSE managers. Each manager
       have 4 or 8 physical ports according to the chip version. No need to
@@ -47,8 +48,9 @@ properties:
       - "#size-cells"
 
     patternProperties:
-      "^manager@0[0-9a-b]$":
+      "^manager@[0-9a-b]$":
         type: object
+        additionalProperties: false
         description:
           PD69208T4/PD69204T4/PD69208M PSE manager exposing 4 or 8 physical
           ports.
@@ -69,9 +71,14 @@ properties:
         patternProperties:
           '^port@[0-7]$':
             type: object
+            additionalProperties: false
+
+            properties:
+              reg:
+                maxItems: 1
+
             required:
               - reg
-            additionalProperties: false
 
         required:
           - reg
index 4147adb11e10190620860ef421e9f09c7251b08c..6992d56832bf95c793ff6665b3626d228701c1a7 100644 (file)
@@ -29,13 +29,31 @@ properties:
       of the ports conversion matrix that establishes relationship between
       the logical ports and the physical channels.
     type: object
+    additionalProperties: false
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
 
     patternProperties:
       '^channel@[0-7]$':
         type: object
+        additionalProperties: false
+
+        properties:
+          reg:
+            maxItems: 1
+
         required:
           - reg
 
+    required:
+      - "#address-cells"
+      - "#size-cells"
+
 unevaluatedProperties: false
 
 required:
index 11a32373365ab0e2b70999bdd64a351a6e53560d..959755be4d7f9c15abef7e74813cf8837b8a38fc 100644 (file)
@@ -349,6 +349,10 @@ attribute-sets:
           Number of packets dropped due to transient lack of resources, such as
           buffer space, host descriptors etc.
         type: uint
+      -
+        name: rx-csum-complete
+        doc: Number of packets that were marked as CHECKSUM_COMPLETE.
+        type: uint
       -
         name: rx-csum-unnecessary
         doc: Number of packets that were marked as CHECKSUM_UNNECESSARY.
index 72da7057e4cf9643c179493dce22f9e243f31d70..dceeb0d763aa232f418bc229a59a3a6d6b7540b2 100644 (file)
@@ -329,24 +329,23 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the
 sxdp_shared_umem_fd field as you registered the UMEM on that
 socket. These two sockets will now share one and the same UMEM.
 
-In this case, it is possible to use the NIC's packet steering
-capabilities to steer the packets to the right queue. This is not
-possible in the previous example as there is only one queue shared
-among sockets, so the NIC cannot do this steering as it can only steer
-between queues.
-
-In libxdp (or libbpf prior to version 1.0), you need to use the
-xsk_socket__create_shared() API as it takes a reference to a FILL ring
-and a COMPLETION ring that will be created for you and bound to the
-shared UMEM. You can use this function for all the sockets you create,
-or you can use it for the second and following ones and use
-xsk_socket__create() for the first one. Both methods yield the same
-result.
+There is no need to supply an XDP program like the one in the previous
+case where sockets were bound to the same queue id and
+device. Instead, use the NIC's packet steering capabilities to steer
+the packets to the right queue. In the previous example, there is only
+one queue shared among sockets, so the NIC cannot do this steering. It
+can only steer between queues.
+
+In libbpf, you need to use the xsk_socket__create_shared() API as it
+takes a reference to a FILL ring and a COMPLETION ring that will be
+created for you and bound to the shared UMEM. You can use this
+function for all the sockets you create, or you can use it for the
+second and following ones and use xsk_socket__create() for the first
+one. Both methods yield the same result.
 
 Note that a UMEM can be shared between sockets on the same queue id
 and device, as well as between queues on the same device and between
-devices at the same time. It is also possible to redirect to any
-socket as long as it is bound to the same umem with XDP_SHARED_UMEM.
+devices at the same time.
 
 XDP_USE_NEED_WAKEUP bind flag
 -----------------------------
@@ -823,10 +822,6 @@ A: The short answer is no, that is not supported at the moment. The
    switch, or other distribution mechanism, in your NIC to direct
    traffic to the correct queue id and socket.
 
-   Note that if you are using the XDP_SHARED_UMEM option, it is
-   possible to switch traffic between any socket bound to the same
-   umem.
-
 Q: My packets are sometimes corrupted. What is wrong?
 
 A: Care has to be taken not to feed the same buffer in the UMEM into
index fd96e4a3cef9c09382e34419ec3f8ac1c5514cf4..5e1fcfad1c4c3eef53cab205b35bc9f38be030dc 100644 (file)
@@ -227,7 +227,7 @@ preferably including links to previous postings, for example::
   The amount of mooing will depend on packet rate so should match
   the diurnal cycle quite well.
 
-  Signed-of-by: Joe Defarmer <[email protected]>
+  Signed-off-by: Joe Defarmer <[email protected]>
   ---
   v3:
     - add a note about time-of-day mooing fluctuation to the commit message
index d6c90161c7bfe3886e675396cb2d2d7b44acc3b3..aacccb376c28a197f1f47b36e94c9e95a388e082 100644 (file)
@@ -1107,7 +1107,6 @@ L:        [email protected]
 S:     Supported
 F:     Documentation/admin-guide/pm/amd-pstate.rst
 F:     drivers/cpufreq/amd-pstate*
-F:     include/linux/amd-pstate.h
 F:     tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
 
 AMD PTDMA DRIVER
@@ -3854,6 +3853,7 @@ BPF JIT for ARM64
 M:     Daniel Borkmann <[email protected]>
 M:     Alexei Starovoitov <[email protected]>
 M:     Puranjay Mohan <[email protected]>
+R:     Xu Kuohai <[email protected]>
 L:     [email protected]
 S:     Supported
 F:     arch/arm64/net/
@@ -5187,7 +5187,6 @@ F:        Documentation/devicetree/bindings/media/i2c/chrontel,ch7322.yaml
 F:     drivers/media/cec/i2c/ch7322.c
 
 CIRRUS LOGIC AUDIO CODEC DRIVERS
-M:     James Schulman <[email protected]>
 M:     David Rhodes <[email protected]>
 M:     Richard Fitzgerald <[email protected]>
 L:     [email protected] (moderated for non-subscribers)
@@ -15238,7 +15237,6 @@ F:      drivers/staging/most/
 F:     include/linux/most.h
 
 MOTORCOMM PHY DRIVER
-M:     Peter Geis <[email protected]>
 M:     Frank <[email protected]>
 L:     [email protected]
 S:     Maintained
@@ -21316,7 +21314,7 @@ F:      arch/riscv/boot/dts/starfive/
 
 STARFIVE DWMAC GLUE LAYER
 M:     Emil Renner Berthing <[email protected]>
-M:     Samin Guo <samin.guo@starfivetech.com>
+M:     Minda Chen <minda.chen@starfivetech.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
 F:     drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -22679,7 +22677,7 @@ L:      [email protected] (subscribers-only, for users in English)
 L:     [email protected] (subscribers-only, for developers in Japanese)
 L:     [email protected] (subscribers-only, for users in Japanese)
 S:     Maintained
-W:     https://tomoyo.osdn.jp/
+W:     https://tomoyo.sourceforge.net/
 F:     security/tomoyo/
 
 TOPSTAR LAPTOP EXTRAS DRIVER
index f975b639632809e8599b4b131f2c2e56ebf8f186..7f921ae547f11640bf044d9e7ba1b159ef431c87 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*
index ec44873c42d15418f129da4519536502a00a65a2..495f3023e4c1821eba1804d37c5b77b7c08fd086 100644 (file)
@@ -39,7 +39,7 @@
 
 /************** Functions that the back-end must provide **************/
 /* Extension for 32-bit operations. */
-inline u8 zext(u8 *buf, u8 rd);
+u8 zext(u8 *buf, u8 rd);
 /***** Moves *****/
 u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
 u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);
index 31bfb6e9ce00fbf75c124d4f635969d9caa15fa3..4458e409ca0a84dacfaee10423ad54eaa2752fa0 100644 (file)
@@ -62,7 +62,7 @@ enum {
  *   If/when we decide to add ARCv2 instructions that do use register pairs,
  *   the mapping, hopefully, doesn't need to be revisited.
  */
-const u8 bpf2arc[][2] = {
+static const u8 bpf2arc[][2] = {
        /* Return value from in-kernel function, and exit value from eBPF */
        [BPF_REG_0] = {ARC_R_8, ARC_R_9},
        /* Arguments from eBPF program to in-kernel function */
@@ -1302,7 +1302,7 @@ static u8 arc_b(u8 *buf, s32 offset)
 
 /************* Packers (Deal with BPF_REGs) **************/
 
-inline u8 zext(u8 *buf, u8 rd)
+u8 zext(u8 *buf, u8 rd)
 {
        if (rd != BPF_REG_FP)
                return arc_movi_r(buf, REG_HI(rd), 0);
@@ -2235,6 +2235,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
                        break;
                default:
                        /* The caller must have handled this. */
+                       break;
                }
        } else {
                /*
@@ -2253,6 +2254,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
                        break;
                default:
                        /* The caller must have handled this. */
+                       break;
                }
        }
 
@@ -2517,7 +2519,7 @@ u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size)
 #define JCC64_NR_OF_JMPS 3     /* Number of jumps in jcc64 template. */
 #define JCC64_INSNS_TO_END 3   /* Number of insn. inclusive the 2nd jmp to end. */
 #define JCC64_SKIP_JMP 1       /* Index of the "skip" jump to "end". */
-const struct {
+static const struct {
        /*
         * "jit_off" is common between all "jmp[]" and is coupled with
         * "cond" of each "jmp[]" instance. e.g.:
@@ -2883,7 +2885,7 @@ u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 curr_off, u32 targ_off)
  * The "ARC_CC_SET" becomes "CC_unequal" because of the "tst"
  * instruction that precedes the conditional branch.
  */
-const u8 arcv2_32_jmps[ARC_CC_LAST] = {
+static const u8 arcv2_32_jmps[ARC_CC_LAST] = {
        [ARC_CC_UGT] = CC_great_u,
        [ARC_CC_UGE] = CC_great_eq_u,
        [ARC_CC_ULT] = CC_less_u,
index 6f6b4ffccf2c29202c721a4fcf66504723a12413..e3628922c24a0cf5ea9e978bd65fa9334a2f5e2f 100644 (file)
@@ -159,7 +159,7 @@ static void jit_dump(const struct jit_context *ctx)
 /* Initialise the context so there's no garbage. */
 static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
 {
-       memset(ctx, 0, sizeof(ctx));
+       memset(ctx, 0, sizeof(*ctx));
 
        ctx->orig_prog = prog;
 
@@ -167,7 +167,7 @@ static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
        ctx->prog = bpf_jit_blind_constants(prog);
        if (IS_ERR(ctx->prog))
                return PTR_ERR(ctx->prog);
-       ctx->blinded = (ctx->prog == ctx->orig_prog ? false : true);
+       ctx->blinded = (ctx->prog != ctx->orig_prog);
 
        /* If the verifier doesn't zero-extend, then we have to do it. */
        ctx->do_zext = !ctx->prog->aux->verifier_zext;
@@ -1182,12 +1182,12 @@ static int jit_prepare(struct jit_context *ctx)
 }
 
 /*
- * All the "handle_*()" functions have been called before by the
- * "jit_prepare()". If there was an error, we would know by now.
- * Therefore, no extra error checking at this point, other than
- * a sanity check at the end that expects the calculated length
- * (jit.len) to be equal to the length of generated instructions
- * (jit.index).
+ * jit_compile() is the real compilation phase. jit_prepare() is
+ * invoked before jit_compile() as a dry-run to make sure everything
+ * will go OK and allocate the necessary memory.
+ *
+ * In the end, jit_compile() checks if it has produced the same number
+ * of instructions as jit_prepare() would.
  */
 static int jit_compile(struct jit_context *ctx)
 {
@@ -1407,9 +1407,9 @@ static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
 
 /*
  * This function may be invoked twice for the same stream of BPF
- * instructions. The "extra pass" happens, when there are "call"s
- * involved that their addresses are not known during the first
- * invocation.
+ * instructions. The "extra pass" happens, when there are
+ * (re)locations involved that their addresses are not known
+ * during the first run.
  */
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
index e4546b29dd0cbfbb5b902e15caadab69458b1fac..fd87c4b8f984039ceeba3d2da074f66293733a64 100644 (file)
 /* Coprocessor traps */
 .macro __init_el2_cptr
        __check_hvhe .LnVHE_\@, x1
-       mov     x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
+       mov     x0, #CPACR_ELx_FPEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_\@
 .LnVHE_\@:
 
        // (h)VHE case
        mrs     x0, cpacr_el1                   // Disable SVE traps
-       orr     x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+       orr     x0, x0, #CPACR_ELx_ZEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_\@
 
 
        // (h)VHE case
        mrs     x0, cpacr_el1                   // Disable SME traps
-       orr     x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
+       orr     x0, x0, #CPACR_ELx_SMEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_sme_\@
 
index 4ff0ae3f6d6690a0283f8b6d6fc545344acbde79..41fd90895dfc3d0fcdf9810680d5f2a64bc113f1 100644 (file)
@@ -153,8 +153,9 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
  * emit the large TLP from the CPU.
  */
 
-static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
-                                                const u32 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned32(volatile u32 __iomem *to, const u32 *from,
+                             size_t count)
 {
        switch (count) {
        case 8:
@@ -196,24 +197,22 @@ static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
 
 void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);
 
-static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
-                                         size_t count)
+static __always_inline void
+__iowrite32_copy(void __iomem *to, const void *from, size_t count)
 {
-       if (count == 8 || count == 4 || count == 2 || count == 1) {
+       if (__builtin_constant_p(count) &&
+           (count == 8 || count == 4 || count == 2 || count == 1)) {
                __const_memcpy_toio_aligned32(to, from, count);
                dgh();
        } else {
                __iowrite32_copy_full(to, from, count);
        }
 }
+#define __iowrite32_copy __iowrite32_copy
 
-#define __iowrite32_copy(to, from, count)                  \
-       (__builtin_constant_p(count) ?                     \
-                __const_iowrite32_copy(to, from, count) : \
-                __iowrite32_copy_full(to, from, count))
-
-static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
-                                                const u64 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned64(volatile u64 __iomem *to, const u64 *from,
+                             size_t count)
 {
        switch (count) {
        case 8:
@@ -255,21 +254,18 @@ static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
 
 void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count);
 
-static inline void __const_iowrite64_copy(void __iomem *to, const void *from,
-                                         size_t count)
+static __always_inline void
+__iowrite64_copy(void __iomem *to, const void *from, size_t count)
 {
-       if (count == 8 || count == 4 || count == 2 || count == 1) {
+       if (__builtin_constant_p(count) &&
+           (count == 8 || count == 4 || count == 2 || count == 1)) {
                __const_memcpy_toio_aligned64(to, from, count);
                dgh();
        } else {
                __iowrite64_copy_full(to, from, count);
        }
 }
-
-#define __iowrite64_copy(to, from, count)                  \
-       (__builtin_constant_p(count) ?                     \
-                __const_iowrite64_copy(to, from, count) : \
-                __iowrite64_copy_full(to, from, count))
+#define __iowrite64_copy __iowrite64_copy
 
 /*
  * I/O memory mapping functions.
index e01bb5ca13b7cce66ed0fc6d43a5a8edecc81413..b2adc2c6c82a545551064bad4079c485cc0a5084 100644 (file)
                                 GENMASK(19, 14) |      \
                                 BIT(11))
 
+#define CPTR_VHE_EL2_RES0      (GENMASK(63, 32) |      \
+                                GENMASK(27, 26) |      \
+                                GENMASK(23, 22) |      \
+                                GENMASK(19, 18) |      \
+                                GENMASK(15, 0))
+
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_E2TB_MASK     (UL(0x3))
 #define MDCR_EL2_E2TB_SHIFT    (UL(24))
index 501e3e019c930c77488331af26ba7007e9254b70..21650e7924d45846dacc0c99344d98c03f0eb574 100644 (file)
@@ -557,6 +557,68 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
                vcpu_set_flag((v), e);                                  \
        } while (0)
 
+#define __build_check_all_or_none(r, bits)                             \
+       BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))
+
+#define __cpacr_to_cptr_clr(clr, set)                                  \
+       ({                                                              \
+               u64 cptr = 0;                                           \
+                                                                       \
+               if ((set) & CPACR_ELx_FPEN)                             \
+                       cptr |= CPTR_EL2_TFP;                           \
+               if ((set) & CPACR_ELx_ZEN)                              \
+                       cptr |= CPTR_EL2_TZ;                            \
+               if ((set) & CPACR_ELx_SMEN)                             \
+                       cptr |= CPTR_EL2_TSM;                           \
+               if ((clr) & CPACR_ELx_TTA)                              \
+                       cptr |= CPTR_EL2_TTA;                           \
+               if ((clr) & CPTR_EL2_TAM)                               \
+                       cptr |= CPTR_EL2_TAM;                           \
+               if ((clr) & CPTR_EL2_TCPAC)                             \
+                       cptr |= CPTR_EL2_TCPAC;                         \
+                                                                       \
+               cptr;                                                   \
+       })
+
+#define __cpacr_to_cptr_set(clr, set)                                  \
+       ({                                                              \
+               u64 cptr = 0;                                           \
+                                                                       \
+               if ((clr) & CPACR_ELx_FPEN)                             \
+                       cptr |= CPTR_EL2_TFP;                           \
+               if ((clr) & CPACR_ELx_ZEN)                              \
+                       cptr |= CPTR_EL2_TZ;                            \
+               if ((clr) & CPACR_ELx_SMEN)                             \
+                       cptr |= CPTR_EL2_TSM;                           \
+               if ((set) & CPACR_ELx_TTA)                              \
+                       cptr |= CPTR_EL2_TTA;                           \
+               if ((set) & CPTR_EL2_TAM)                               \
+                       cptr |= CPTR_EL2_TAM;                           \
+               if ((set) & CPTR_EL2_TCPAC)                             \
+                       cptr |= CPTR_EL2_TCPAC;                         \
+                                                                       \
+               cptr;                                                   \
+       })
+
+#define cpacr_clear_set(clr, set)                                      \
+       do {                                                            \
+               BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0);                \
+               BUILD_BUG_ON((clr) & CPACR_ELx_E0POE);                  \
+               __build_check_all_or_none((clr), CPACR_ELx_FPEN);       \
+               __build_check_all_or_none((set), CPACR_ELx_FPEN);       \
+               __build_check_all_or_none((clr), CPACR_ELx_ZEN);        \
+               __build_check_all_or_none((set), CPACR_ELx_ZEN);        \
+               __build_check_all_or_none((clr), CPACR_ELx_SMEN);       \
+               __build_check_all_or_none((set), CPACR_ELx_SMEN);       \
+                                                                       \
+               if (has_vhe() || has_hvhe())                            \
+                       sysreg_clear_set(cpacr_el1, clr, set);          \
+               else                                                    \
+                       sysreg_clear_set(cptr_el2,                      \
+                                        __cpacr_to_cptr_clr(clr, set), \
+                                        __cpacr_to_cptr_set(clr, set));\
+       } while (0)
+
 static __always_inline void kvm_write_cptr_el2(u64 val)
 {
        if (has_vhe() || has_hvhe())
@@ -570,17 +632,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
        u64 val;
 
        if (has_vhe()) {
-               val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
-                      CPACR_EL1_ZEN_EL1EN);
+               val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
                if (cpus_have_final_cap(ARM64_SME))
                        val |= CPACR_EL1_SMEN_EL1EN;
        } else if (has_hvhe()) {
-               val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+               val = CPACR_ELx_FPEN;
 
                if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
-                       val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
+                       val |= CPACR_ELx_ZEN;
                if (cpus_have_final_cap(ARM64_SME))
-                       val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
+                       val |= CPACR_ELx_SMEN;
        } else {
                val = CPTR_NVHE_EL2_RES1;
 
index 8170c04fde914c8d09f299f82c9492c8a2afea5b..36b8e97bf49ec4e742f58f1f8cccccf2ed015666 100644 (file)
@@ -76,6 +76,7 @@ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
 extern unsigned int __ro_after_init kvm_sve_max_vl;
+extern unsigned int __ro_after_init kvm_host_sve_max_vl;
 int __init kvm_arm_init_sve(void);
 
 u32 __attribute_const__ kvm_target_cpu(void);
@@ -521,6 +522,20 @@ struct kvm_cpu_context {
        u64 *vncr_array;
 };
 
+struct cpu_sve_state {
+       __u64 zcr_el1;
+
+       /*
+        * Ordering is important since __sve_save_state/__sve_restore_state
+        * relies on it.
+        */
+       __u32 fpsr;
+       __u32 fpcr;
+
+       /* Must be SVE_VQ_BYTES (128 bit) aligned. */
+       __u8 sve_regs[];
+};
+
 /*
  * This structure is instantiated on a per-CPU basis, and contains
  * data that is:
@@ -534,7 +549,15 @@ struct kvm_cpu_context {
  */
 struct kvm_host_data {
        struct kvm_cpu_context host_ctxt;
-       struct user_fpsimd_state *fpsimd_state; /* hyp VA */
+
+       /*
+        * All pointers in this union are hyp VA.
+        * sve_state is only used in pKVM and if system_supports_sve().
+        */
+       union {
+               struct user_fpsimd_state *fpsimd_state;
+               struct cpu_sve_state *sve_state;
+       };
 
        /* Ownership of the FP regs */
        enum {
index 3e80464f8953172703f01c3cc2202ddff75ecede..b05bceca33850d454c450ae84c9c973c872a1ef7 100644 (file)
@@ -111,7 +111,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
 
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
-void __sve_restore_state(void *sve_pffr, u32 *fpsr);
+void __sve_save_state(void *sve_pffr, u32 *fpsr, int save_ffr);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr, int restore_ffr);
 
 u64 __guest_enter(struct kvm_vcpu *vcpu);
 
@@ -142,5 +143,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
 
 extern unsigned long kvm_nvhe_sym(__icache_flags);
 extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
+extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
 
 #endif /* __ARM64_KVM_HYP_H__ */
index ad9cfb5c1ff4e6b9e8352331d1be46afc732a81e..cd56acd9a842ccfaaa486cb4fef22e3777b90172 100644 (file)
@@ -128,4 +128,13 @@ static inline unsigned long hyp_ffa_proxy_pages(void)
        return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
 }
 
+static inline size_t pkvm_host_sve_state_size(void)
+{
+       if (!system_supports_sve())
+               return 0;
+
+       return size_add(sizeof(struct cpu_sve_state),
+                       SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
+}
+
 #endif /* __ARM64_KVM_PKVM_H__ */
index dd6ce86d4332be4c0cff75f52ac88e366f05754a..b776e7424fe914dee57698257984466e8da9fd9e 100644 (file)
@@ -462,6 +462,9 @@ static int run_all_insn_set_hw_mode(unsigned int cpu)
        for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
                struct insn_emulation *insn = insn_emulations[i];
                bool enable = READ_ONCE(insn->current_mode) == INSN_HW;
+               if (insn->status == INSN_UNAVAILABLE)
+                       continue;
+
                if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
                        pr_warn("CPU[%u] cannot support the emulation of %s",
                                cpu, insn->name);
index 9996a989b52e8742566ff6a8fa978981e2a91d49..59716789fe0f38186cd1700f92757b005f48d1cb 100644 (file)
@@ -1931,6 +1931,11 @@ static unsigned long nvhe_percpu_order(void)
        return size ? get_order(size) : 0;
 }
 
+static size_t pkvm_host_sve_state_order(void)
+{
+       return get_order(pkvm_host_sve_state_size());
+}
+
 /* A lookup table holding the hypervisor VA for each vector slot */
 static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
 
@@ -2310,12 +2315,20 @@ static void __init teardown_subsystems(void)
 
 static void __init teardown_hyp_mode(void)
 {
+       bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
        int cpu;
 
        free_hyp_pgds();
        for_each_possible_cpu(cpu) {
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
                free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
+
+               if (free_sve) {
+                       struct cpu_sve_state *sve_state;
+
+                       sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
+                       free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
+               }
        }
 }
 
@@ -2398,6 +2411,58 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
        return 0;
 }
 
+static int init_pkvm_host_sve_state(void)
+{
+       int cpu;
+
+       if (!system_supports_sve())
+               return 0;
+
+       /* Allocate pages for host sve state in protected mode. */
+       for_each_possible_cpu(cpu) {
+               struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());
+
+               if (!page)
+                       return -ENOMEM;
+
+               per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
+       }
+
+       /*
+        * Don't map the pages in hyp since these are only used in protected
+        * mode, which will (re)create its own mapping when initialized.
+        */
+
+       return 0;
+}
+
+/*
+ * Finalizes the initialization of hyp mode, once everything else is initialized
+ * and the initialization process cannot fail.
+ */
+static void finalize_init_hyp_mode(void)
+{
+       int cpu;
+
+       if (system_supports_sve() && is_protected_kvm_enabled()) {
+               for_each_possible_cpu(cpu) {
+                       struct cpu_sve_state *sve_state;
+
+                       sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
+                       per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
+                               kern_hyp_va(sve_state);
+               }
+       } else {
+               for_each_possible_cpu(cpu) {
+                       struct user_fpsimd_state *fpsimd_state;
+
+                       fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
+                       per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
+                               kern_hyp_va(fpsimd_state);
+               }
+       }
+}
+
 static void pkvm_hyp_init_ptrauth(void)
 {
        struct kvm_cpu_context *hyp_ctxt;
@@ -2566,6 +2631,10 @@ static int __init init_hyp_mode(void)
                        goto out_err;
                }
 
+               err = init_pkvm_host_sve_state();
+               if (err)
+                       goto out_err;
+
                err = kvm_hyp_init_protection(hyp_va_bits);
                if (err) {
                        kvm_err("Failed to init hyp memory protection\n");
@@ -2730,6 +2799,13 @@ static __init int kvm_arm_init(void)
        if (err)
                goto out_subs;
 
+       /*
+        * This should be called after initialization is done and failure isn't
+        * possible anymore.
+        */
+       if (!in_hyp_mode)
+               finalize_init_hyp_mode();
+
        kvm_arm_initialised = true;
 
        return 0;
index 72d733c74a382f264e48394c6cb88f0fe9f25f45..54090967a33567008da56b9fbef6dd6711756a09 100644 (file)
@@ -2181,16 +2181,23 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
        if (forward_traps(vcpu, HCR_NV))
                return;
 
+       spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);
+       spsr = kvm_check_illegal_exception_return(vcpu, spsr);
+
        /* Check for an ERETAx */
        esr = kvm_vcpu_get_esr(vcpu);
        if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
                /*
-                * Oh no, ERETAx failed to authenticate.  If we have
-                * FPACCOMBINE, deliver an exception right away.  If we
-                * don't, then let the mangled ELR value trickle down the
+                * Oh no, ERETAx failed to authenticate.
+                *
+                * If we have FPACCOMBINE and we don't have a pending
+                * Illegal Execution State exception (which has priority
+                * over FPAC), deliver an exception right away.
+                *
+                * Otherwise, let the mangled ELR value trickle down the
                 * ERET handling, and the guest will have a little surprise.
                 */
-               if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
+               if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE) && !(spsr & PSR_IL_BIT)) {
                        esr &= ESR_ELx_ERET_ISS_ERETA;
                        esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
                        kvm_inject_nested_sync(vcpu, esr);
@@ -2201,17 +2208,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
 
-       spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
-       spsr = kvm_check_illegal_exception_return(vcpu, spsr);
        if (!esr_iss_is_eretax(esr))
                elr = __vcpu_sys_reg(vcpu, ELR_EL2);
 
        trace_kvm_nested_eret(vcpu, elr, spsr);
 
-       /*
-        * Note that the current exception level is always the virtual EL2,
-        * since we set HCR_EL2.NV bit only when entering the virtual EL2.
-        */
        *vcpu_pc(vcpu) = elr;
        *vcpu_cpsr(vcpu) = spsr;
 
index 1807d3a79a8af8dbc22a5c34f4e289d3359b90f2..521b32868d0d26e7ea802b918aea55cdb407f761 100644 (file)
@@ -90,6 +90,13 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
                        fpsimd_save_and_flush_cpu_state();
                }
        }
+
+       /*
+        * If normal guests gain SME support, maintain this behavior for pKVM
+        * guests, which don't support SME.
+        */
+       WARN_ON(is_protected_kvm_enabled() && system_supports_sme() &&
+               read_sysreg_s(SYS_SVCR));
 }
 
 /*
@@ -161,9 +168,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
        if (has_vhe() && system_supports_sme()) {
                /* Also restore EL0 state seen on entry */
                if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
-                       sysreg_clear_set(CPACR_EL1, 0,
-                                        CPACR_EL1_SMEN_EL0EN |
-                                        CPACR_EL1_SMEN_EL1EN);
+                       sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
                else
                        sysreg_clear_set(CPACR_EL1,
                                         CPACR_EL1_SMEN_EL0EN,
index e2f762d959bb3325173ae05bd9ae93ad5c308cef..11098eb7eb44af659bda1c9a9b2d047ff143218e 100644 (file)
@@ -251,6 +251,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                case PSR_AA32_MODE_SVC:
                case PSR_AA32_MODE_ABT:
                case PSR_AA32_MODE_UND:
+               case PSR_AA32_MODE_SYS:
                        if (!vcpu_el1_is_32bit(vcpu))
                                return -EINVAL;
                        break;
@@ -276,7 +277,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
                int i, nr_reg;
 
-               switch (*vcpu_cpsr(vcpu)) {
+               switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
                /*
                 * Either we are dealing with user mode, and only the
                 * first 15 registers (+ PC) must be narrowed to 32bit.
index 8d9670e6615dc8767e3b1524e04fcd96ae6f63c8..449fa58cf3b63b1cc3d990b76e3d07d4180dc06b 100644 (file)
@@ -50,9 +50,23 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
        u32 cpsr_cond;
        int cond;
 
-       /* Top two bits non-zero?  Unconditional. */
-       if (kvm_vcpu_get_esr(vcpu) >> 30)
+       /*
+        * These are the exception classes that could fire with a
+        * conditional instruction.
+        */
+       switch (kvm_vcpu_trap_get_class(vcpu)) {
+       case ESR_ELx_EC_CP15_32:
+       case ESR_ELx_EC_CP15_64:
+       case ESR_ELx_EC_CP14_MR:
+       case ESR_ELx_EC_CP14_LS:
+       case ESR_ELx_EC_FP_ASIMD:
+       case ESR_ELx_EC_CP10_ID:
+       case ESR_ELx_EC_CP14_64:
+       case ESR_ELx_EC_SVC32:
+               break;
+       default:
                return true;
+       }
 
        /* Is condition field valid? */
        cond = kvm_vcpu_get_condition(vcpu);
index 61e6f3ba7b7d17b5d62e3eedf834cdfc1494417d..e950875e31cee4df58d041519b7584356463c91b 100644 (file)
@@ -25,3 +25,9 @@ SYM_FUNC_START(__sve_restore_state)
        sve_load 0, x1, x2, 3
        ret
 SYM_FUNC_END(__sve_restore_state)
+
+SYM_FUNC_START(__sve_save_state)
+       mov     x2, #1
+       sve_save 0, x1, x2, 3
+       ret
+SYM_FUNC_END(__sve_save_state)
index a92566f36022e65339bf7bfddd9691a961817a04..0c4de44534b7a8bb183cfffa1a414c9dc6814687 100644 (file)
@@ -316,10 +316,24 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 {
        sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
        __sve_restore_state(vcpu_sve_pffr(vcpu),
-                           &vcpu->arch.ctxt.fp_regs.fpsr);
+                           &vcpu->arch.ctxt.fp_regs.fpsr,
+                           true);
        write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
 }
 
+static inline void __hyp_sve_save_host(void)
+{
+       struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
+
+       sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
+       write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+       __sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
+                        &sve_state->fpsr,
+                        true);
+}
+
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
+
 /*
  * We trap the first access to the FP/SIMD to save the host context and
  * restore the guest context lazily.
@@ -330,7 +344,6 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
        bool sve_guest;
        u8 esr_ec;
-       u64 reg;
 
        if (!system_supports_fpsimd())
                return false;
@@ -353,24 +366,15 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
        /* Valid trap.  Switch the context: */
 
        /* First disable enough traps to allow us to update the registers */
-       if (has_vhe() || has_hvhe()) {
-               reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
-               if (sve_guest)
-                       reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
-
-               sysreg_clear_set(cpacr_el1, 0, reg);
-       } else {
-               reg = CPTR_EL2_TFP;
-               if (sve_guest)
-                       reg |= CPTR_EL2_TZ;
-
-               sysreg_clear_set(cptr_el2, reg, 0);
-       }
+       if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
+               cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+       else
+               cpacr_clear_set(0, CPACR_ELx_FPEN);
        isb();
 
        /* Write out the host state if it's in the registers */
        if (host_owns_fp_regs())
-               __fpsimd_save_state(*host_data_ptr(fpsimd_state));
+               kvm_hyp_save_fpsimd_host(vcpu);
 
        /* Restore the guest state */
        if (sve_guest)
index 22f374e9f532968937fd179c6732e458cffeceee..24a9a8330d190396e90abc7202e6b7e86ea1de63 100644 (file)
@@ -59,7 +59,6 @@ static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
 }
 
 void pkvm_hyp_vm_table_init(void *tbl);
-void pkvm_host_fpsimd_state_init(void);
 
 int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
                   unsigned long pgd_hva);
index d5c48dc98f67947b9c1cd1dee7991594dfb74e87..f43d845f3c4ecdc41c1c661f8bbbb8165678897b 100644 (file)
@@ -23,20 +23,80 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
 
+static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
+{
+       __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+       /*
+        * On saving/restoring guest sve state, always use the maximum VL for
+        * the guest. The layout of the data when saving the sve state depends
+        * on the VL, so use a consistent (i.e., the maximum) guest VL.
+        */
+       sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
+       __sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
+       write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+}
+
+static void __hyp_sve_restore_host(void)
+{
+       struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
+
+       /*
+        * On saving/restoring host sve state, always use the maximum VL for
+        * the host. The layout of the data when saving the sve state depends
+        * on the VL, so use a consistent (i.e., the maximum) host VL.
+        *
+        * Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length
+        * supported by the system (or limited at EL3).
+        */
+       write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+       __sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
+                           &sve_state->fpsr,
+                           true);
+       write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
+}
+
+static void fpsimd_sve_flush(void)
+{
+       *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
+static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
+{
+       if (!guest_owns_fp_regs())
+               return;
+
+       cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+       isb();
+
+       if (vcpu_has_sve(vcpu))
+               __hyp_sve_save_guest(vcpu);
+       else
+               __fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
+
+       if (system_supports_sve())
+               __hyp_sve_restore_host();
+       else
+               __fpsimd_restore_state(*host_data_ptr(fpsimd_state));
+
+       *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
 static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 {
        struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
 
+       fpsimd_sve_flush();
+
        hyp_vcpu->vcpu.arch.ctxt        = host_vcpu->arch.ctxt;
 
        hyp_vcpu->vcpu.arch.sve_state   = kern_hyp_va(host_vcpu->arch.sve_state);
-       hyp_vcpu->vcpu.arch.sve_max_vl  = host_vcpu->arch.sve_max_vl;
+       /* Limit guest vector length to the maximum supported by the host.  */
+       hyp_vcpu->vcpu.arch.sve_max_vl  = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
 
        hyp_vcpu->vcpu.arch.hw_mmu      = host_vcpu->arch.hw_mmu;
 
        hyp_vcpu->vcpu.arch.hcr_el2     = host_vcpu->arch.hcr_el2;
        hyp_vcpu->vcpu.arch.mdcr_el2    = host_vcpu->arch.mdcr_el2;
-       hyp_vcpu->vcpu.arch.cptr_el2    = host_vcpu->arch.cptr_el2;
 
        hyp_vcpu->vcpu.arch.iflags      = host_vcpu->arch.iflags;
 
@@ -54,10 +114,11 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
        struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
        unsigned int i;
 
+       fpsimd_sve_sync(&hyp_vcpu->vcpu);
+
        host_vcpu->arch.ctxt            = hyp_vcpu->vcpu.arch.ctxt;
 
        host_vcpu->arch.hcr_el2         = hyp_vcpu->vcpu.arch.hcr_el2;
-       host_vcpu->arch.cptr_el2        = hyp_vcpu->vcpu.arch.cptr_el2;
 
        host_vcpu->arch.fault           = hyp_vcpu->vcpu.arch.fault;
 
@@ -79,6 +140,17 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
                struct pkvm_hyp_vcpu *hyp_vcpu;
                struct kvm *host_kvm;
 
+               /*
+                * KVM (and pKVM) doesn't support SME guests for now, and
+                * ensures that SME features aren't enabled in pstate when
+                * loading a vcpu. Therefore, if SME features enabled the host
+                * is misbehaving.
+                */
+               if (unlikely(system_supports_sme() && read_sysreg_s(SYS_SVCR))) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                host_kvm = kern_hyp_va(host_vcpu->kvm);
                hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
                                              host_vcpu->vcpu_idx);
@@ -405,11 +477,7 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
                handle_host_smc(host_ctxt);
                break;
        case ESR_ELx_EC_SVE:
-               if (has_hvhe())
-                       sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
-                                                       CPACR_EL1_ZEN_EL0EN));
-               else
-                       sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
+               cpacr_clear_set(0, CPACR_ELx_ZEN);
                isb();
                sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
                break;
index 16aa4875ddb8c05c1c5761f264e1c2826ae6d1bc..95cf1857425175dd1779442c5c76f17c2b62d766 100644 (file)
@@ -18,6 +18,8 @@ unsigned long __icache_flags;
 /* Used by kvm_get_vttbr(). */
 unsigned int kvm_arm_vmid_bits;
 
+unsigned int kvm_host_sve_max_vl;
+
 /*
  * Set trap register values based on features in ID_AA64PFR0.
  */
@@ -63,7 +65,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
        /* Trap SVE */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
                if (has_hvhe())
-                       cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+                       cptr_clear |= CPACR_ELx_ZEN;
                else
                        cptr_set |= CPTR_EL2_TZ;
        }
@@ -247,17 +249,6 @@ void pkvm_hyp_vm_table_init(void *tbl)
        vm_table = tbl;
 }
 
-void pkvm_host_fpsimd_state_init(void)
-{
-       unsigned long i;
-
-       for (i = 0; i < hyp_nr_cpus; i++) {
-               struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
-
-               host_data->fpsimd_state = &host_data->host_ctxt.fp_regs;
-       }
-}
-
 /*
  * Return the hyp vm structure corresponding to the handle.
  */
@@ -586,6 +577,8 @@ unlock:
        if (ret)
                unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
 
+       hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
+
        return ret;
 }
 
index 859f22f754d373a8d74cbd613f3d2e62e009a211..f4350ba07b0b0c37440af0783d313108e1cc0a92 100644 (file)
@@ -67,6 +67,28 @@ static int divide_memory_pool(void *virt, unsigned long size)
        return 0;
 }
 
+static int pkvm_create_host_sve_mappings(void)
+{
+       void *start, *end;
+       int ret, i;
+
+       if (!system_supports_sve())
+               return 0;
+
+       for (i = 0; i < hyp_nr_cpus; i++) {
+               struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
+               struct cpu_sve_state *sve_state = host_data->sve_state;
+
+               start = kern_hyp_va(sve_state);
+               end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
+               ret = pkvm_create_mappings(start, end, PAGE_HYP);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
                                 unsigned long *per_cpu_base,
                                 u32 hyp_va_bits)
@@ -125,6 +147,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
                        return ret;
        }
 
+       pkvm_create_host_sve_mappings();
+
        /*
         * Map the host sections RO in the hypervisor, but transfer the
         * ownership from the host to the hypervisor itself to make sure they
@@ -300,7 +324,6 @@ void __noreturn __pkvm_init_finalise(void)
                goto out;
 
        pkvm_hyp_vm_table_init(vm_table_base);
-       pkvm_host_fpsimd_state_init();
 out:
        /*
         * We tail-called to here from handle___pkvm_init() and will not return,
index 6758cd90557061810c2d9baf8bf29d04a8a0eb05..6af179c6356d666b68e426429572ce6d559bbd82 100644 (file)
@@ -48,15 +48,14 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
        val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
        if (cpus_have_final_cap(ARM64_SME)) {
                if (has_hvhe())
-                       val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
+                       val &= ~CPACR_ELx_SMEN;
                else
                        val |= CPTR_EL2_TSM;
        }
 
        if (!guest_owns_fp_regs()) {
                if (has_hvhe())
-                       val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
-                                CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+                       val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
                else
                        val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
 
@@ -182,6 +181,25 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
                kvm_handle_pvm_sysreg(vcpu, exit_code));
 }
 
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
+{
+       /*
+        * Non-protected kvm relies on the host restoring its sve state.
+        * Protected kvm restores the host's sve state as not to reveal that
+        * fpsimd was used by a guest nor leak upper sve bits.
+        */
+       if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
+               __hyp_sve_save_host();
+
+               /* Re-enable SVE traps if not supported for the guest vcpu. */
+               if (!vcpu_has_sve(vcpu))
+                       cpacr_clear_set(CPACR_ELx_ZEN, 0);
+
+       } else {
+               __fpsimd_save_state(*host_data_ptr(fpsimd_state));
+       }
+}
+
 static const exit_handler_fn hyp_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]          = NULL,
        [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
index d7af5f46f22a3ec167c868f89181f9d91178e7f7..8fbb6a2e0559d7e7d8d90ce11b618f35a723c46e 100644 (file)
@@ -93,8 +93,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 
        val = read_sysreg(cpacr_el1);
        val |= CPACR_ELx_TTA;
-       val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
-                CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
+       val &= ~(CPACR_ELx_ZEN | CPACR_ELx_SMEN);
 
        /*
         * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
@@ -109,9 +108,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 
        if (guest_owns_fp_regs()) {
                if (vcpu_has_sve(vcpu))
-                       val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+                       val |= CPACR_ELx_ZEN;
        } else {
-               val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+               val &= ~CPACR_ELx_FPEN;
                __activate_traps_fpsimd32(vcpu);
        }
 
@@ -262,6 +261,11 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
        return true;
 }
 
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
+{
+       __fpsimd_save_state(*host_data_ptr(fpsimd_state));
+}
+
 static const exit_handler_fn hyp_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]          = NULL,
        [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
index 6813c7c7f00ab2eedf9d04a9a84bfb5f4858ca77..bae8536cbf003e9b949db4df115f0a7815e55554 100644 (file)
@@ -58,8 +58,10 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
                break;
 
        case SYS_ID_AA64PFR1_EL1:
-               /* Only support SSBS */
-               val &= NV_FTR(PFR1, SSBS);
+               /* Only support BTI, SSBS, CSV2_frac */
+               val &= (NV_FTR(PFR1, BT)        |
+                       NV_FTR(PFR1, SSBS)      |
+                       NV_FTR(PFR1, CSV2_frac));
                break;
 
        case SYS_ID_AA64MMFR0_EL1:
index 1b7b58cb121f98c3746da33c2d973ef49a428500..3fc8ca164dbe4e8f3da73068a240164ecc91bd17 100644 (file)
@@ -32,6 +32,7 @@
 
 /* Maximum phys_shift supported for any VM on this host */
 static u32 __ro_after_init kvm_ipa_limit;
+unsigned int __ro_after_init kvm_host_sve_max_vl;
 
 /*
  * ARMv8 Reset Values
@@ -51,6 +52,8 @@ int __init kvm_arm_init_sve(void)
 {
        if (system_supports_sve()) {
                kvm_sve_max_vl = sve_max_virtualisable_vl();
+               kvm_host_sve_max_vl = sve_max_vl();
+               kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
 
                /*
                 * The get_sve_reg()/set_sve_reg() ioctl interface will need
index 8aefb0c126722980a345062cae02a6127c02b52e..a34734a6c3ce802f0b735c2689212a459a6372f2 100644 (file)
 &gmac0 {
        status = "okay";
 
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        bus_id = <0x0>;
 };
 
 &gmac1 {
        status = "okay";
 
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        bus_id = <0x1>;
 };
 
index 8463fe035386e40daa112740fd9804af6195fb37..23cf26cc3e5f19703220bd20a8e12e1e61a1e941 100644 (file)
@@ -43,7 +43,7 @@
 &gmac0 {
        status = "okay";
 
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-handle = <&phy0>;
        mdio {
                compatible = "snps,dwmac-mdio";
@@ -58,7 +58,7 @@
 &gmac1 {
        status = "okay";
 
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-handle = <&phy1>;
        mdio {
                compatible = "snps,dwmac-mdio";
index 74b99bd234cc38df9a087915280e86ddb5bd56d4..ea9e6985d0e9fca980eaad34ca4089b6164ad447 100644 (file)
@@ -92,7 +92,7 @@
 &gmac2 {
        status = "okay";
 
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-handle = <&phy2>;
        mdio {
                compatible = "snps,dwmac-mdio";
index 27f319b498625718d2349618d39008327b2f9935..b5f9de9f102e444bcb4e563bddc704f52f3a940e 100644 (file)
@@ -56,6 +56,7 @@ extern int early_cpu_to_node(int cpu);
 static inline void early_numa_add_cpu(int cpuid, s16 node)     { }
 static inline void numa_add_cpu(unsigned int cpu)              { }
 static inline void numa_remove_cpu(unsigned int cpu)           { }
+static inline void set_cpuid_to_node(int cpuid, s16 node)      { }
 
 static inline int early_cpu_to_node(int cpu)
 {
index 45b507a7b06fca8af6e16b36a2a1c3f538368eb2..d9eafd3ee3d1e848675f54f033163ecc66e07d72 100644 (file)
@@ -42,7 +42,7 @@
        .macro JUMP_VIRT_ADDR temp1 temp2
        li.d    \temp1, CACHE_BASE
        pcaddi  \temp2, 0
-       or      \temp1, \temp1, \temp2
+       bstrins.d  \temp1, \temp2, (DMW_PABITS - 1), 0
        jirl    zero, \temp1, 0xc
        .endm
 
index c4f7de2e28054ceb6458c964c26e86596cea8602..4677ea8fa8e98cad8b11b5ea89d7811500be738e 100644 (file)
@@ -22,7 +22,7 @@
 _head:
        .word   MZ_MAGIC                /* "MZ", MS-DOS header */
        .org    0x8
-       .dword  kernel_entry            /* Kernel entry point */
+       .dword  _kernel_entry           /* Kernel entry point (physical address) */
        .dword  _kernel_asize           /* Kernel image effective size */
        .quad   PHYS_LINK_KADDR         /* Kernel image load offset from start of RAM */
        .org    0x38                    /* 0x20 ~ 0x37 reserved */
index 60e0fe97f61a31604b095044395c40ba6fd5efc8..3d048f1be1438863831e19817711292558d75149 100644 (file)
@@ -282,7 +282,7 @@ static void __init fdt_setup(void)
                return;
 
        /* Prefer to use built-in dtb, checking its legality first. */
-       if (!fdt_check_header(__dtb_start))
+       if (IS_ENABLED(CONFIG_BUILTIN_DTB) && !fdt_check_header(__dtb_start))
                fdt_pointer = __dtb_start;
        else
                fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */
@@ -351,10 +351,8 @@ void __init platform_init(void)
        arch_reserve_vmcore();
        arch_reserve_crashkernel();
 
-#ifdef CONFIG_ACPI_TABLE_UPGRADE
-       acpi_table_upgrade();
-#endif
 #ifdef CONFIG_ACPI
+       acpi_table_upgrade();
        acpi_gbl_use_default_register_widths = false;
        acpi_boot_table_init();
 #endif
index 0dfe2388ef413b673185ab88951b06c532559fff..1436d2465939b2d79744833c8c5a4abe3ead8916 100644 (file)
@@ -273,7 +273,6 @@ static void __init fdt_smp_setup(void)
 
                if (cpuid == loongson_sysconf.boot_cpu_id) {
                        cpu = 0;
-                       numa_add_cpu(cpu);
                } else {
                        cpu = cpumask_next_zero(-1, cpu_present_mask);
                }
@@ -283,6 +282,9 @@ static void __init fdt_smp_setup(void)
                set_cpu_present(cpu, true);
                __cpu_number_map[cpuid] = cpu;
                __cpu_logical_map[cpu] = cpuid;
+
+               early_numa_add_cpu(cpu, 0);
+               set_cpuid_to_node(cpuid, 0);
        }
 
        loongson_sysconf.nr_cpus = num_processors;
@@ -468,6 +470,7 @@ void smp_prepare_boot_cpu(void)
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
        set_my_cpu_offset(per_cpu_offset(0));
+       numa_add_cpu(0);
 
        rr_node = first_node(node_online_map);
        for_each_possible_cpu(cpu) {
index e8e97dbf9ca40ff243e33f1d2cfa9fdbb59d54b4..3c7595342730ed2c1e76c343e16553180fbd167d 100644 (file)
@@ -6,6 +6,7 @@
 
 #define PAGE_SIZE _PAGE_SIZE
 #define RO_EXCEPTION_TABLE_ALIGN       4
+#define PHYSADDR_MASK                  0xffffffffffff /* 48-bit */
 
 /*
  * Put .bss..swapper_pg_dir as the first thing in .bss. This will
@@ -142,10 +143,11 @@ SECTIONS
 
 #ifdef CONFIG_EFI_STUB
        /* header symbols */
-       _kernel_asize = _end - _text;
-       _kernel_fsize = _edata - _text;
-       _kernel_vsize = _end - __initdata_begin;
-       _kernel_rsize = _edata - __initdata_begin;
+       _kernel_entry = ABSOLUTE(kernel_entry & PHYSADDR_MASK);
+       _kernel_asize = ABSOLUTE(_end - _text);
+       _kernel_fsize = ABSOLUTE(_edata - _text);
+       _kernel_vsize = ABSOLUTE(_end - __initdata_begin);
+       _kernel_rsize = ABSOLUTE(_edata - __initdata_begin);
 #endif
 
        .gptab.sdata : {
index 3c968f2f4ac44774398e76cf132698e0f18677e4..c88c6d46a5bc0127e29fa2179b58d8cb0409f149 100644 (file)
@@ -137,7 +137,7 @@ config PPC
        select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_HUGEPD                  if HUGETLB_PAGE
        select ARCH_HAS_KCOV
-       select ARCH_HAS_KERNEL_FPU_SUPPORT      if PPC_FPU
+       select ARCH_HAS_KERNEL_FPU_SUPPORT      if PPC64 && PPC_FPU
        select ARCH_HAS_MEMBARRIER_CALLBACKS
        select ARCH_HAS_MEMBARRIER_SYNC_CORE
        select ARCH_HAS_MEMREMAP_COMPAT_ALIGN   if PPC_64S_HASH_MMU
index de10437fd20652ee63a6d214638bded13cdbc6c3..fd594bf6c6a9c5e4a80c9dd5933cd5d1797a326e 100644 (file)
@@ -92,9 +92,25 @@ __pu_failed:                                                 \
                : label)
 #endif
 
+#ifdef CONFIG_CC_IS_CLANG
+#define DS_FORM_CONSTRAINT "Z<>"
+#else
+#define DS_FORM_CONSTRAINT "YZ<>"
+#endif
+
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
 #define __put_user_asm2_goto(x, ptr, label)                    \
        __put_user_asm_goto(x, ptr, label, "std")
+#else
+#define __put_user_asm2_goto(x, addr, label)                   \
+       asm goto ("1: std%U1%X1 %0,%1   # put_user\n"           \
+               EX_TABLE(1b, %l2)                               \
+               :                                               \
+               : "r" (x), DS_FORM_CONSTRAINT (*addr)           \
+               :                                               \
+               : label)
+#endif // CONFIG_PPC_KERNEL_PREFIXED
 #else /* __powerpc64__ */
 #define __put_user_asm2_goto(x, addr, label)                   \
        asm goto(                                       \
@@ -165,8 +181,19 @@ do {                                                               \
 #endif
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
 #define __get_user_asm2_goto(x, addr, label)                   \
        __get_user_asm_goto(x, addr, label, "ld")
+#else
+#define __get_user_asm2_goto(x, addr, label)                   \
+       asm_goto_output(                                        \
+               "1:     ld%U1%X1 %0, %1 # get_user\n"           \
+               EX_TABLE(1b, %l2)                               \
+               : "=r" (x)                                      \
+               : DS_FORM_CONSTRAINT (*addr)                    \
+               :                                               \
+               : label)
+#endif // CONFIG_PPC_KERNEL_PREFIXED
 #else /* __powerpc64__ */
 #define __get_user_asm2_goto(x, addr, label)                   \
        asm_goto_output(                                        \
index 43b97032a91c00a5b9ed8cc56c603c3c71dbc999..a0c4f1bde83e86da7524d56aa8a3408a6cacc647 100644 (file)
@@ -900,6 +900,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
 
                        /* Get offset into TMP_REG */
                        EMIT(PPC_RAW_LI(tmp_reg, off));
+                       /*
+                        * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+                        * before and after the operation.
+                        *
+                        * This is a requirement in the Linux Kernel Memory Model.
+                        * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
+                        */
+                       if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+                               EMIT(PPC_RAW_SYNC());
                        tmp_idx = ctx->idx * 4;
                        /* load value from memory into r0 */
                        EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
@@ -953,6 +962,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
 
                        /* For the BPF_FETCH variant, get old data into src_reg */
                        if (imm & BPF_FETCH) {
+                               /* Emit 'sync' to enforce full ordering */
+                               if (IS_ENABLED(CONFIG_SMP))
+                                       EMIT(PPC_RAW_SYNC());
                                EMIT(PPC_RAW_MR(ret_reg, ax_reg));
                                if (!fp->aux->verifier_zext)
                                        EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
index 8afc14a4a1258ebab01c802efdaa1c55cb73fd1b..7703dcf48be86bfd82f08f264a1735509b12cbff 100644 (file)
@@ -846,6 +846,15 @@ emit_clear:
 
                        /* Get offset into TMP_REG_1 */
                        EMIT(PPC_RAW_LI(tmp1_reg, off));
+                       /*
+                        * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+                        * before and after the operation.
+                        *
+                        * This is a requirement in the Linux Kernel Memory Model.
+                        * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
+                        */
+                       if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+                               EMIT(PPC_RAW_SYNC());
                        tmp_idx = ctx->idx * 4;
                        /* load value from memory into TMP_REG_2 */
                        if (size == BPF_DW)
@@ -908,6 +917,9 @@ emit_clear:
                        PPC_BCC_SHORT(COND_NE, tmp_idx);
 
                        if (imm & BPF_FETCH) {
+                               /* Emit 'sync' to enforce full ordering */
+                               if (IS_ENABLED(CONFIG_SMP))
+                                       EMIT(PPC_RAW_SYNC());
                                EMIT(PPC_RAW_MR(ret_reg, _R0));
                                /*
                                 * Skip unnecessary zero-extension for 32-bit cmpxchg.
index 6e7029640c0ca2f1f9aad9ec95104e45680cf93a..62da20f9700a974200c05a9d5de908746f2f151d 100644 (file)
@@ -371,8 +371,8 @@ static int read_dt_lpar_name(struct seq_file *m)
 
 static void read_lpar_name(struct seq_file *m)
 {
-       if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
-               pr_err_once("Error can't get the LPAR name");
+       if (read_rtas_lpar_name(m))
+               read_dt_lpar_name(m);
 }
 
 #define SPLPAR_MAXLENGTH 1026*(sizeof(char))
index b94176e25be1810e749cb94686b9c536fd9a2bb0..0525ee2d63c716bc7551b2bedc519cd47b5d4262 100644 (file)
@@ -106,7 +106,7 @@ config RISCV
        select HAS_IOPORT if MMU
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
-       select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
+       select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
        select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
        select HAVE_ARCH_KASAN if MMU && 64BIT
index ddb002ed89dea0dbf4956a5161a0f426bcba557f..808b4c78462e5aa363fc55d7871497efa23d57b4 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <asm/fence.h>
 
-#define __arch_xchg_masked(prepend, append, r, p, n)                   \
+#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n)           \
 ({                                                                     \
        u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3);                     \
        ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE;  \
@@ -25,7 +25,7 @@
               "0:      lr.w %0, %2\n"                                  \
               "        and  %1, %0, %z4\n"                             \
               "        or   %1, %1, %z3\n"                             \
-              "        sc.w %1, %1, %2\n"                              \
+              "        sc.w" sc_sfx " %1, %1, %2\n"                    \
               "        bnez %1, 0b\n"                                  \
               append                                                   \
               : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b))       \
@@ -46,7 +46,8 @@
                : "memory");                                            \
 })
 
-#define _arch_xchg(ptr, new, sfx, prepend, append)                     \
+#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend,                        \
+                  sc_append, swap_append)                              \
 ({                                                                     \
        __typeof__(ptr) __ptr = (ptr);                                  \
        __typeof__(*(__ptr)) __new = (new);                             \
        switch (sizeof(*__ptr)) {                                       \
        case 1:                                                         \
        case 2:                                                         \
-               __arch_xchg_masked(prepend, append,                     \
+               __arch_xchg_masked(sc_sfx, prepend, sc_append,          \
                                   __ret, __ptr, __new);                \
                break;                                                  \
        case 4:                                                         \
-               __arch_xchg(".w" sfx, prepend, append,                  \
+               __arch_xchg(".w" swap_sfx, prepend, swap_append,        \
                              __ret, __ptr, __new);                     \
                break;                                                  \
        case 8:                                                         \
-               __arch_xchg(".d" sfx, prepend, append,                  \
+               __arch_xchg(".d" swap_sfx, prepend, swap_append,        \
                              __ret, __ptr, __new);                     \
                break;                                                  \
        default:                                                        \
 })
 
 #define arch_xchg_relaxed(ptr, x)                                      \
-       _arch_xchg(ptr, x, "", "", "")
+       _arch_xchg(ptr, x, "", "", "", "", "")
 
 #define arch_xchg_acquire(ptr, x)                                      \
-       _arch_xchg(ptr, x, "", "", RISCV_ACQUIRE_BARRIER)
+       _arch_xchg(ptr, x, "", "", "",                                  \
+                  RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER)
 
 #define arch_xchg_release(ptr, x)                                      \
-       _arch_xchg(ptr, x, "", RISCV_RELEASE_BARRIER, "")
+       _arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "")
 
 #define arch_xchg(ptr, x)                                              \
-       _arch_xchg(ptr, x, ".aqrl", "", "")
+       _arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "")
 
 #define xchg32(ptr, x)                                                 \
 ({                                                                     \
index 1cc7df740eddc9a2977fde8c428e19fd90ec9126..e6fbaaf549562d6e9ca63a66371441fe8b230cb3 100644 (file)
@@ -72,7 +72,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
        /* Make sure tidle is updated */
        smp_mb();
        bdata->task_ptr = tidle;
-       bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
+       bdata->stack_ptr = task_pt_regs(tidle);
        /* Make sure boot data is updated */
        smp_mb();
        hsm_data = __pa(bdata);
index 613872b0a21acb2a708f194d22f186f376f8a748..24869eb889085e4ae40881230707b54024bad975 100644 (file)
@@ -34,8 +34,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
 
        /* Make sure tidle is updated */
        smp_mb();
-       WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
-                  task_stack_page(tidle) + THREAD_SIZE);
+       WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_pt_regs(tidle));
        WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
 }
 
index 0eb689351b7d04128ba93122209f71b830b33efe..5cd407c6a8e4f82389ad3d0a63081d6dcfec7688 100644 (file)
@@ -237,10 +237,11 @@ static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
 
 static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
 {
-       u32 hart, group = 0;
+       u32 hart = 0, group = 0;
 
-       hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
-               GENMASK_ULL(aia->nr_hart_bits - 1, 0);
+       if (aia->nr_hart_bits)
+               hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
+                      GENMASK_ULL(aia->nr_hart_bits - 1, 0);
        if (aia->nr_group_bits)
                group = (addr >> aia->nr_group_shift) &
                        GENMASK_ULL(aia->nr_group_bits - 1, 0);
index c676275ea0a017617f3f6d766c66302479e4dc45..62874fbca29ff5c71720cb718201d2f9e8c994ba 100644 (file)
@@ -724,9 +724,9 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
        switch (reg_subtype) {
        case KVM_REG_RISCV_ISA_SINGLE:
                return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
-       case KVM_REG_RISCV_SBI_MULTI_EN:
+       case KVM_REG_RISCV_ISA_MULTI_EN:
                return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
-       case KVM_REG_RISCV_SBI_MULTI_DIS:
+       case KVM_REG_RISCV_ISA_MULTI_DIS:
                return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
        default:
                return -ENOENT;
index b3fcf7d67efba16a9ee5adf8e4144ef663326edb..5224f373380225a2dfdecd40bfc01ce8f5d13bc0 100644 (file)
@@ -293,8 +293,8 @@ void handle_page_fault(struct pt_regs *regs)
        if (unlikely(access_error(cause, vma))) {
                vma_end_read(vma);
                count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
-               tsk->thread.bad_cause = SEGV_ACCERR;
-               bad_area_nosemaphore(regs, code, addr);
+               tsk->thread.bad_cause = cause;
+               bad_area_nosemaphore(regs, SEGV_ACCERR, addr);
                return;
        }
 
index e3218d65f21d5a432723e0637122af46d8620db4..e3405e4b99af50c3ec6a38c3908c6f154dec1f21 100644 (file)
@@ -250,18 +250,19 @@ static void __init setup_bootmem(void)
                kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
 
        /*
-        * memblock allocator is not aware of the fact that last 4K bytes of
-        * the addressable memory can not be mapped because of IS_ERR_VALUE
-        * macro. Make sure that last 4k bytes are not usable by memblock
-        * if end of dram is equal to maximum addressable memory.  For 64-bit
-        * kernel, this problem can't happen here as the end of the virtual
-        * address space is occupied by the kernel mapping then this check must
-        * be done as soon as the kernel mapping base address is determined.
+        * Reserve physical address space that would be mapped to virtual
+        * addresses greater than (void *)(-PAGE_SIZE) because:
+        *  - This memory would overlap with ERR_PTR
+        *  - This memory belongs to high memory, which is not supported
+        *
+        * This is not applicable to 64-bit kernel, because virtual addresses
+        * after (void *)(-PAGE_SIZE) are not linearly mapped: they are
+        * occupied by kernel mapping. Also it is unrealistic for high memory
+        * to exist on 64-bit platforms.
         */
        if (!IS_ENABLED(CONFIG_64BIT)) {
-               max_mapped_addr = __pa(~(ulong)0);
-               if (max_mapped_addr == (phys_ram_end - 1))
-                       memblock_set_current_limit(max_mapped_addr - 4096);
+               max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
+               memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
        }
 
        min_low_pfn = PFN_UP(phys_ram_base);
index 9863ebe75019a6daf28596e3274a7e51437f2c80..edae1341619600cab5dc042a206571156c4d7f16 100644 (file)
@@ -451,7 +451,7 @@ static void *nt_final(void *ptr)
 /*
  * Initialize ELF header (new kernel)
  */
-static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
+static void *ehdr_init(Elf64_Ehdr *ehdr, int phdr_count)
 {
        memset(ehdr, 0, sizeof(*ehdr));
        memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
@@ -465,11 +465,8 @@ static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
        ehdr->e_phoff = sizeof(Elf64_Ehdr);
        ehdr->e_ehsize = sizeof(Elf64_Ehdr);
        ehdr->e_phentsize = sizeof(Elf64_Phdr);
-       /*
-        * Number of memory chunk PT_LOAD program headers plus one kernel
-        * image PT_LOAD program header plus one PT_NOTE program header.
-        */
-       ehdr->e_phnum = mem_chunk_cnt + 1 + 1;
+       /* Number of PT_LOAD program headers plus PT_NOTE program header */
+       ehdr->e_phnum = phdr_count + 1;
        return ehdr + 1;
 }
 
@@ -503,12 +500,14 @@ static int get_mem_chunk_cnt(void)
 /*
  * Initialize ELF loads (new kernel)
  */
-static void loads_init(Elf64_Phdr *phdr)
+static void loads_init(Elf64_Phdr *phdr, bool os_info_has_vm)
 {
-       unsigned long old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
+       unsigned long old_identity_base = 0;
        phys_addr_t start, end;
        u64 idx;
 
+       if (os_info_has_vm)
+               old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
        for_each_physmem_range(idx, &oldmem_type, &start, &end) {
                phdr->p_type = PT_LOAD;
                phdr->p_vaddr = old_identity_base + start;
@@ -522,6 +521,11 @@ static void loads_init(Elf64_Phdr *phdr)
        }
 }
 
+static bool os_info_has_vm(void)
+{
+       return os_info_old_value(OS_INFO_KASLR_OFFSET);
+}
+
 /*
  * Prepare PT_LOAD type program header for kernel image region
  */
@@ -566,7 +570,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
        return ptr;
 }
 
-static size_t get_elfcorehdr_size(int mem_chunk_cnt)
+static size_t get_elfcorehdr_size(int phdr_count)
 {
        size_t size;
 
@@ -581,10 +585,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
        size += nt_vmcoreinfo_size();
        /* nt_final */
        size += sizeof(Elf64_Nhdr);
-       /* PT_LOAD type program header for kernel text region */
-       size += sizeof(Elf64_Phdr);
        /* PT_LOADS */
-       size += mem_chunk_cnt * sizeof(Elf64_Phdr);
+       size += phdr_count * sizeof(Elf64_Phdr);
 
        return size;
 }
@@ -595,8 +597,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
 int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 {
        Elf64_Phdr *phdr_notes, *phdr_loads, *phdr_text;
+       int mem_chunk_cnt, phdr_text_cnt;
        size_t alloc_size;
-       int mem_chunk_cnt;
        void *ptr, *hdr;
        u64 hdr_off;
 
@@ -615,12 +617,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
        }
 
        mem_chunk_cnt = get_mem_chunk_cnt();
+       phdr_text_cnt = os_info_has_vm() ? 1 : 0;
 
-       alloc_size = get_elfcorehdr_size(mem_chunk_cnt);
+       alloc_size = get_elfcorehdr_size(mem_chunk_cnt + phdr_text_cnt);
 
        hdr = kzalloc(alloc_size, GFP_KERNEL);
 
-       /* Without elfcorehdr /proc/vmcore cannot be created. Thus creating
+       /*
+        * Without elfcorehdr /proc/vmcore cannot be created. Thus creating
         * a dump with this crash kernel will fail. Panic now to allow other
         * dump mechanisms to take over.
         */
@@ -628,21 +632,23 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
                panic("s390 kdump allocating elfcorehdr failed");
 
        /* Init elf header */
-       ptr = ehdr_init(hdr, mem_chunk_cnt);
+       phdr_notes = ehdr_init(hdr, mem_chunk_cnt + phdr_text_cnt);
        /* Init program headers */
-       phdr_notes = ptr;
-       ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
-       phdr_text = ptr;
-       ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
-       phdr_loads = ptr;
-       ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
+       if (phdr_text_cnt) {
+               phdr_text = phdr_notes + 1;
+               phdr_loads = phdr_text + 1;
+       } else {
+               phdr_loads = phdr_notes + 1;
+       }
+       ptr = PTR_ADD(phdr_loads, sizeof(Elf64_Phdr) * mem_chunk_cnt);
        /* Init notes */
        hdr_off = PTR_DIFF(ptr, hdr);
        ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
        /* Init kernel text program header */
-       text_init(phdr_text);
+       if (phdr_text_cnt)
+               text_init(phdr_text);
        /* Init loads */
-       loads_init(phdr_loads);
+       loads_init(phdr_loads, phdr_text_cnt);
        /* Finalize program headers */
        hdr_off = PTR_DIFF(ptr, hdr);
        *addr = (unsigned long long) hdr;
index e64eaa8dda5a45e7d8938d31afd2a0d9434469f9..9d6e8f13d13a7807c11123b160a0c7df50185b27 100644 (file)
 #include "../perf_event.h"
 #include "../probe.h"
 
+MODULE_DESCRIPTION("Support for Intel cstate performance events");
 MODULE_LICENSE("GPL");
 
 #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)                \
index 419c517b8594fbdd057a0f36bc707c8b6cd8b58f..c68f5b39952b0a4cc2fcc2bb61a8ba2a1c6b7bed 100644 (file)
@@ -34,6 +34,7 @@ static struct event_constraint uncore_constraint_fixed =
 struct event_constraint uncore_constraint_empty =
        EVENT_CONSTRAINT(0, 0, 0);
 
+MODULE_DESCRIPTION("Support for Intel uncore performance events");
 MODULE_LICENSE("GPL");
 
 int uncore_pcibus_to_dieid(struct pci_bus *bus)
index 46e673585560312fbc8f54b910ccc247da6adb38..0c5e7a7c43ac07ea63eda49d6d92de47b1caec0a 100644 (file)
@@ -64,6 +64,7 @@
 #include "perf_event.h"
 #include "probe.h"
 
+MODULE_DESCRIPTION("Support Intel/AMD RAPL energy consumption counters");
 MODULE_LICENSE("GPL");
 
 /*
index ece45b3f6f2073ea81ad65b139173411c77b3d73..f8ca74e7678f3ab8dc44ea0d2cd355cf04c5e068 100644 (file)
@@ -2154,6 +2154,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
                       void *insn, int insn_len);
+void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                             u64 addr, unsigned long roots);
index 266daf5b5b842d0b9921d5950285c648077712fc..695f3666488996da42eba757617f47597475dd5a 100644 (file)
@@ -77,7 +77,7 @@
 #define VMX_FEATURE_ENCLS_EXITING      ( 2*32+ 15) /* "" VM-Exit on ENCLS (leaf dependent) */
 #define VMX_FEATURE_RDSEED_EXITING     ( 2*32+ 16) /* "" VM-Exit on RDSEED */
 #define VMX_FEATURE_PAGE_MOD_LOGGING   ( 2*32+ 17) /* "pml" Log dirty pages into buffer */
-#define VMX_FEATURE_EPT_VIOLATION_VE   ( 2*32+ 18) /* "" Conditionally reflect EPT violations as #VE exceptions */
+#define VMX_FEATURE_EPT_VIOLATION_VE   ( 2*32+ 18) /* Conditionally reflect EPT violations as #VE exceptions */
 #define VMX_FEATURE_PT_CONCEAL_VMX     ( 2*32+ 19) /* "" Suppress VMX indicators in Processor Trace */
 #define VMX_FEATURE_XSAVES             ( 2*32+ 20) /* "" Enable XSAVES and XRSTORS in guest */
 #define VMX_FEATURE_MODE_BASED_EPT_EXEC        ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */
index f9a8c7b7943fb2c6de232ae56b02ccefcc1504ee..b3fa61d45352e7120d7be7fb5cdb9d6f071c9729 100644 (file)
@@ -345,6 +345,7 @@ static DECLARE_WORK(disable_freq_invariance_work,
                    disable_freq_invariance_workfn);
 
 DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
 
 static void scale_freq_tick(u64 acnt, u64 mcnt)
 {
index 2b170da84f97fec6a1c49a4793d91f21ceb48195..d4e539d4e158cc561e156c028336080a07478ac8 100644 (file)
@@ -1075,6 +1075,10 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
+
+               /* Provide a sane default if not enumerated: */
+               if (!c->x86_clflush_size)
+                       c->x86_clflush_size = 32;
        }
 
        c->x86_cache_bits = c->x86_phys_bits;
@@ -1585,6 +1589,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        if (have_cpuid_p()) {
                cpu_detect(c);
                get_cpu_vendor(c);
+               intel_unlock_cpuid_leafs(c);
                get_cpu_cap(c);
                setup_force_cpu_cap(X86_FEATURE_CPUID);
                get_cpu_address_sizes(c);
@@ -1744,7 +1749,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
        cpu_detect(c);
 
        get_cpu_vendor(c);
-
+       intel_unlock_cpuid_leafs(c);
        get_cpu_cap(c);
 
        get_cpu_address_sizes(c);
index ea9e07d57c8dd2d694d78769ea8729d0dda63dbd..1beccefbaff9a5a50f83c944cd8267d65f484085 100644 (file)
@@ -61,9 +61,11 @@ extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
 
 extern void __init tsx_init(void);
 void tsx_ap_init(void);
+void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c);
 #else
 static inline void tsx_init(void) { }
 static inline void tsx_ap_init(void) { }
+static inline void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c) { }
 #endif /* CONFIG_CPU_SUP_INTEL */
 
 extern void init_spectral_chicken(struct cpuinfo_x86 *c);
index 3c3e7e5695ba419434e5b64668b83deae507f8e3..fdf3489d92a4980245481389f327527637be3bdd 100644 (file)
@@ -269,19 +269,26 @@ detect_keyid_bits:
        c->x86_phys_bits -= keyid_bits;
 }
 
+void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
+{
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return;
+
+       if (c->x86 < 6 || (c->x86 == 6 && c->x86_model < 0xd))
+               return;
+
+       /*
+        * The BIOS can have limited CPUID to leaf 2, which breaks feature
+        * enumeration. Unlock it and update the maximum leaf info.
+        */
+       if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
+               c->cpuid_level = cpuid_eax(0);
+}
+
 static void early_init_intel(struct cpuinfo_x86 *c)
 {
        u64 misc_enable;
 
-       /* Unmask CPUID levels if masked: */
-       if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
-               if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
-                                 MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
-                       c->cpuid_level = cpuid_eax(0);
-                       get_cpu_cap(c);
-               }
-       }
-
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
                (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
index d419deed6a4884a387f5babc999fd599c5a7314e..7d476fa697ca53b352efe76ec659193968938558 100644 (file)
@@ -84,9 +84,9 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)
 
        /*
         * If leaf 0xb is available, then the domain shifts are set
-        * already and nothing to do here.
+        * already and nothing to do here. Only valid for family >= 0x17.
         */
-       if (!has_topoext) {
+       if (!has_topoext && tscan->c->x86 >= 0x17) {
                /*
                 * Leaf 0x80000008 set the CORE domain shift already.
                 * Update the SMT domain, but do not propagate it.
index d64fb2b3eb69e364772b1d9e515cb232a325f4b8..fec95a7702703f90bff5fc9c7dc4e2f91207c6f4 100644 (file)
@@ -44,6 +44,7 @@ config KVM
        select KVM_VFIO
        select HAVE_KVM_PM_NOTIFIER if PM
        select KVM_GENERIC_HARDWARE_ENABLING
+       select KVM_WERROR if WERROR
        help
          Support hosting fully virtualized guest machines using hardware
          virtualization extensions.  You will need a fairly recent
@@ -66,7 +67,7 @@ config KVM_WERROR
        # FRAME_WARN, i.e. KVM_WERROR=y with KASAN=y requires special tuning.
        # Building KVM with -Werror and KASAN is still doable via enabling
        # the kernel-wide WERROR=y.
-       depends on KVM && EXPERT && !KASAN
+       depends on KVM && ((EXPERT && !KASAN) || WERROR)
        help
          Add -Werror to the build flags for KVM.
 
@@ -97,15 +98,17 @@ config KVM_INTEL
 
 config KVM_INTEL_PROVE_VE
         bool "Check that guests do not receive #VE exceptions"
-        default KVM_PROVE_MMU || DEBUG_KERNEL
-        depends on KVM_INTEL
+        depends on KVM_INTEL && EXPERT
         help
-
           Checks that KVM's page table management code will not incorrectly
           let guests receive a virtualization exception.  Virtualization
           exceptions will be trapped by the hypervisor rather than injected
           in the guest.
 
+          Note: some CPUs appear to generate spurious EPT Violations #VEs
+          that trigger KVM's WARN, in particular with eptad=0 and/or nested
+          virtualization.
+
           If unsure, say N.
 
 config X86_SGX_KVM
index ebf41023be38293dbc248d75a7125b63cd46c189..acd7d48100a1d3360202f33b5fa778b34d38afeb 100644 (file)
 #define MAX_APIC_VECTOR                        256
 #define APIC_VECTORS_PER_REG           32
 
-static bool lapic_timer_advance_dynamic __read_mostly;
+/*
+ * Enable local APIC timer advancement (tscdeadline mode only) with adaptive
+ * tuning.  When enabled, KVM programs the host timer event to fire early, i.e.
+ * before the deadline expires, to account for the delay between taking the
+ * VM-Exit (to inject the guest event) and the subsequent VM-Enter to resume
+ * the guest, i.e. so that the interrupt arrives in the guest with minimal
+ * latency relative to the deadline programmed by the guest.
+ */
+static bool lapic_timer_advance __read_mostly = true;
+module_param(lapic_timer_advance, bool, 0444);
+
 #define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100     /* clock cycles */
 #define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000   /* clock cycles */
 #define LAPIC_TIMER_ADVANCE_NS_INIT    1000
@@ -1854,16 +1864,14 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
 
-       if (lapic_timer_advance_dynamic) {
-               adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
-               /*
-                * If the timer fired early, reread the TSC to account for the
-                * overhead of the above adjustment to avoid waiting longer
-                * than is necessary.
-                */
-               if (guest_tsc < tsc_deadline)
-                       guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
-       }
+       adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
+
+       /*
+        * If the timer fired early, reread the TSC to account for the overhead
+        * of the above adjustment to avoid waiting longer than is necessary.
+        */
+       if (guest_tsc < tsc_deadline)
+               guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
 
        if (guest_tsc < tsc_deadline)
                __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
@@ -2812,7 +2820,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
                return HRTIMER_NORESTART;
 }
 
-int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
+int kvm_create_lapic(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic;
 
@@ -2845,13 +2853,8 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
        hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_ABS_HARD);
        apic->lapic_timer.timer.function = apic_timer_fn;
-       if (timer_advance_ns == -1) {
+       if (lapic_timer_advance)
                apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
-               lapic_timer_advance_dynamic = true;
-       } else {
-               apic->lapic_timer.timer_advance_ns = timer_advance_ns;
-               lapic_timer_advance_dynamic = false;
-       }
 
        /*
         * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
index 0a0ea4b5dd8ce7239b85f5d054828fd5f0eedb6b..a69e706b9080acf97b2947f01d37d2a760064957 100644 (file)
@@ -85,7 +85,7 @@ struct kvm_lapic {
 
 struct dest_map;
 
-int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns);
+int kvm_create_lapic(struct kvm_vcpu *vcpu);
 void kvm_free_lapic(struct kvm_vcpu *vcpu);
 
 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
index 662f62dfb2aa9f0d205ee02a88da0b687c2291d6..8d74bdef68c1d39a2b6037de4277a3228686c709 100644 (file)
@@ -336,16 +336,19 @@ static int is_cpuid_PSE36(void)
 #ifdef CONFIG_X86_64
 static void __set_spte(u64 *sptep, u64 spte)
 {
+       KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
        WRITE_ONCE(*sptep, spte);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 {
+       KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
        WRITE_ONCE(*sptep, spte);
 }
 
 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
 {
+       KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
        return xchg(sptep, spte);
 }
 
@@ -4101,23 +4104,31 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
        return leaf;
 }
 
-/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
-static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
+static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+                             int *root_level)
 {
-       u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
-       struct rsvd_bits_validate *rsvd_check;
-       int root, leaf, level;
-       bool reserved = false;
+       int leaf;
 
        walk_shadow_page_lockless_begin(vcpu);
 
        if (is_tdp_mmu_active(vcpu))
-               leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
+               leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level);
        else
-               leaf = get_walk(vcpu, addr, sptes, &root);
+               leaf = get_walk(vcpu, addr, sptes, root_level);
 
        walk_shadow_page_lockless_end(vcpu);
+       return leaf;
+}
+
+/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
+static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
+{
+       u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
+       struct rsvd_bits_validate *rsvd_check;
+       int root, leaf, level;
+       bool reserved = false;
 
+       leaf = get_sptes_lockless(vcpu, addr, sptes, &root);
        if (unlikely(leaf < 0)) {
                *sptep = 0ull;
                return reserved;
@@ -4400,9 +4411,6 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                        return RET_PF_EMULATE;
        }
 
-       fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
-       smp_rmb();
-
        /*
         * Check for a relevant mmu_notifier invalidation event before getting
         * the pfn from the primary MMU, and before acquiring mmu_lock.
@@ -5921,6 +5929,22 @@ emulate:
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
+void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
+{
+       u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
+       int root_level, leaf, level;
+
+       leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level);
+       if (unlikely(leaf < 0))
+               return;
+
+       pr_err("%s %llx", msg, gpa);
+       for (level = root_level; level >= leaf; level--)
+               pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
+       pr_cont("\n");
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes);
+
 static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                      u64 addr, hpa_t root_hpa)
 {
index 5dd5405fa07ac9a23ec3ffdfa71280bf32b67e2a..52fa004a1fbc9f3dbe77b2bb8ef93d2cb74b341b 100644 (file)
@@ -3,6 +3,8 @@
 #ifndef KVM_X86_MMU_SPTE_H
 #define KVM_X86_MMU_SPTE_H
 
+#include <asm/vmx.h>
+
 #include "mmu.h"
 #include "mmu_internal.h"
 
@@ -276,6 +278,13 @@ static inline bool is_shadow_present_pte(u64 pte)
        return !!(pte & SPTE_MMU_PRESENT_MASK);
 }
 
+static inline bool is_ept_ve_possible(u64 spte)
+{
+       return (shadow_present_mask & VMX_EPT_SUPPRESS_VE_BIT) &&
+              !(spte & VMX_EPT_SUPPRESS_VE_BIT) &&
+              (spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE;
+}
+
 /*
  * Returns true if A/D bits are supported in hardware and are enabled by KVM.
  * When enabled, KVM uses A/D bits for all non-nested MMUs.  Because L1 can
index fae559559a806a8121760a637430b858ec5bacab..2880fd392e0cbb9921aed5b8b5992c54e484530f 100644 (file)
@@ -21,11 +21,13 @@ static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
 
 static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
 {
+       KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
        return xchg(rcu_dereference(sptep), new_spte);
 }
 
 static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
 {
+       KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
        WRITE_ONCE(*rcu_dereference(sptep), new_spte);
 }
 
index 1259dd63defc8e92ed2eb49e291db5a96d7623c9..36539c1b36cd632f5fb32a263cc3c68632b2f1c9 100644 (file)
@@ -626,7 +626,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
         * SPTEs.
         */
        handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
-                           0, iter->level, true);
+                           SHADOW_NONPRESENT_VALUE, iter->level, true);
 
        return 0;
 }
index 0623cfaa7bb0ee9f9ca3ec142e99feb41254533b..95095a233a459a9fafb49ac9d244a6eb7becf94e 100644 (file)
@@ -779,6 +779,14 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
         */
        fpstate_set_confidential(&vcpu->arch.guest_fpu);
        vcpu->arch.guest_state_protected = true;
+
+       /*
+        * SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it
+        * only after setting guest_state_protected because KVM_SET_MSRS allows
+        * dynamic toggling of LBRV (for performance reason) on write access to
+        * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
+        */
+       svm_enable_lbrv(vcpu);
        return 0;
 }
 
@@ -2406,6 +2414,12 @@ void __init sev_hardware_setup(void)
        if (!boot_cpu_has(X86_FEATURE_SEV_ES))
                goto out;
 
+       if (!lbrv) {
+               WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
+                         "LBRV must be present for SEV-ES support");
+               goto out;
+       }
+
        /* Has the system been allocated ASIDs for SEV-ES? */
        if (min_sev_asid == 1)
                goto out;
@@ -3216,7 +3230,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
        struct kvm_vcpu *vcpu = &svm->vcpu;
 
        svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
-       svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
 
        /*
         * An SEV-ES guest requires a VMSA area that is a separate from the
@@ -3268,10 +3281,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
        /* Clear intercepts on selected MSRs */
        set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
-       set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
-       set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
-       set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
-       set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
 }
 
 void sev_init_vmcb(struct vcpu_svm *svm)
index c8dc25886c16581c2e89e94b685fa6c95ad4fc7a..296c524988f953b3d80a37d7de628c24c9d39e58 100644 (file)
@@ -99,6 +99,7 @@ static const struct svm_direct_access_msrs {
        { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
        { .index = MSR_IA32_PRED_CMD,                   .always = false },
        { .index = MSR_IA32_FLUSH_CMD,                  .always = false },
+       { .index = MSR_IA32_DEBUGCTLMSR,                .always = false },
        { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
@@ -215,7 +216,7 @@ int vgif = true;
 module_param(vgif, int, 0444);
 
 /* enable/disable LBR virtualization */
-static int lbrv = true;
+int lbrv = true;
 module_param(lbrv, int, 0444);
 
 static int tsc_scaling = true;
@@ -990,7 +991,7 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
        vmcb_mark_dirty(to_vmcb, VMCB_LBR);
 }
 
-static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
+void svm_enable_lbrv(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -1000,6 +1001,9 @@ static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
 
+       if (sev_es_guest(vcpu->kvm))
+               set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);
+
        /* Move the LBR msrs to the vmcb02 so that the guest can see them. */
        if (is_guest_mode(vcpu))
                svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
@@ -1009,6 +1013,8 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
+
        svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
@@ -2822,10 +2828,24 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
        return 0;
 }
 
+static bool
+sev_es_prevent_msr_access(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+       return sev_es_guest(vcpu->kvm) &&
+              vcpu->arch.guest_state_protected &&
+              svm_msrpm_offset(msr_info->index) != MSR_INVALID &&
+              !msr_write_intercepted(vcpu, msr_info->index);
+}
+
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       if (sev_es_prevent_msr_access(vcpu, msr_info)) {
+               msr_info->data = 0;
+               return -EINVAL;
+       }
+
        switch (msr_info->index) {
        case MSR_AMD64_TSC_RATIO:
                if (!msr_info->host_initiated &&
@@ -2976,6 +2996,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
        u32 ecx = msr->index;
        u64 data = msr->data;
+
+       if (sev_es_prevent_msr_access(vcpu, msr))
+               return -EINVAL;
+
        switch (ecx) {
        case MSR_AMD64_TSC_RATIO:
 
@@ -3846,16 +3870,27 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        /*
-        * KVM should never request an NMI window when vNMI is enabled, as KVM
-        * allows at most one to-be-injected NMI and one pending NMI, i.e. if
-        * two NMIs arrive simultaneously, KVM will inject one and set
-        * V_NMI_PENDING for the other.  WARN, but continue with the standard
-        * single-step approach to try and salvage the pending NMI.
+        * If NMIs are outright masked, i.e. the vCPU is already handling an
+        * NMI, and KVM has not yet intercepted an IRET, then there is nothing
+        * more to do at this time as KVM has already enabled IRET intercepts.
+        * If KVM has already intercepted IRET, then single-step over the IRET,
+        * as NMIs aren't architecturally unmasked until the IRET completes.
+        *
+        * If vNMI is enabled, KVM should never request an NMI window if NMIs
+        * are masked, as KVM allows at most one to-be-injected NMI and one
+        * pending NMI.  If two NMIs arrive simultaneously, KVM will inject one
+        * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
+        * unmasked.  KVM _will_ request an NMI window in some situations, e.g.
+        * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
+        * inject the NMI.  In those situations, KVM needs to single-step over
+        * the STI shadow or intercept STGI.
         */
-       WARN_ON_ONCE(is_vnmi_enabled(svm));
+       if (svm_get_nmi_mask(vcpu)) {
+               WARN_ON_ONCE(is_vnmi_enabled(svm));
 
-       if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
-               return; /* IRET will cause a vm exit */
+               if (!svm->awaiting_iret_completion)
+                       return; /* IRET will cause a vm exit */
+       }
 
        /*
         * SEV-ES guests are responsible for signaling when a vCPU is ready to
@@ -5265,6 +5300,12 @@ static __init int svm_hardware_setup(void)
 
        nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
 
+       if (lbrv) {
+               if (!boot_cpu_has(X86_FEATURE_LBRV))
+                       lbrv = false;
+               else
+                       pr_info("LBR virtualization supported\n");
+       }
        /*
         * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
         * may be modified by svm_adjust_mmio_mask()), as well as nrips.
@@ -5318,14 +5359,6 @@ static __init int svm_hardware_setup(void)
                svm_x86_ops.set_vnmi_pending = NULL;
        }
 
-
-       if (lbrv) {
-               if (!boot_cpu_has(X86_FEATURE_LBRV))
-                       lbrv = false;
-               else
-                       pr_info("LBR virtualization supported\n");
-       }
-
        if (!enable_pmu)
                pr_info("PMU virtualization is disabled\n");
 
index be57213cd295935f055bf46d3856393944108061..0f1472690b593a0ff1a9fd96c77275905ed4311c 100644 (file)
@@ -30,7 +30,7 @@
 #define        IOPM_SIZE PAGE_SIZE * 3
 #define        MSRPM_SIZE PAGE_SIZE * 2
 
-#define MAX_DIRECT_ACCESS_MSRS 47
+#define MAX_DIRECT_ACCESS_MSRS 48
 #define MSRPM_OFFSETS  32
 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 extern bool npt_enabled;
@@ -39,6 +39,7 @@ extern int vgif;
 extern bool intercept_smi;
 extern bool x2avic_enabled;
 extern bool vnmi;
+extern int lbrv;
 
 /*
  * Clean bits in VMCB.
@@ -552,6 +553,7 @@ u32 *svm_vcpu_alloc_msrpm(void);
 void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
 void svm_vcpu_free_msrpm(u32 *msrpm);
 void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
+void svm_enable_lbrv(struct kvm_vcpu *vcpu);
 void svm_update_lbrv(struct kvm_vcpu *vcpu);
 
 int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
index d5b832126e34580088a36091b62f0b5a66b011b7..643935a0f70ab77b8de56e1c0a145211af5af2ed 100644 (file)
@@ -2242,6 +2242,9 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
                vmcs_write64(EPT_POINTER,
                             construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));
 
+       if (vmx->ve_info)
+               vmcs_write64(VE_INFORMATION_ADDRESS, __pa(vmx->ve_info));
+
        /* All VMFUNCs are currently emulated through L0 vmexits.  */
        if (cpu_has_vmx_vmfunc())
                vmcs_write64(VM_FUNCTION_CONTROL, 0);
@@ -6230,6 +6233,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
                else if (is_alignment_check(intr_info) &&
                         !vmx_guest_inject_ac(vcpu))
                        return true;
+               else if (is_ve_fault(intr_info))
+                       return true;
                return false;
        case EXIT_REASON_EXTERNAL_INTERRUPT:
                return true;
index 6051fad5945fa08f9a348fc380799ead2d415991..b3c83c06f8265b60991bec5f6159b0a977e219c3 100644 (file)
@@ -5218,8 +5218,15 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
        if (is_invalid_opcode(intr_info))
                return handle_ud(vcpu);
 
-       if (KVM_BUG_ON(is_ve_fault(intr_info), vcpu->kvm))
-               return -EIO;
+       if (WARN_ON_ONCE(is_ve_fault(intr_info))) {
+               struct vmx_ve_information *ve_info = vmx->ve_info;
+
+               WARN_ONCE(ve_info->exit_reason != EXIT_REASON_EPT_VIOLATION,
+                         "Unexpected #VE on VM-Exit reason 0x%x", ve_info->exit_reason);
+               dump_vmcs(vcpu);
+               kvm_mmu_print_sptes(vcpu, ve_info->guest_physical_address, "#VE");
+               return 1;
+       }
 
        error_code = 0;
        if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
index 082ac6d95a3a08160d54ec832d0c075c0bd71489..8c9e4281d978d73849d840139daa28b465eb8c25 100644 (file)
@@ -164,15 +164,6 @@ module_param(kvmclock_periodic_sync, bool, 0444);
 static u32 __read_mostly tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, 0644);
 
-/*
- * lapic timer advance (tscdeadline mode only) in nanoseconds.  '-1' enables
- * adaptive tuning starting from default advancement of 1000ns.  '0' disables
- * advancement entirely.  Any other value is used as-is and disables adaptive
- * tuning, i.e. allows privileged userspace to set an exact advancement time.
- */
-static int __read_mostly lapic_timer_advance_ns = -1;
-module_param(lapic_timer_advance_ns, int, 0644);
-
 static bool __read_mostly vector_hashing = true;
 module_param(vector_hashing, bool, 0444);
 
@@ -12169,7 +12160,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        if (r < 0)
                return r;
 
-       r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
+       r = kvm_create_lapic(vcpu);
        if (r < 0)
                goto fail_mmu_destroy;
 
index 2d4a35e6dd18d55c1ce1127ee81dbfd080a15653..09a87fa222c78fee2e4c40987d07c725e37aa5ff 100644 (file)
@@ -145,7 +145,7 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
                                                  dev_name(&adev->dev), event,
                                                  (u32) ac->state);
                acpi_notifier_call_chain(adev, event, (u32) ac->state);
-               kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
+               power_supply_changed(ac->charger);
        }
 }
 
@@ -268,7 +268,7 @@ static int acpi_ac_resume(struct device *dev)
        if (acpi_ac_get_state(ac))
                return 0;
        if (old_state != ac->state)
-               kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
+               power_supply_changed(ac->charger);
 
        return 0;
 }
index 9515bcfe5e973229139587fc2fbd5077a42cfea6..73903a497d73f7549e913ee00d85507398643464 100644 (file)
@@ -909,7 +909,7 @@ static void __exit einj_exit(void)
        if (einj_initialized)
                platform_driver_unregister(&einj_driver);
 
-       platform_device_del(einj_dev);
+       platform_device_unregister(einj_dev);
 }
 
 module_init(einj_init);
index e7793ee9e6498ea5edb671ef7c6f38ba470c9a44..68dd17f96f636f343f76418b1b870161816e50ba 100644 (file)
@@ -1333,10 +1333,13 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
        if (ec->busy_polling || bits > 8)
                acpi_ec_burst_enable(ec);
 
-       for (i = 0; i < bytes; ++i, ++address, ++value)
+       for (i = 0; i < bytes; ++i, ++address, ++value) {
                result = (function == ACPI_READ) ?
                        acpi_ec_read(ec, address, value) :
                        acpi_ec_write(ec, address, *value);
+               if (result < 0)
+                       break;
+       }
 
        if (ec->busy_polling || bits > 8)
                acpi_ec_burst_disable(ec);
@@ -1348,8 +1351,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
                return AE_NOT_FOUND;
        case -ETIME:
                return AE_TIME;
-       default:
+       case 0:
                return AE_OK;
+       default:
+               return AE_ERROR;
        }
 }
 
index 94e3c000df2e164824a95157715a204fd3f74526..dc8164b182dccf3fa12fd59f0430d08468ba63a1 100644 (file)
@@ -610,7 +610,7 @@ static void acpi_sbs_callback(void *context)
        if (sbs->charger_exists) {
                acpi_ac_get_present(sbs);
                if (sbs->charger_present != saved_charger_state)
-                       kobject_uevent(&sbs->charger->dev.kobj, KOBJ_CHANGE);
+                       power_supply_changed(sbs->charger);
        }
 
        if (sbs->manager_present) {
@@ -622,7 +622,7 @@ static void acpi_sbs_callback(void *context)
                        acpi_battery_read(bat);
                        if (saved_battery_state == bat->present)
                                continue;
-                       kobject_uevent(&bat->bat->dev.kobj, KOBJ_CHANGE);
+                       power_supply_changed(bat->bat);
                }
        }
 }
index 6548f10e61d9c72ca89180e011f8e495058302a1..07d66d2c5f0dd8ecb9ccd5743dd32586b3e37ae8 100644 (file)
@@ -429,7 +429,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */
        /* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
        { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */
-       { PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_pcs_quirk }, /* Alder Lake-P AHCI */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
index 4f35aab81a0a38c7d25ffcafefe0dd9aa4135d22..e1bf8a19b3c89e2cdfc257578bdc9b92cc95cb80 100644 (file)
@@ -4136,8 +4136,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "PIONEER BD-RW   BDR-207M",   NULL,   ATA_HORKAGE_NOLPM },
        { "PIONEER BD-RW   BDR-205",    NULL,   ATA_HORKAGE_NOLPM },
 
-       /* Crucial BX100 SSD 500GB has broken LPM support */
+       /* Crucial devices with broken LPM support */
        { "CT500BX100SSD1",             NULL,   ATA_HORKAGE_NOLPM },
+       { "CT240BX500SSD1",             NULL,   ATA_HORKAGE_NOLPM },
 
        /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
        { "Crucial_CT512MX100*",        "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@@ -4155,6 +4156,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                ATA_HORKAGE_ZERO_AFTER_TRIM |
                                                ATA_HORKAGE_NOLPM },
 
+       /* AMD Radeon devices with broken LPM support */
+       { "R3SL240G",                   NULL,   ATA_HORKAGE_NOLPM },
+
+       /* Apacer models with LPM issues */
+       { "Apacer AS340*",              NULL,   ATA_HORKAGE_NOLPM },
+
        /* These specific Samsung models/firmware-revs do not handle LPM well */
        { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
        { "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM },
index 817838e2f70e09c2b47a10deee8a99b4e323091c..3cb455a32d92661e2503107930f4f54b499b7996 100644 (file)
@@ -915,10 +915,13 @@ static const struct scsi_host_template pata_macio_sht = {
        .sg_tablesize           = MAX_DCMDS,
        /* We may not need that strict one */
        .dma_boundary           = ATA_DMA_BOUNDARY,
-       /* Not sure what the real max is but we know it's less than 64K, let's
-        * use 64K minus 256
+       /*
+        * The SCSI core requires the segment size to cover at least a page, so
+        * for 64K page size kernels this must be at least 64K. However the
+        * hardware can't handle 64K, so pata_macio_qc_prep() will split large
+        * requests.
         */
-       .max_segment_size       = MAX_DBDMA_SEG,
+       .max_segment_size       = SZ_64K,
        .device_configure       = pata_macio_device_configure,
        .sdev_groups            = ata_common_sdev_groups,
        .can_queue              = ATA_DEF_QUEUE,
index 3ec611dc0c09fbe251e8e789e64db49127d2fea6..a905e955bbfc7869cf3eed2605f8e616465b21d6 100644 (file)
@@ -350,7 +350,8 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
 
                if (quirks->max_write_len &&
                    (bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
-                       max_write = quirks->max_write_len;
+                       max_write = quirks->max_write_len -
+                               (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
 
                if (max_read || max_write) {
                        ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
index f5c71a617a993108049f12de67234a39405c27c2..4084df65c9fa33b6b43278e151576a1823049394 100644 (file)
@@ -64,19 +64,6 @@ static size_t rng_buffer_size(void)
        return RNG_BUFFER_SIZE;
 }
 
-static void add_early_randomness(struct hwrng *rng)
-{
-       int bytes_read;
-
-       mutex_lock(&reading_mutex);
-       bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
-       mutex_unlock(&reading_mutex);
-       if (bytes_read > 0) {
-               size_t entropy = bytes_read * 8 * rng->quality / 1024;
-               add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false);
-       }
-}
-
 static inline void cleanup_rng(struct kref *kref)
 {
        struct hwrng *rng = container_of(kref, struct hwrng, ref);
@@ -340,13 +327,12 @@ static ssize_t rng_current_store(struct device *dev,
                                 const char *buf, size_t len)
 {
        int err;
-       struct hwrng *rng, *old_rng, *new_rng;
+       struct hwrng *rng, *new_rng;
 
        err = mutex_lock_interruptible(&rng_mutex);
        if (err)
                return -ERESTARTSYS;
 
-       old_rng = current_rng;
        if (sysfs_streq(buf, "")) {
                err = enable_best_rng();
        } else {
@@ -362,11 +348,8 @@ static ssize_t rng_current_store(struct device *dev,
        new_rng = get_current_rng_nolock();
        mutex_unlock(&rng_mutex);
 
-       if (new_rng) {
-               if (new_rng != old_rng)
-                       add_early_randomness(new_rng);
+       if (new_rng)
                put_rng(new_rng);
-       }
 
        return err ? : len;
 }
@@ -544,7 +527,6 @@ int hwrng_register(struct hwrng *rng)
 {
        int err = -EINVAL;
        struct hwrng *tmp;
-       bool is_new_current = false;
 
        if (!rng->name || (!rng->data_read && !rng->read))
                goto out;
@@ -573,25 +555,8 @@ int hwrng_register(struct hwrng *rng)
                err = set_current_rng(rng);
                if (err)
                        goto out_unlock;
-               /* to use current_rng in add_early_randomness() we need
-                * to take a ref
-                */
-               is_new_current = true;
-               kref_get(&rng->ref);
        }
        mutex_unlock(&rng_mutex);
-       if (is_new_current || !rng->init) {
-               /*
-                * Use a new device's input to add some randomness to
-                * the system.  If this rng device isn't going to be
-                * used right away, its init function hasn't been
-                * called yet by set_current_rng(); so only use the
-                * randomness from devices that don't need an init callback
-                */
-               add_early_randomness(rng);
-       }
-       if (is_new_current)
-               put_rng(rng);
        return 0;
 out_unlock:
        mutex_unlock(&rng_mutex);
@@ -602,12 +567,11 @@ EXPORT_SYMBOL_GPL(hwrng_register);
 
 void hwrng_unregister(struct hwrng *rng)
 {
-       struct hwrng *old_rng, *new_rng;
+       struct hwrng *new_rng;
        int err;
 
        mutex_lock(&rng_mutex);
 
-       old_rng = current_rng;
        list_del(&rng->list);
        complete_all(&rng->dying);
        if (current_rng == rng) {
@@ -626,11 +590,8 @@ void hwrng_unregister(struct hwrng *rng)
        } else
                mutex_unlock(&rng_mutex);
 
-       if (new_rng) {
-               if (old_rng != new_rng)
-                       add_early_randomness(new_rng);
+       if (new_rng)
                put_rng(new_rng);
-       }
 
        wait_for_completion(&rng->cleanup_done);
 }
index e63a6a17793c8560085c1571e862338cf4e1c21b..cf0be8a7939de49e2ec2d752ce307d31c80ca2a9 100644 (file)
@@ -29,7 +29,7 @@ if TCG_TPM
 
 config TCG_TPM2_HMAC
        bool "Use HMAC and encrypted transactions on the TPM bus"
-       default y
+       default X86_64
        select CRYPTO_ECDH
        select CRYPTO_LIB_AESCFB
        select CRYPTO_LIB_SHA256
index 647c6ca92ac3ca3e545a78cfc39c25ec25cc3121..cad0048bcc3c6852de3633b7def9d9f604c83c29 100644 (file)
@@ -223,30 +223,4 @@ u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
 }
 EXPORT_SYMBOL_GPL(tpm_buf_read_u32);
 
-static u16 tpm_buf_tag(struct tpm_buf *buf)
-{
-       struct tpm_header *head = (struct tpm_header *)buf->data;
-
-       return be16_to_cpu(head->tag);
-}
-
-/**
- * tpm_buf_parameters - return the TPM response parameters area of the tpm_buf
- * @buf: tpm_buf to use
- *
- * Where the parameters are located depends on the tag of a TPM
- * command (it's immediately after the header for TPM_ST_NO_SESSIONS
- * or 4 bytes after for TPM_ST_SESSIONS). Evaluate this and return a
- * pointer to the first byte of the parameters area.
- *
- * @return: pointer to parameters area
- */
-u8 *tpm_buf_parameters(struct tpm_buf *buf)
-{
-       int offset = TPM_HEADER_SIZE;
-
-       if (tpm_buf_tag(buf) == TPM2_ST_SESSIONS)
-               offset += 4;
 
-       return &buf->data[offset];
-}
index 6b8b9956ba6944f651419a18b18bd2e4defab8b4..7bb87fa5f7a12663256442a6d1cbc0ad39cd9136 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/tpm_eventlog.h>
 
 #ifdef CONFIG_X86
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
 #endif
 
 #define TPM_MINOR              224     /* officially assigned */
index 0cdf892ec2a7d40929787948dc146d432f74d70f..1e856259219e2e9995b9fe7f799422d6c0bf3d87 100644 (file)
@@ -281,6 +281,7 @@ struct tpm2_get_random_out {
 int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
 {
        struct tpm2_get_random_out *out;
+       struct tpm_header *head;
        struct tpm_buf buf;
        u32 recd;
        u32 num_bytes = max;
@@ -288,6 +289,7 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
        int total = 0;
        int retries = 5;
        u8 *dest_ptr = dest;
+       off_t offset;
 
        if (!num_bytes || max > TPM_MAX_RNG_DATA)
                return -EINVAL;
@@ -320,7 +322,13 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
                        goto out;
                }
 
-               out = (struct tpm2_get_random_out *)tpm_buf_parameters(&buf);
+               head = (struct tpm_header *)buf.data;
+               offset = TPM_HEADER_SIZE;
+               /* Skip the parameter size field: */
+               if (be16_to_cpu(head->tag) == TPM2_ST_SESSIONS)
+                       offset += 4;
+
+               out = (struct tpm2_get_random_out *)&buf.data[offset];
                recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
                if (tpm_buf_length(&buf) <
                    TPM_HEADER_SIZE +
index ea8860661876ed9ea64daed40095a4126687cbd4..907ac9956a78f02fee74aead24236ce664133b87 100644 (file)
@@ -80,6 +80,9 @@
 /* maximum number of names the TPM must remember for authorization */
 #define AUTH_MAX_NAMES 3
 
+#define AES_KEY_BYTES  AES_KEYSIZE_128
+#define AES_KEY_BITS   (AES_KEY_BYTES*8)
+
 static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
                               u32 *handle, u8 *name);
 
@@ -954,6 +957,20 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
 }
 EXPORT_SYMBOL(tpm2_start_auth_session);
 
+/*
+ * A mask containing the object attributes for the kernel held null primary key
+ * used in HMAC encryption. For more information on specific attributes look up
+ * to "8.3 TPMA_OBJECT (Object Attributes)".
+ */
+#define TPM2_OA_NULL_KEY ( \
+       TPM2_OA_NO_DA | \
+       TPM2_OA_FIXED_TPM | \
+       TPM2_OA_FIXED_PARENT | \
+       TPM2_OA_SENSITIVE_DATA_ORIGIN | \
+       TPM2_OA_USER_WITH_AUTH | \
+       TPM2_OA_DECRYPT | \
+       TPM2_OA_RESTRICTED)
+
 /**
  * tpm2_parse_create_primary() - parse the data returned from TPM_CC_CREATE_PRIMARY
  *
@@ -1018,7 +1035,7 @@ static int tpm2_parse_create_primary(struct tpm_chip *chip, struct tpm_buf *buf,
        val = tpm_buf_read_u32(buf, &offset_t);
 
        /* object properties */
-       if (val != TPM2_OA_TMPL)
+       if (val != TPM2_OA_NULL_KEY)
                return -EINVAL;
 
        /* auth policy (empty) */
@@ -1178,7 +1195,7 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
        tpm_buf_append_u16(&template, TPM_ALG_SHA256);
 
        /* object properties */
-       tpm_buf_append_u32(&template, TPM2_OA_TMPL);
+       tpm_buf_append_u32(&template, TPM2_OA_NULL_KEY);
 
        /* sauth policy (empty) */
        tpm_buf_append_u16(&template, 0);
index 176cd8dbf1db2c61d5f2ed193c63822e898ac9c6..fdef214b9f6bffc24017c50271e552c6e9d44564 100644 (file)
@@ -1020,7 +1020,8 @@ void tpm_tis_remove(struct tpm_chip *chip)
                interrupt = 0;
 
        tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
-       flush_work(&priv->free_irq_work);
+       if (priv->free_irq_work.func)
+               flush_work(&priv->free_irq_work);
 
        tpm_tis_clkrun_enable(chip, false);
 
index 13e99cf65efe4483053c2478aff2d3834c68609b..690ad8e9b731906e92c9d08b6541562f69057693 100644 (file)
@@ -210,7 +210,7 @@ static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len,
 static inline bool is_bsw(void)
 {
 #ifdef CONFIG_X86
-       return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
+       return (boot_cpu_data.x86_vfm == INTEL_ATOM_AIRMONT) ? 1 : 0;
 #else
        return false;
 #endif
index 3f9eaf27b41b89a8ab8133cfcaec7a1e2c3e86f0..c9eca24bbad478ada329f8c1e1eec3fa6d1ab232 100644 (file)
@@ -37,6 +37,7 @@
 #include "tpm_tis_spi.h"
 
 #define MAX_SPI_FRAMESIZE 64
+#define SPI_HDRSIZE 4
 
 /*
  * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
@@ -247,7 +248,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
 int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
                     int irq, const struct tpm_tis_phy_ops *phy_ops)
 {
-       phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+       phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL);
        if (!phy->iobuf)
                return -ENOMEM;
 
index f04ae67dda372d2787bba3bb45d57f53515e2bc0..fc275d41d51e9b51a93ccc26f36e5592c4228d60 100644 (file)
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/fs.h>
-#include <linux/amd-pstate.h>
 
 #include <acpi/cppc_acpi.h>
 
+#include "amd-pstate.h"
+
 /*
  * Abbreviations:
  * amd_pstate_ut: used as a shortform for AMD P-State unit test.
index 1b7e82a0ad2e62f4e0f3e520bfa9408888bc4434..9ad62dbe8bfbff6f01a0d89630bb12edda6e9349 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/delay.h>
 #include <linux/uaccess.h>
 #include <linux/static_call.h>
-#include <linux/amd-pstate.h>
 #include <linux/topology.h>
 
 #include <acpi/processor.h>
@@ -46,6 +45,8 @@
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_device_id.h>
+
+#include "amd-pstate.h"
 #include "amd-pstate-trace.h"
 
 #define AMD_PSTATE_TRANSITION_LATENCY  20000
 #define CPPC_HIGHEST_PERF_PERFORMANCE  196
 #define CPPC_HIGHEST_PERF_DEFAULT      166
 
+#define AMD_CPPC_EPP_PERFORMANCE               0x00
+#define AMD_CPPC_EPP_BALANCE_PERFORMANCE       0x80
+#define AMD_CPPC_EPP_BALANCE_POWERSAVE         0xBF
+#define AMD_CPPC_EPP_POWERSAVE                 0xFF
+
+/*
+ * enum amd_pstate_mode - driver working mode of amd pstate
+ */
+enum amd_pstate_mode {
+       AMD_PSTATE_UNDEFINED = 0,
+       AMD_PSTATE_DISABLE,
+       AMD_PSTATE_PASSIVE,
+       AMD_PSTATE_ACTIVE,
+       AMD_PSTATE_GUIDED,
+       AMD_PSTATE_MAX,
+};
+
+static const char * const amd_pstate_mode_string[] = {
+       [AMD_PSTATE_UNDEFINED]   = "undefined",
+       [AMD_PSTATE_DISABLE]     = "disable",
+       [AMD_PSTATE_PASSIVE]     = "passive",
+       [AMD_PSTATE_ACTIVE]      = "active",
+       [AMD_PSTATE_GUIDED]      = "guided",
+       NULL,
+};
+
+struct quirk_entry {
+       u32 nominal_freq;
+       u32 lowest_freq;
+};
+
 /*
  * TODO: We need more time to fine tune processors with shared memory solution
  * with community together.
@@ -669,7 +701,7 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
        if (state)
                policy->cpuinfo.max_freq = cpudata->max_freq;
        else
-               policy->cpuinfo.max_freq = cpudata->nominal_freq;
+               policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
 
        policy->max = policy->cpuinfo.max_freq;
 
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
new file mode 100644 (file)
index 0000000..e6a28e7
--- /dev/null
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Author: Meng Li <[email protected]>
+ */
+
+#ifndef _LINUX_AMD_PSTATE_H
+#define _LINUX_AMD_PSTATE_H
+
+#include <linux/pm_qos.h>
+
+/*********************************************************************
+ *                        AMD P-state INTERFACE                       *
+ *********************************************************************/
+/**
+ * struct  amd_aperf_mperf
+ * @aperf: actual performance frequency clock count
+ * @mperf: maximum performance frequency clock count
+ * @tsc:   time stamp counter
+ */
+struct amd_aperf_mperf {
+       u64 aperf;
+       u64 mperf;
+       u64 tsc;
+};
+
+/**
+ * struct amd_cpudata - private CPU data for AMD P-State
+ * @cpu: CPU number
+ * @req: constraint request to apply
+ * @cppc_req_cached: cached performance request hints
+ * @highest_perf: the maximum performance an individual processor may reach,
+ *               assuming ideal conditions
+ *               For platforms that do not support the preferred core feature, the
+ *               highest_pef may be configured with 166 or 255, to avoid max frequency
+ *               calculated wrongly. we take the fixed value as the highest_perf.
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ *               assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ *                        savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
+ *               priority.
+ * @min_limit_perf: Cached value of the performance corresponding to policy->min
+ * @max_limit_perf: Cached value of the performance corresponding to policy->max
+ * @min_limit_freq: Cached value of policy->min (in khz)
+ * @max_limit_freq: Cached value of policy->max (in khz)
+ * @max_freq: the frequency (in khz) that mapped to highest_perf
+ * @min_freq: the frequency (in khz) that mapped to lowest_perf
+ * @nominal_freq: the frequency (in khz) that mapped to nominal_perf
+ * @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf
+ * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
+ * @prev: Last Aperf/Mperf/tsc count value read from register
+ * @freq: current cpu frequency value (in khz)
+ * @boost_supported: check whether the Processor or SBIOS supports boost mode
+ * @hw_prefcore: check whether HW supports preferred core featue.
+ *               Only when hw_prefcore and early prefcore param are true,
+ *               AMD P-State driver supports preferred core featue.
+ * @epp_policy: Last saved policy used to set energy-performance preference
+ * @epp_cached: Cached CPPC energy-performance preference value
+ * @policy: Cpufreq policy value
+ * @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
+ *
+ * The amd_cpudata is key private data for each CPU thread in AMD P-State, and
+ * represents all the attributes and goals that AMD P-State requests at runtime.
+ */
+struct amd_cpudata {
+       int     cpu;
+
+       struct  freq_qos_request req[2];
+       u64     cppc_req_cached;
+
+       u32     highest_perf;
+       u32     nominal_perf;
+       u32     lowest_nonlinear_perf;
+       u32     lowest_perf;
+       u32     prefcore_ranking;
+       u32     min_limit_perf;
+       u32     max_limit_perf;
+       u32     min_limit_freq;
+       u32     max_limit_freq;
+
+       u32     max_freq;
+       u32     min_freq;
+       u32     nominal_freq;
+       u32     lowest_nonlinear_freq;
+
+       struct amd_aperf_mperf cur;
+       struct amd_aperf_mperf prev;
+
+       u64     freq;
+       bool    boost_supported;
+       bool    hw_prefcore;
+
+       /* EPP feature related attributes*/
+       s16     epp_policy;
+       s16     epp_cached;
+       u32     policy;
+       u64     cppc_cap1_cached;
+       bool    suspended;
+};
+
+#endif /* _LINUX_AMD_PSTATE_H */
index 4b986c044741eda32d5e93f9457c7b606ccfacc0..65d3f79104bd509658b8ed6c7c4e7477844e0a08 100644 (file)
@@ -1153,7 +1153,8 @@ static void intel_pstate_update_policies(void)
 static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
                                           struct cpufreq_policy *policy)
 {
-       intel_pstate_get_hwp_cap(cpudata);
+       if (hwp_active)
+               intel_pstate_get_hwp_cap(cpudata);
 
        policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
                        cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
index 00a9f0eef8dd2c6292b7772038cd7af6835d5dd3..3c2b6144be23cd1a6af9988f1e77b51fd4d421a5 100644 (file)
@@ -2352,15 +2352,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
        struct device *dev;
        int rc;
 
-       switch (mode) {
-       case CXL_DECODER_RAM:
-       case CXL_DECODER_PMEM:
-               break;
-       default:
-               dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
-               return ERR_PTR(-EINVAL);
-       }
-
        cxlr = cxl_region_alloc(cxlrd, id);
        if (IS_ERR(cxlr))
                return cxlr;
@@ -2415,6 +2406,15 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
 {
        int rc;
 
+       switch (mode) {
+       case CXL_DECODER_RAM:
+       case CXL_DECODER_PMEM:
+               break;
+       default:
+               dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
+               return ERR_PTR(-EINVAL);
+       }
+
        rc = memregion_alloc(GFP_KERNEL);
        if (rc < 0)
                return ERR_PTR(rc);
index b7c6f7ea9e0c83910b5a7d5001c25e0b96fa7492..6a1bfcd0cc21081cb42db82b3c61032ad2e401b2 100644 (file)
@@ -540,6 +540,12 @@ static int race_signal_callback(void *arg)
                        t[i].before = pass;
                        t[i].task = kthread_run(thread_signal_callback, &t[i],
                                                "dma-fence:%d", i);
+                       if (IS_ERR(t[i].task)) {
+                               ret = PTR_ERR(t[i].task);
+                               while (--i >= 0)
+                                       kthread_stop_put(t[i].task);
+                               return ret;
+                       }
                        get_task_struct(t[i].task);
                }
 
index 101394f16930f81f1ac98da172a50517e7534cff..237bce21d1e724632f56646c914cc31bb9efe876 100644 (file)
@@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
 
        seq_printf(s, "%s: %d\n", obj->name, obj->value);
 
-       spin_lock_irq(&obj->lock);
+       spin_lock(&obj->lock); /* Caller already disabled IRQ. */
        list_for_each(pos, &obj->pt_list) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, link);
                sync_print_fence(s, &pt->base, false);
        }
-       spin_unlock_irq(&obj->lock);
+       spin_unlock(&obj->lock);
 }
 
 static void sync_print_sync_file(struct seq_file *s,
index f93c966e794d97b914b1d681990d6f8a4109b8ae..e83b1fece780eeae4a50d6dbe943085778649e63 100644 (file)
@@ -579,4 +579,5 @@ static struct kunit_suite packet_serdes_test_suite = {
 };
 kunit_test_suite(packet_serdes_test_suite);
 
+MODULE_DESCRIPTION("FireWire packet serialization/deserialization unit test suite");
 MODULE_LICENSE("GPL");
index 2fcbede4fab1f51a622df0511b9695fc9d400175..bc3f10a2e516e076ee63d762b9d1e538fb44f8c2 100644 (file)
@@ -86,4 +86,5 @@ static struct kunit_suite structure_layout_test_suite = {
 };
 kunit_test_suite(structure_layout_test_suite);
 
+MODULE_DESCRIPTION("FireWire UAPI unit test suite");
 MODULE_LICENSE("GPL");
index 5b9dc26e6bcb963289debaee0084ed9494782d71..552c78f5f05902a20f78ad7671bbb36d5542a7f2 100644 (file)
@@ -136,7 +136,7 @@ static int efi_pstore_read_func(struct pstore_record *record,
                                     &size, record->buf);
        if (status != EFI_SUCCESS) {
                kfree(record->buf);
-               return -EIO;
+               return efi_status_to_err(status);
        }
 
        /*
@@ -189,7 +189,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record)
                        return 0;
 
                if (status != EFI_SUCCESS)
-                       return -EIO;
+                       return efi_status_to_err(status);
 
                /* skip variables that don't concern us */
                if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID))
@@ -227,7 +227,7 @@ static int efi_pstore_write(struct pstore_record *record)
                                            record->size, record->psi->buf,
                                            true);
        efivar_unlock();
-       return status == EFI_SUCCESS ? 0 : -EIO;
+       return efi_status_to_err(status);
 };
 
 static int efi_pstore_erase(struct pstore_record *record)
@@ -238,7 +238,7 @@ static int efi_pstore_erase(struct pstore_record *record)
                                     PSTORE_EFI_ATTRIBUTES, 0, NULL);
 
        if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
-               return -EIO;
+               return efi_status_to_err(status);
        return 0;
 }
 
index 684c9354637c6541c1d695ab93e80551f6c5cc1e..d0ef93551c44f64affb5a152b277a8b0c4e827fb 100644 (file)
@@ -41,7 +41,7 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
 unsigned long __weak kernel_entry_address(unsigned long kernel_addr,
                efi_loaded_image_t *image)
 {
-       return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr;
+       return *(unsigned long *)(kernel_addr + 8) - PHYSADDR(VMLINUX_LOAD_ADDRESS) + kernel_addr;
 }
 
 efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
index ac8c0ef851581f5d3eb51260ac1d2a6b92955bb6..af2c82f7bd9024094470a443d46dfc3381862ec8 100644 (file)
@@ -41,6 +41,7 @@ SECTIONS
        }
 
        /DISCARD/ : {
+               *(.discard .discard.*)
                *(.modinfo .init.modinfo)
        }
 }
index 5d56bc40a79d727e1b76fcea552827829f900435..708b777857d3417f2205a7bb851790e4e34ebf90 100644 (file)
@@ -213,7 +213,7 @@ extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
  * Calls the appropriate efi_runtime_service() with the appropriate
  * arguments.
  */
-static void efi_call_rts(struct work_struct *work)
+static void __nocfi efi_call_rts(struct work_struct *work)
 {
        const union efi_rts_args *args = efi_rts_work.args;
        efi_status_t status = EFI_NOT_FOUND;
@@ -435,7 +435,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
        return status;
 }
 
-static efi_status_t
+static efi_status_t __nocfi
 virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr,
                         unsigned long data_size, void *data)
 {
@@ -469,7 +469,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
        return status;
 }
 
-static efi_status_t
+static efi_status_t __nocfi
 virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space,
                                u64 *remaining_space, u64 *max_variable_size)
 {
@@ -499,10 +499,9 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
        return status;
 }
 
-static void virt_efi_reset_system(int reset_type,
-                                 efi_status_t status,
-                                 unsigned long data_size,
-                                 efi_char16_t *data)
+static void __nocfi
+virt_efi_reset_system(int reset_type, efi_status_t status,
+                     unsigned long data_size, efi_char16_t *data)
 {
        if (down_trylock(&efi_runtime_lock)) {
                pr_warn("failed to invoke the reset_system() runtime service:\n"
index 8975cf41a91ac90672167d5abab8f13275ced0dc..48ad0c04aa72b7ea760687070e6ff2e80a4674c9 100644 (file)
@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
                        return -EINVAL;
 
                vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
-               if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+               if (adev->flags & AMD_IS_APU) {
                        system_mem_needed = size;
                        ttm_mem_needed = size;
                }
@@ -233,7 +233,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
        if (adev && xcp_id >= 0) {
                adev->kfd.vram_used[xcp_id] += vram_needed;
                adev->kfd.vram_used_aligned[xcp_id] +=
-                               (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+                               (adev->flags & AMD_IS_APU) ?
                                vram_needed :
                                ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
        }
@@ -261,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
 
                if (adev) {
                        adev->kfd.vram_used[xcp_id] -= size;
-                       if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+                       if (adev->flags & AMD_IS_APU) {
                                adev->kfd.vram_used_aligned[xcp_id] -= size;
                                kfd_mem_limit.system_mem_used -= size;
                                kfd_mem_limit.ttm_mem_used -= size;
@@ -890,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
         * if peer device has large BAR. In contrast, access over xGMI is
         * allowed for both small and large BAR configurations of peer device
         */
-       if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
+       if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
            ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
             (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
             (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -1658,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
                - atomic64_read(&adev->vram_pin_size)
                - reserved_for_pt;
 
-       if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+       if (adev->flags & AMD_IS_APU) {
                system_mem_available = no_system_mem_limit ?
                                        kfd_mem_limit.max_system_mem_limit :
                                        kfd_mem_limit.max_system_mem_limit -
@@ -1706,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
                domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
 
-               if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+               if (adev->flags & AMD_IS_APU) {
                        domain = AMDGPU_GEM_DOMAIN_GTT;
                        alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
                        alloc_flags = 0;
@@ -1953,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
        if (size) {
                if (!is_imported &&
                   (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
-                  ((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
+                  ((adev->flags & AMD_IS_APU) &&
                    mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
                        *size = bo_size;
                else
@@ -2376,7 +2376,7 @@ static int import_obj_create(struct amdgpu_device *adev,
        (*mem)->bo = bo;
        (*mem)->va = va;
        (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
-                        !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+                        !(adev->flags & AMD_IS_APU) ?
                         AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
 
        (*mem)->mapped_to_gpu_memory = 0;
index 861ccff78af954228408efa5832858a0c52f63dd..932dc93b2e631d38b7ccf1e1edf0bbadf00eaf41 100644 (file)
@@ -5944,13 +5944,18 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
        *speed = PCI_SPEED_UNKNOWN;
        *width = PCIE_LNK_WIDTH_UNKNOWN;
 
-       while ((parent = pci_upstream_bridge(parent))) {
-               /* skip upstream/downstream switches internal to dGPU*/
-               if (parent->vendor == PCI_VENDOR_ID_ATI)
-                       continue;
-               *speed = pcie_get_speed_cap(parent);
-               *width = pcie_get_width_cap(parent);
-               break;
+       if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
+               while ((parent = pci_upstream_bridge(parent))) {
+                       /* skip upstream/downstream switches internal to dGPU*/
+                       if (parent->vendor == PCI_VENDOR_ID_ATI)
+                               continue;
+                       *speed = pcie_get_speed_cap(parent);
+                       *width = pcie_get_width_cap(parent);
+                       break;
+               }
+       } else {
+               /* use the current speeds rather than max if switching is not supported */
+               pcie_bandwidth_available(adev->pdev, NULL, speed, width);
        }
 }
 
index c8980d5f6540a6b5b966d34c4143519ed3c3f833..7021c4a66fb5e91a282d92f16dbe540ae205f355 100644 (file)
@@ -46,7 +46,7 @@ struct amdgpu_iv_entry;
 #define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x)            AMDGPU_GET_REG_FIELD(x, 7, 7)
 #define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x)                        AMDGPU_GET_REG_FIELD(x, 10, 8)
 #define AMDGPU_RAS_GPU_ERR_AID_ID(x)                   AMDGPU_GET_REG_FIELD(x, 12, 11)
-#define AMDGPU_RAS_GPU_ERR_HBM_ID(x)                   AMDGPU_GET_REG_FIELD(x, 13, 13)
+#define AMDGPU_RAS_GPU_ERR_HBM_ID(x)                   AMDGPU_GET_REG_FIELD(x, 14, 13)
 #define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x)              AMDGPU_GET_REG_FIELD(x, 31, 31)
 
 #define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT   1000
index 7fdd306a48a0ecb3729a3632c1c38385802e295c..f07647a9a9d97bb87bf1c6704cf12674d3f89b4e 100644 (file)
@@ -706,11 +706,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
                         struct amdgpu_vm_bo_base *entry)
 {
        struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
-       struct amdgpu_bo *bo = parent->bo, *pbo;
+       struct amdgpu_bo *bo, *pbo;
        struct amdgpu_vm *vm = params->vm;
        uint64_t pde, pt, flags;
        unsigned int level;
 
+       if (WARN_ON(!parent))
+               return -EINVAL;
+
+       bo = parent->bo;
        for (level = 0, pbo = bo->parent; pbo; ++level)
                pbo = pbo->parent;
 
index 414ea3f560a7a54b72ffce7290c1d41ed126798c..d4e2aed2efa3316cc857786c4744424108ebbfaa 100644 (file)
@@ -422,7 +422,7 @@ __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
 
        if (adev->gmc.num_mem_partitions == num_xcc / 2)
                return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
-                                                   AMDGPU_QPX_PARTITION_MODE;
+                                                   AMDGPU_CPX_PARTITION_MODE;
 
        if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
                return AMDGPU_DPX_PARTITION_MODE;
index 9596bca572129d108e5f3d2f80f815e8e011a68a..afc57df421cd9c4d3bf2fb52b56bdfe0d3227afe 100644 (file)
@@ -408,15 +408,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
                        f2g = &gfx_v11_kfd2kgd;
                        break;
                case IP_VERSION(11, 0, 3):
-                       if ((adev->pdev->device == 0x7460 &&
-                            adev->pdev->revision == 0x00) ||
-                           (adev->pdev->device == 0x7461 &&
-                            adev->pdev->revision == 0x00))
-                               /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
-                               gfx_target_version = 110005;
-                       else
-                               /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
-                               gfx_target_version = 110001;
+                       /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+                       gfx_target_version = 110001;
                        f2g = &gfx_v11_kfd2kgd;
                        break;
                case IP_VERSION(11, 5, 0):
index 4816fcb9803a9d0b631cd5943341f06201c27cf6..8ee3d07ffbdfa243623a93fa4f7aeb56ba02334f 100644 (file)
@@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
        if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
                return -EINVAL;
 
-       if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)
+       if (adev->flags & AMD_IS_APU)
                return 0;
 
        pgmap = &kfddev->pgmap;
index 069b81eeea03cb7f84f9b9acdd958f2339bae124..31e500859ab012500d6678fe36ce89e8936f0f30 100644 (file)
@@ -2619,8 +2619,7 @@ svm_range_best_restore_location(struct svm_range *prange,
                return -1;
        }
 
-       if (node->adev->gmc.is_app_apu ||
-           node->adev->flags & AMD_IS_APU)
+       if (node->adev->flags & AMD_IS_APU)
                return 0;
 
        if (prange->preferred_loc == gpuid ||
@@ -3338,8 +3337,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
                goto out;
        }
 
-       if (bo_node->adev->gmc.is_app_apu ||
-           bo_node->adev->flags & AMD_IS_APU) {
+       if (bo_node->adev->flags & AMD_IS_APU) {
                best_loc = 0;
                goto out;
        }
index 9c37bd0567efa5be8f5da736f0c96eea5d2a6320..70c1776611c472cb9b213729f0567ac20600ef4c 100644 (file)
@@ -201,7 +201,6 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
  * is initialized to not 0 when page migration register device memory.
  */
 #define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
-                                       (adev)->gmc.is_app_apu ||\
                                        ((adev)->flags & AMD_IS_APU))
 
 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
index 35733d5c51938cb069fd0ee8b37654342449e19a..a5e1a93ddaea2ab8b11274a6d435861d9d2fc7a4 100644 (file)
@@ -613,6 +613,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                &connector->base,
                dev->mode_config.tile_property,
                0);
+       connector->colorspace_property = master->base.colorspace_property;
+       if (connector->colorspace_property)
+               drm_connector_attach_colorspace_property(connector);
 
        drm_connector_set_path_property(connector, pathprop);
 
index 1acb2d2c5597b53d556a8690c7b5b851229bbd0a..5716918372001a394967b3dcf620e623f65e9467 100644 (file)
@@ -3583,7 +3583,7 @@ struct atom_gpio_voltage_object_v4
    uint8_t  phase_delay_us;                      // phase delay in unit of micro second
    uint8_t  reserved;   
    uint32_t gpio_mask_val;                         // GPIO Mask value
-   struct atom_voltage_gpio_map_lut voltage_gpio_lut[1];
+   struct atom_voltage_gpio_map_lut voltage_gpio_lut[] __counted_by(gpio_entry_num);
 };
 
 struct  atom_svid2_voltage_object_v4
index 2e8e6c9875f6c619db989b918811d1c21a8a5b7d..f83ace2d7ec30f39768080e2623069c48105f893 100644 (file)
@@ -477,31 +477,30 @@ typedef struct _ATOM_PPLIB_STATE_V2
 } ATOM_PPLIB_STATE_V2;
 
 typedef struct _StateArray{
-    //how many states we have 
-    UCHAR ucNumEntries;
-    
-    ATOM_PPLIB_STATE_V2 states[1];
+       //how many states we have
+       UCHAR ucNumEntries;
+
+       ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */;
 }StateArray;
 
 
 typedef struct _ClockInfoArray{
-    //how many clock levels we have
-    UCHAR ucNumEntries;
-    
-    //sizeof(ATOM_PPLIB_CLOCK_INFO)
-    UCHAR ucEntrySize;
-    
-    UCHAR clockInfo[];
+       //how many clock levels we have
+       UCHAR ucNumEntries;
+
+       //sizeof(ATOM_PPLIB_CLOCK_INFO)
+       UCHAR ucEntrySize;
+
+       UCHAR clockInfo[];
 }ClockInfoArray;
 
 typedef struct _NonClockInfoArray{
+       //how many non-clock levels we have. normally should be same as number of states
+       UCHAR ucNumEntries;
+       //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+       UCHAR ucEntrySize;
 
-    //how many non-clock levels we have. normally should be same as number of states
-    UCHAR ucNumEntries;
-    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
-    UCHAR ucEntrySize;
-    
-    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[];
+       ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
 }NonClockInfoArray;
 
 typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
@@ -513,8 +512,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
 
 typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
 {
-    UCHAR ucNumEntries;                                                // Number of entries.
-    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocate entries.
+       // Number of entries.
+       UCHAR ucNumEntries;
+       // Dynamically allocate entries.
+       ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[] __counted_by(ucNumEntries);
 }ATOM_PPLIB_Clock_Voltage_Dependency_Table;
 
 typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
@@ -529,8 +530,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
 
 typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
 {
-    UCHAR ucNumEntries;                                                // Number of entries.
-    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
+       // Number of entries.
+       UCHAR ucNumEntries;
+       // Dynamically allocate entries.
+       ATOM_PPLIB_Clock_Voltage_Limit_Record entries[] __counted_by(ucNumEntries);
 }ATOM_PPLIB_Clock_Voltage_Limit_Table;
 
 union _ATOM_PPLIB_CAC_Leakage_Record
@@ -553,8 +556,10 @@ typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
 
 typedef struct _ATOM_PPLIB_CAC_Leakage_Table
 {
-    UCHAR ucNumEntries;                                                 // Number of entries.
-    ATOM_PPLIB_CAC_Leakage_Record entries[1];                           // Dynamically allocate entries.
+       // Number of entries.
+       UCHAR ucNumEntries;
+       // Dynamically allocate entries.
+       ATOM_PPLIB_CAC_Leakage_Record entries[] __counted_by(ucNumEntries);
 }ATOM_PPLIB_CAC_Leakage_Table;
 
 typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
@@ -568,8 +573,10 @@ typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
 
 typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
 {
-    UCHAR ucNumEntries;                                                 // Number of entries.
-    ATOM_PPLIB_PhaseSheddingLimits_Record entries[1];                   // Dynamically allocate entries.
+       // Number of entries.
+       UCHAR ucNumEntries;
+       // Dynamically allocate entries.
+       ATOM_PPLIB_PhaseSheddingLimits_Record entries[] __counted_by(ucNumEntries);
 }ATOM_PPLIB_PhaseSheddingLimits_Table;
 
 typedef struct _VCEClockInfo{
@@ -580,8 +587,8 @@ typedef struct _VCEClockInfo{
 }VCEClockInfo;
 
 typedef struct _VCEClockInfoArray{
-    UCHAR ucNumEntries;
-    VCEClockInfo entries[1];
+       UCHAR ucNumEntries;
+       VCEClockInfo entries[] __counted_by(ucNumEntries);
 }VCEClockInfoArray;
 
 typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
@@ -592,8 +599,8 @@ typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
 
 typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
 {
-    UCHAR numEntries;
-    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+       UCHAR numEntries;
+       ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries);
 }ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
 
 typedef struct _ATOM_PPLIB_VCE_State_Record
@@ -604,8 +611,8 @@ typedef struct _ATOM_PPLIB_VCE_State_Record
 
 typedef struct _ATOM_PPLIB_VCE_State_Table
 {
-    UCHAR numEntries;
-    ATOM_PPLIB_VCE_State_Record entries[1];
+       UCHAR numEntries;
+       ATOM_PPLIB_VCE_State_Record entries[] __counted_by(numEntries);
 }ATOM_PPLIB_VCE_State_Table;
 
 
@@ -626,8 +633,8 @@ typedef struct _UVDClockInfo{
 }UVDClockInfo;
 
 typedef struct _UVDClockInfoArray{
-    UCHAR ucNumEntries;
-    UVDClockInfo entries[1];
+       UCHAR ucNumEntries;
+       UVDClockInfo entries[] __counted_by(ucNumEntries);
 }UVDClockInfoArray;
 
 typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
@@ -638,8 +645,8 @@ typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
 
 typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
 {
-    UCHAR numEntries;
-    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+       UCHAR numEntries;
+       ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries);
 }ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
 
 typedef struct _ATOM_PPLIB_UVD_Table
@@ -657,8 +664,8 @@ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
 }ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
 
 typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
-    UCHAR numEntries;
-    ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[];
+       UCHAR numEntries;
+       ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[] __counted_by(numEntries);
 }ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
 
 typedef struct _ATOM_PPLIB_SAMU_Table
@@ -675,8 +682,8 @@ typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
 }ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
 
 typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
-    UCHAR numEntries;
-    ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
+       UCHAR numEntries;
+       ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[] __counted_by(numEntries);
 }ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
 
 typedef struct _ATOM_PPLIB_ACP_Table
@@ -743,9 +750,9 @@ typedef struct ATOM_PPLIB_VQ_Budgeting_Record{
 } ATOM_PPLIB_VQ_Budgeting_Record;
 
 typedef struct ATOM_PPLIB_VQ_Budgeting_Table {
-    UCHAR revid;
-    UCHAR numEntries;
-    ATOM_PPLIB_VQ_Budgeting_Record         entries[1];
+       UCHAR revid;
+       UCHAR numEntries;
+       ATOM_PPLIB_VQ_Budgeting_Record entries[] __counted_by(numEntries);
 } ATOM_PPLIB_VQ_Budgeting_Table;
 
 #pragma pack()
index bc241b593db1555444c51e5b8c5d6a12f697a8c9..b6257f34a7c657ba43c012184fa6f09eeb0d7897 100644 (file)
@@ -226,15 +226,17 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
-       if (!en && adev->in_s4) {
-               /* Adds a GFX reset as workaround just before sending the
-                * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
-                * an invalid state.
-                */
-               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
-                                                     SMU_RESET_MODE_2, NULL);
-               if (ret)
-                       return ret;
+       if (!en && !adev->in_s0ix) {
+               if (adev->in_s4) {
+                       /* Adds a GFX reset as workaround just before sending the
+                        * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+                        * an invalid state.
+                        */
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+                                                             SMU_RESET_MODE_2, NULL);
+                       if (ret)
+                               return ret;
+               }
 
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
        }
index 706265220292a974e6e1f8d2da588a267eaaee06..90703f4542aba5bda708278b7797219153e60eed 100644 (file)
@@ -1562,7 +1562,6 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
                smu_i2c->port = i;
                mutex_init(&smu_i2c->mutex);
                control->owner = THIS_MODULE;
-               control->class = I2C_CLASS_SPD;
                control->dev.parent = &adev->pdev->dev;
                control->algo = &smu_v14_0_2_i2c_algo;
                snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
index d8e449e6ebda286382179bce3f66d1a4812750b9..50cb8f7ee6b2cae492028cdffa65084c121c070e 100644 (file)
@@ -72,11 +72,6 @@ struct gamma_curve_sector {
        u32 segment_width;
 };
 
-struct gamma_curve_segment {
-       u32 start;
-       u32 end;
-};
-
 static struct gamma_curve_sector sector_tbl[] = {
        { 0,    4,  4   },
        { 16,   4,  4   },
index 94f8c34fc2932867c70f7816631ccd99fbac4691..6a8e45e9d0ecc19a232d489af91fc2464fff9975 100644 (file)
@@ -239,7 +239,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
        if (size < chunk_size)
                return -EINVAL;
 
-       if (chunk_size < PAGE_SIZE)
+       if (chunk_size < SZ_4K)
                return -EINVAL;
 
        if (!is_power_of_2(chunk_size))
index 177773bcdbfd4797b1d36e065a77282e8d7750c5..53c003983ad183a55219fd811da2d9adc06e0b28 100644 (file)
@@ -233,6 +233,8 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
 
        dma_resv_assert_held(shmem->base.resv);
 
+       drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);
+
        ret = drm_gem_shmem_get_pages(shmem);
 
        return ret;
@@ -611,6 +613,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
                return ret;
        }
 
+       if (is_cow_mapping(vma->vm_flags))
+               return -EINVAL;
+
        dma_resv_lock(shmem->base.resv, NULL);
        ret = drm_gem_shmem_get_pages(shmem);
        dma_resv_unlock(shmem->base.resv);
index ed81e1466c4b5ae1791dfa0b0462ed3ae5aec95d..40e7d862675eed5580d01e6ea707bde3c9af2750 100644 (file)
@@ -1252,17 +1252,6 @@ static const struct component_ops i915_audio_component_bind_ops = {
 static void i915_audio_component_init(struct drm_i915_private *i915)
 {
        u32 aud_freq, aud_freq_init;
-       int ret;
-
-       ret = component_add_typed(i915->drm.dev,
-                                 &i915_audio_component_bind_ops,
-                                 I915_COMPONENT_AUDIO);
-       if (ret < 0) {
-               drm_err(&i915->drm,
-                       "failed to add audio component (%d)\n", ret);
-               /* continue with reduced functionality */
-               return;
-       }
 
        if (DISPLAY_VER(i915) >= 9) {
                aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
@@ -1285,6 +1274,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915)
 
        /* init with current cdclk */
        intel_audio_cdclk_change_post(i915);
+}
+
+static void i915_audio_component_register(struct drm_i915_private *i915)
+{
+       int ret;
+
+       ret = component_add_typed(i915->drm.dev,
+                                 &i915_audio_component_bind_ops,
+                                 I915_COMPONENT_AUDIO);
+       if (ret < 0) {
+               drm_err(&i915->drm,
+                       "failed to add audio component (%d)\n", ret);
+               /* continue with reduced functionality */
+               return;
+       }
 
        i915->display.audio.component_registered = true;
 }
@@ -1317,6 +1321,12 @@ void intel_audio_init(struct drm_i915_private *i915)
                i915_audio_component_init(i915);
 }
 
+void intel_audio_register(struct drm_i915_private *i915)
+{
+       if (!i915->display.audio.lpe.platdev)
+               i915_audio_component_register(i915);
+}
+
 /**
  * intel_audio_deinit() - deinitialize the audio driver
  * @i915: the i915 drm device private data
index 9327954b801e579ec836383bd88c7f5995208ffe..576c061d72a45099e295a8d204a6b3d0b4f1dd7b 100644 (file)
@@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
 void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
 void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
 void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_register(struct drm_i915_private *i915);
 void intel_audio_deinit(struct drm_i915_private *dev_priv);
 void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
 
index 89bd032ed995e73e90bc584a0009ddd311a04f62..794b4af380558d5da9d123c59f12b1deadacbdca 100644 (file)
@@ -540,6 +540,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
 
        intel_display_driver_enable_user_access(i915);
 
+       intel_audio_register(i915);
+
        intel_display_debugfs_register(i915);
 
        /*
index 42619fc05de484fb1f54e2194f71157ed7c98fa0..090724fa766c9eeac0e9f8bcde0949e0d8888a36 100644 (file)
@@ -255,6 +255,7 @@ struct i915_execbuffer {
        struct intel_context *context; /* logical state for the request */
        struct i915_gem_context *gem_context; /** caller's context */
        intel_wakeref_t wakeref;
+       intel_wakeref_t wakeref_gt0;
 
        /** our requests to build */
        struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@@ -2685,6 +2686,7 @@ static int
 eb_select_engine(struct i915_execbuffer *eb)
 {
        struct intel_context *ce, *child;
+       struct intel_gt *gt;
        unsigned int idx;
        int err;
 
@@ -2708,10 +2710,17 @@ eb_select_engine(struct i915_execbuffer *eb)
                }
        }
        eb->num_batches = ce->parallel.number_children + 1;
+       gt = ce->engine->gt;
 
        for_each_child(ce, child)
                intel_context_get(child);
        eb->wakeref = intel_gt_pm_get(ce->engine->gt);
+       /*
+        * Keep GT0 active on MTL so that i915_vma_parked() doesn't
+        * free VMAs while execbuf ioctl is validating VMAs.
+        */
+       if (gt->info.id)
+               eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
 
        if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
                err = intel_context_alloc_state(ce);
@@ -2750,6 +2759,9 @@ eb_select_engine(struct i915_execbuffer *eb)
        return err;
 
 err:
+       if (gt->info.id)
+               intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
+
        intel_gt_pm_put(ce->engine->gt, eb->wakeref);
        for_each_child(ce, child)
                intel_context_put(child);
@@ -2763,6 +2775,12 @@ eb_put_engine(struct i915_execbuffer *eb)
        struct intel_context *child;
 
        i915_vm_put(eb->context->vm);
+       /*
+        * This works in conjunction with eb_select_engine() to prevent
+        * i915_vma_parked() from interfering while execbuf validates vmas.
+        */
+       if (eb->gt->info.id)
+               intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
        intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
        for_each_child(eb->context, child)
                intel_context_put(child);
index 3560a062d2872fb5d09edc5cabd78f95a590a3fa..5d7446a48ae790d75c8540d0e28957f78e9ecd30 100644 (file)
@@ -284,7 +284,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
 static inline bool
 i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
 {
-       return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
+       /* TODO: make DPT shrinkable when it has no bound vmas */
+       return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
+               !obj->is_dpt;
 }
 
 static inline bool
index 65a931ea80e9b846338b00f35951df2d0723f2e6..3527b8f446fe3b89978391c3de2d366eb252f536 100644 (file)
@@ -196,7 +196,7 @@ static int verify_access(struct drm_i915_private *i915,
        if (err)
                goto out_file;
 
-       mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, true);
+       mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, false);
        vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
index d650beb8ed22f6ac99592ea9fa5d2fc1fa20984d..20b9b04ec1e0bf1e8152b9f546a857632ec66381 100644 (file)
@@ -263,8 +263,13 @@ static void signal_irq_work(struct irq_work *work)
                i915_request_put(rq);
        }
 
+       /* Lazy irq enabling after HW submission */
        if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
                intel_breadcrumbs_arm_irq(b);
+
+       /* And confirm that we still want irqs enabled before we yield */
+       if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
+               intel_breadcrumbs_disarm_irq(b);
 }
 
 struct intel_breadcrumbs *
@@ -315,13 +320,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
                return;
 
        /* Kick the work once more to drain the signalers, and disarm the irq */
-       irq_work_sync(&b->irq_work);
-       while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
-               local_irq_disable();
-               signal_irq_work(&b->irq_work);
-               local_irq_enable();
-               cond_resched();
-       }
+       irq_work_queue(&b->irq_work);
 }
 
 void intel_breadcrumbs_free(struct kref *kref)
@@ -404,7 +403,7 @@ static void insert_breadcrumb(struct i915_request *rq)
         * the request as it may have completed and raised the interrupt as
         * we were attaching it into the lists.
         */
-       if (!b->irq_armed || __i915_request_is_complete(rq))
+       if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
                irq_work_queue(&b->irq_work);
 }
 
index 5c8e9ee3b008373789600ed045e5122e1965711b..3b740ca2500091a36e1e1ccf3fa4f2f93dac5f57 100644 (file)
@@ -885,6 +885,12 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
        if (IS_DG2(gt->i915)) {
                u8 first_ccs = __ffs(CCS_MASK(gt));
 
+               /*
+                * Store the number of active cslices before
+                * changing the CCS engine configuration
+                */
+               gt->ccs.cslices = CCS_MASK(gt);
+
                /* Mask off all the CCS engine */
                info->engine_mask &= ~GENMASK(CCS3, CCS0);
                /* Put back in the first CCS engine */
index 99b71bb7da0a6bb8a1f905ab71fcdadf61037e78..3c62a44e9106ceec802dc02645365a5e856fb041 100644 (file)
@@ -19,7 +19,7 @@ unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
 
        /* Build the value for the fixed CCS load balancing */
        for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
-               if (CCS_MASK(gt) & BIT(cslice))
+               if (gt->ccs.cslices & BIT(cslice))
                        /*
                         * If available, assign the cslice
                         * to the first available engine...
index def7dd0eb6f196d45be6c3f3cd3f767a5fc1eb48..cfdd2ad5e9549c3b50bfcbba9a02eb67d185a163 100644 (file)
@@ -207,6 +207,14 @@ struct intel_gt {
                                            [MAX_ENGINE_INSTANCE + 1];
        enum intel_submission_method submission_method;
 
+       struct {
+               /*
+                * Mask of the non fused CCS slices
+                * to be used for the load balancing
+                */
+               intel_engine_mask_t cslices;
+       } ccs;
+
        /*
         * Default address space (either GGTT or ppGTT depending on arch).
         *
index bebf28e3c4794b45fe520e5d57e4c0099c931af5..525587cfe1af94ad190deca3365bb400a0f479cb 100644 (file)
@@ -29,9 +29,9 @@
  */
 
 #define GUC_KLV_LEN_MIN                                1u
-#define GUC_KLV_0_KEY                          (0xffff << 16)
-#define GUC_KLV_0_LEN                          (0xffff << 0)
-#define GUC_KLV_n_VALUE                                (0xffffffff << 0)
+#define GUC_KLV_0_KEY                          (0xffffu << 16)
+#define GUC_KLV_0_LEN                          (0xffffu << 0)
+#define GUC_KLV_n_VALUE                                (0xffffffffu << 0)
 
 /**
  * DOC: GuC Self Config KLVs
index 7ea244d876ca63ccd1a91ba934d6f8871a3ac6ad..9bb997dbb4b9010e979f3e41c29c2ac48729d957 100644 (file)
@@ -185,7 +185,7 @@ static int lima_gem_pin(struct drm_gem_object *obj)
        if (bo->heap_size)
                return -EINVAL;
 
-       return drm_gem_shmem_pin(&bo->base);
+       return drm_gem_shmem_pin_locked(&bo->base);
 }
 
 static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
index fc3bfdc991d27aecb2d6ffefae1d8d06a58e0f25..3926485bb197b0992232447cb71bf1c1ebd0968c 100644 (file)
@@ -538,7 +538,7 @@ class Parser(object):
                self.variants.add(reg.domain)
 
        def do_validate(self, schemafile):
-               if self.validate == False:
+               if not self.validate:
                        return
 
                try:
@@ -948,7 +948,8 @@ def main():
        parser = argparse.ArgumentParser()
        parser.add_argument('--rnn', type=str, required=True)
        parser.add_argument('--xml', type=str, required=True)
-       parser.add_argument('--validate', action=argparse.BooleanOptionalAction)
+       parser.add_argument('--validate', default=False, action='store_true')
+       parser.add_argument('--no-validate', dest='validate', action='store_false')
 
        subparsers = parser.add_subparsers()
        subparsers.required = True
index 4d1aaee8fe15fcb1a8e2514095066809b8ebe1e7..1d19c87eaec18e6190adc3aa16e49bca48f0f731 100644 (file)
@@ -142,11 +142,16 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
                struct nvif_ioctl_v0 ioctl;
                struct nvif_ioctl_mthd_v0 mthd;
        } *args;
+       u32 args_size;
        u8 stack[128];
        int ret;
 
-       if (sizeof(*args) + size > sizeof(stack)) {
-               if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
+       if (check_add_overflow(sizeof(*args), size, &args_size))
+               return -ENOMEM;
+
+       if (args_size > sizeof(stack)) {
+               args = kmalloc(args_size, GFP_KERNEL);
+               if (!args)
                        return -ENOMEM;
        } else {
                args = (void *)stack;
@@ -157,7 +162,7 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
        args->mthd.method = mthd;
 
        memcpy(args->mthd.data, data, size);
-       ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
+       ret = nvif_object_ioctl(object, args, args_size, NULL);
        memcpy(data, args->mthd.data, size);
        if (args != (void *)stack)
                kfree(args);
@@ -276,7 +281,15 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
        object->map.size = 0;
 
        if (parent) {
-               if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
+               u32 args_size;
+
+               if (check_add_overflow(sizeof(*args), size, &args_size)) {
+                       nvif_object_dtor(object);
+                       return -ENOMEM;
+               }
+
+               args = kmalloc(args_size, GFP_KERNEL);
+               if (!args) {
                        nvif_object_dtor(object);
                        return -ENOMEM;
                }
@@ -293,8 +306,7 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
                args->new.oclass = oclass;
 
                memcpy(args->new.data, data, size);
-               ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
-                                       &object->priv);
+               ret = nvif_object_ioctl(parent, args, args_size, &object->priv);
                memcpy(data, args->new.data, size);
                kfree(args);
                if (ret == 0)
index 982324ef5a41b8f26756f6459716028b718e644a..2ae0eb0638f3252e643f3b6368687de3513ee209 100644 (file)
@@ -340,6 +340,8 @@ config DRM_PANEL_LG_SW43408
        depends on OF
        depends on DRM_MIPI_DSI
        depends on BACKLIGHT_CLASS_DEVICE
+       select DRM_DISPLAY_DP_HELPER
+       select DRM_DISPLAY_HELPER
        help
          Say Y here if you want to enable support for LG sw43408 panel.
          The panel has a 1080x2160@60Hz resolution and uses 24 bit RGB per
index 115f4702d59f769bf49ce888aa91d7db0a83cd0c..2b3a73696dcec7251aefe7ba6e4250ec365664b2 100644 (file)
@@ -182,7 +182,7 @@ static int sw43408_backlight_update_status(struct backlight_device *bl)
        return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
 }
 
-const struct backlight_ops sw43408_backlight_ops = {
+static const struct backlight_ops sw43408_backlight_ops = {
        .update_status = sw43408_backlight_update_status,
 };
 
index 88e80fe98112daa0019ec43217a94178a5b0b1a7..28bfc48a91272901862dcaa309f51da4697d0a4a 100644 (file)
@@ -282,15 +282,15 @@ static const struct drm_display_mode et028013dma_mode = {
 static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
        .clock = 6000,
        .hdisplay = 240,
-       .hsync_start = 240 + 28,
-       .hsync_end = 240 + 28 + 10,
-       .htotal = 240 + 28 + 10 + 10,
+       .hsync_start = 240 + 38,
+       .hsync_end = 240 + 38 + 10,
+       .htotal = 240 + 38 + 10 + 10,
        .vdisplay = 280,
-       .vsync_start = 280 + 8,
-       .vsync_end = 280 + 8 + 4,
-       .vtotal = 280 + 8 + 4 + 4,
-       .width_mm = 43,
-       .height_mm = 37,
+       .vsync_start = 280 + 48,
+       .vsync_end = 280 + 48 + 4,
+       .vtotal = 280 + 48 + 4 + 4,
+       .width_mm = 37,
+       .height_mm = 43,
        .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
 };
 
@@ -643,7 +643,9 @@ static int st7789v_probe(struct spi_device *spi)
        if (ret)
                return dev_err_probe(dev, ret, "Failed to get backlight\n");
 
-       of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
+       ret = of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
+       if (ret)
+               return dev_err_probe(&spi->dev, ret, "Failed to get orientation\n");
 
        drm_panel_add(&ctx->panel);
 
index d47b40b82b0bc4d189a2fa6772aaf3a91ee54c18..8e0ff3efede7e3206b75d38328dc843b741ce022 100644 (file)
@@ -192,7 +192,7 @@ static int panfrost_gem_pin(struct drm_gem_object *obj)
        if (bo->is_heap)
                return -EINVAL;
 
-       return drm_gem_shmem_pin(&bo->base);
+       return drm_gem_shmem_pin_locked(&bo->base);
 }
 
 static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
index b3be68b03610cf6b614ad8ada2f48ad5fb5ec5ef..dd8fb9f8341a990f63e2d67fc0a2c6f44fd023df 100644 (file)
@@ -505,8 +505,8 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
         * Eventually we will have a fully 50% fragmented mm.
         */
 
-       mm_size = PAGE_SIZE << max_order;
-       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+       mm_size = SZ_4K << max_order;
+       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
                               "buddy_init failed\n");
 
        KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
@@ -520,7 +520,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
                }
 
                for (order = top; order--;) {
-                       size = get_size(order, PAGE_SIZE);
+                       size = get_size(order, mm.chunk_size);
                        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
                                                                            mm_size, size, size,
                                                                                &tmp, flags),
@@ -534,7 +534,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
                }
 
                /* There should be one final page for this sub-allocation */
-               size = get_size(0, PAGE_SIZE);
+               size = get_size(0, mm.chunk_size);
                KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                                                    size, size, &tmp, flags),
                                                           "buddy_alloc hit -ENOMEM for hole\n");
@@ -544,7 +544,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
 
                list_move_tail(&block->link, &holes);
 
-               size = get_size(top, PAGE_SIZE);
+               size = get_size(top, mm.chunk_size);
                KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                                                   size, size, &tmp, flags),
                                                          "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
@@ -555,7 +555,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
 
        /* Nothing larger than blocks of chunk_size now available */
        for (order = 1; order <= max_order; order++) {
-               size = get_size(order, PAGE_SIZE);
+               size = get_size(order, mm.chunk_size);
                KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                                                   size, size, &tmp, flags),
                                                          "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
@@ -584,14 +584,14 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
         * page left.
         */
 
-       mm_size = PAGE_SIZE << max_order;
-       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+       mm_size = SZ_4K << max_order;
+       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
                               "buddy_init failed\n");
 
        KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
 
        for (order = 0; order < max_order; order++) {
-               size = get_size(order, PAGE_SIZE);
+               size = get_size(order, mm.chunk_size);
                KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                                                    size, size, &tmp, flags),
                                                           "buddy_alloc hit -ENOMEM with order=%d\n",
@@ -604,7 +604,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
        }
 
        /* And now the last remaining block available */
-       size = get_size(0, PAGE_SIZE);
+       size = get_size(0, mm.chunk_size);
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                                            size, size, &tmp, flags),
                                                   "buddy_alloc hit -ENOMEM on final alloc\n");
@@ -616,7 +616,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
 
        /* Should be completely full! */
        for (order = max_order; order--;) {
-               size = get_size(order, PAGE_SIZE);
+               size = get_size(order, mm.chunk_size);
                KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                                                   size, size, &tmp, flags),
                                                          "buddy_alloc unexpectedly succeeded, it should be full!");
@@ -632,7 +632,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
                list_del(&block->link);
                drm_buddy_free_block(&mm, block);
 
-               size = get_size(order, PAGE_SIZE);
+               size = get_size(order, mm.chunk_size);
                KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                                                    size, size, &tmp, flags),
                                                           "buddy_alloc hit -ENOMEM with order=%d\n",
@@ -647,7 +647,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
        }
 
        /* To confirm, now the whole mm should be available */
-       size = get_size(max_order, PAGE_SIZE);
+       size = get_size(max_order, mm.chunk_size);
        KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                                            size, size, &tmp, flags),
                                                   "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
@@ -678,15 +678,15 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
         * try to allocate them all.
         */
 
-       mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
+       mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);
 
-       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+       KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
                               "buddy_init failed\n");
 
        KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
 
        for (order = 0; order <= max_order; order++) {
-               size = get_size(order, PAGE_SIZE);
+               size = get_size(order, mm.chunk_size);
                KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                                                    size, size, &tmp, flags),
                                                           "buddy_alloc hit -ENOMEM with order=%d\n",
@@ -699,7 +699,7 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
        }
 
        /* Should be completely full! */
-       size = get_size(0, PAGE_SIZE);
+       size = get_size(0, mm.chunk_size);
        KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                                           size, size, &tmp, flags),
                                                  "buddy_alloc unexpectedly succeeded, it should be full!");
@@ -716,7 +716,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
        LIST_HEAD(allocated);
        struct drm_buddy mm;
 
-       KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
+       KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));
 
        KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
                            "mm.max_order(%d) != %d\n", mm.max_order,
@@ -724,7 +724,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
 
        size = mm.chunk_size << mm.max_order;
        KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
-                                                       PAGE_SIZE, &allocated, flags));
+                                                       mm.chunk_size, &allocated, flags));
 
        block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
        KUNIT_EXPECT_TRUE(test, block);
@@ -734,10 +734,10 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
                                                drm_buddy_block_order(block), mm.max_order);
 
        KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
-                           BIT_ULL(mm.max_order) * PAGE_SIZE,
+                           BIT_ULL(mm.max_order) * mm.chunk_size,
                                                "block size(%llu) != %llu\n",
                                                drm_buddy_block_size(&mm, block),
-                                               BIT_ULL(mm.max_order) * PAGE_SIZE);
+                                               BIT_ULL(mm.max_order) * mm.chunk_size);
 
        drm_buddy_free_list(&mm, &allocated, 0);
        drm_buddy_fini(&mm);
index 8f1730aeacc9eb5e0a35297e9df4be5eca7046aa..823d8d2da17c3f0ba8eae82ffe959e80d29e0a0c 100644 (file)
@@ -746,7 +746,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
                dev->vram_size = pci_resource_len(pdev, 2);
 
                drm_info(&dev->drm,
-                       "Register MMIO at 0x%pa size is %llu kiB\n",
+                       "Register MMIO at 0x%pa size is %llu KiB\n",
                         &rmmio_start, (uint64_t)rmmio_size / 1024);
                dev->rmmio = devm_ioremap(dev->drm.dev,
                                          rmmio_start,
@@ -765,7 +765,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
                fifo_size = pci_resource_len(pdev, 2);
 
                drm_info(&dev->drm,
-                        "FIFO at %pa size is %llu kiB\n",
+                        "FIFO at %pa size is %llu KiB\n",
                         &fifo_start, (uint64_t)fifo_size / 1024);
                dev->fifo_mem = devm_memremap(dev->drm.dev,
                                              fifo_start,
@@ -790,7 +790,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
         * SVGA_REG_VRAM_SIZE.
         */
        drm_info(&dev->drm,
-                "VRAM at %pa size is %llu kiB\n",
+                "VRAM at %pa size is %llu KiB\n",
                 &dev->vram_start, (uint64_t)dev->vram_size / 1024);
 
        return 0;
@@ -960,13 +960,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
                                vmw_read(dev_priv,
                                         SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
 
-               /*
-                * Workaround for low memory 2D VMs to compensate for the
-                * allocation taken by fbdev
-                */
-               if (!(dev_priv->capabilities & SVGA_CAP_3D))
-                       mem_size *= 3;
-
                dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
                dev_priv->max_primary_mem =
                        vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
@@ -991,13 +984,13 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
                dev_priv->max_primary_mem = dev_priv->vram_size;
        }
        drm_info(&dev_priv->drm,
-                "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
+                "Legacy memory limits: VRAM = %llu KiB, FIFO = %llu KiB, surface = %u KiB\n",
                 (u64)dev_priv->vram_size / 1024,
                 (u64)dev_priv->fifo_mem_size / 1024,
                 dev_priv->memory_size / 1024);
 
        drm_info(&dev_priv->drm,
-                "MOB limits: max mob size = %u kB, max mob pages = %u\n",
+                "MOB limits: max mob size = %u KiB, max mob pages = %u\n",
                 dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);
 
        ret = vmw_dma_masks(dev_priv);
@@ -1015,7 +1008,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
                         (unsigned)dev_priv->max_gmr_pages);
        }
        drm_info(&dev_priv->drm,
-                "Maximum display memory size is %llu kiB\n",
+                "Maximum display memory size is %llu KiB\n",
                 (uint64_t)dev_priv->max_primary_mem / 1024);
 
        /* Need mmio memory to check for fifo pitchlock cap. */
index 4ecaea0026fccdfa59bdb351513acdb85b758f1e..a1ce41e1c4684ea41c418265d1e35f7b6fbba54b 100644 (file)
@@ -1043,9 +1043,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
                       unsigned width, unsigned height, unsigned pitch,
                       unsigned bpp, unsigned depth);
-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
-                               uint32_t pitch,
-                               uint32_t height);
 int vmw_kms_present(struct vmw_private *dev_priv,
                    struct drm_file *file_priv,
                    struct vmw_framebuffer *vfb,
index a0b47c9b33f552a6d8022596c4acd6ec3420909d..5bd967fbcf5547d7675e728c6a375793b6819377 100644 (file)
@@ -94,14 +94,14 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
                        } else
                                new_max_pages = gman->max_gmr_pages * 2;
                        if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) {
-                               DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n",
+                               DRM_WARN("vmwgfx: increasing guest mob limits to %u KiB.\n",
                                         ((new_max_pages) << (PAGE_SHIFT - 10)));
 
                                gman->max_gmr_pages = new_max_pages;
                        } else {
                                char buf[256];
                                snprintf(buf, sizeof(buf),
-                                        "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n",
+                                        "vmwgfx, error: guest graphics is out of memory (mob limit at: %u KiB).\n",
                                         ((gman->max_gmr_pages) << (PAGE_SHIFT - 10)));
                                vmw_host_printf(buf);
                                DRM_WARN("%s", buf);
index 13b2820cae51d9e81af86f8ae43f2ceb5a29b813..00c4ff684130129d6765dc6457233c32bc4b4228 100644 (file)
@@ -224,7 +224,7 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
        new_image = vmw_du_cursor_plane_acquire_image(new_vps);
 
        changed = false;
-       if (old_image && new_image)
+       if (old_image && new_image && old_image != new_image)
                changed = memcmp(old_image, new_image, size) != 0;
 
        return changed;
@@ -2171,13 +2171,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
        return 0;
 }
 
+static
 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
-                               uint32_t pitch,
-                               uint32_t height)
+                               u64 pitch,
+                               u64 height)
 {
-       return ((u64) pitch * (u64) height) < (u64)
-               ((dev_priv->active_display_unit == vmw_du_screen_target) ?
-                dev_priv->max_primary_mem : dev_priv->vram_size);
+       return (pitch * height) < (u64)dev_priv->vram_size;
 }
 
 /**
@@ -2873,25 +2872,18 @@ out_unref:
 enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
                                              struct drm_display_mode *mode)
 {
+       enum drm_mode_status ret;
        struct drm_device *dev = connector->dev;
        struct vmw_private *dev_priv = vmw_priv(dev);
-       u32 max_width = dev_priv->texture_max_width;
-       u32 max_height = dev_priv->texture_max_height;
        u32 assumed_cpp = 4;
 
        if (dev_priv->assume_16bpp)
                assumed_cpp = 2;
 
-       if (dev_priv->active_display_unit == vmw_du_screen_target) {
-               max_width  = min(dev_priv->stdu_max_width,  max_width);
-               max_height = min(dev_priv->stdu_max_height, max_height);
-       }
-
-       if (max_width < mode->hdisplay)
-               return MODE_BAD_HVALUE;
-
-       if (max_height < mode->vdisplay)
-               return MODE_BAD_VVALUE;
+       ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+                                    dev_priv->texture_max_height);
+       if (ret != MODE_OK)
+               return ret;
 
        if (!vmw_kms_validate_mode_vram(dev_priv,
                                        mode->hdisplay * assumed_cpp,
index 2041c4d48daa53b37d3794ba525d90e027839ded..a04e0736318da6c09876ec4fdfe02ae05c3836a7 100644 (file)
 #define vmw_connector_to_stdu(x) \
        container_of(x, struct vmw_screen_target_display_unit, base.connector)
 
-
+/*
+ * Some renderers such as llvmpipe will align the width and height of their
+ * buffers to match their tile size. We need to keep this in mind when exposing
+ * modes to userspace so that this possible over-allocation will not exceed
+ * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the
+ * tile size of current renderers.
+ */
+#define GPU_TILE_SIZE 64
 
 enum stdu_content_type {
        SAME_AS_DISPLAY = 0,
@@ -85,11 +92,6 @@ struct vmw_stdu_update {
        SVGA3dCmdUpdateGBScreenTarget body;
 };
 
-struct vmw_stdu_dma {
-       SVGA3dCmdHeader     header;
-       SVGA3dCmdSurfaceDMA body;
-};
-
 struct vmw_stdu_surface_copy {
        SVGA3dCmdHeader      header;
        SVGA3dCmdSurfaceCopy body;
@@ -414,6 +416,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
 {
        struct vmw_private *dev_priv;
        struct vmw_screen_target_display_unit *stdu;
+       struct drm_crtc_state *new_crtc_state;
        int ret;
 
        if (!crtc) {
@@ -423,6 +426,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
 
        stdu     = vmw_crtc_to_stdu(crtc);
        dev_priv = vmw_priv(crtc->dev);
+       new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
 
        if (dev_priv->vkms_enabled)
                drm_crtc_vblank_off(crtc);
@@ -434,6 +438,14 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
 
                (void) vmw_stdu_update_st(dev_priv, stdu);
 
+               /* Don't destroy the Screen Target if we are only setting the
+                * display as inactive
+                */
+               if (new_crtc_state->enable &&
+                   !new_crtc_state->active &&
+                   !new_crtc_state->mode_changed)
+                       return;
+
                ret = vmw_stdu_destroy_st(dev_priv, stdu);
                if (ret)
                        DRM_ERROR("Failed to destroy Screen Target\n");
@@ -829,7 +841,41 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
        vmw_stdu_destroy(vmw_connector_to_stdu(connector));
 }
 
+static enum drm_mode_status
+vmw_stdu_connector_mode_valid(struct drm_connector *connector,
+                             struct drm_display_mode *mode)
+{
+       enum drm_mode_status ret;
+       struct drm_device *dev = connector->dev;
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       u64 assumed_cpp = dev_priv->assume_16bpp ? 2 : 4;
+       /* Align width and height to account for GPU tile over-alignment */
+       u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) *
+                          ALIGN(mode->vdisplay, GPU_TILE_SIZE) *
+                          assumed_cpp;
+       required_mem = ALIGN(required_mem, PAGE_SIZE);
+
+       ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width,
+                                    dev_priv->stdu_max_height);
+       if (ret != MODE_OK)
+               return ret;
+
+       ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+                                    dev_priv->texture_max_height);
+       if (ret != MODE_OK)
+               return ret;
 
+       if (required_mem > dev_priv->max_primary_mem)
+               return MODE_MEM;
+
+       if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE)
+               return MODE_MEM;
+
+       if (required_mem > dev_priv->max_mob_size)
+               return MODE_MEM;
+
+       return MODE_OK;
+}
 
 static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
        .dpms = vmw_du_connector_dpms,
@@ -845,7 +891,7 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
 static const struct
 drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
        .get_modes = vmw_connector_get_modes,
-       .mode_valid = vmw_connector_mode_valid
+       .mode_valid = vmw_stdu_connector_mode_valid
 };
 
 
index 79116ad58620411a060b4a21dd085ce246e02e51..476d613333a981d870be0f3e27e6c78f2fd3317c 100644 (file)
@@ -1749,6 +1749,7 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
        if (!xe_gt_is_media_type(gt)) {
                pf_release_vf_config_ggtt(gt, config);
                pf_release_vf_config_lmem(gt, config);
+               pf_update_vf_lmtt(gt_to_xe(gt), vfid);
        }
        pf_release_config_ctxs(gt, config);
        pf_release_config_dbs(gt, config);
index c7d38469fb4690cd69a5541b0500998290d44f43..e4e3658e6a1384b9d2936de133dcd322065392e2 100644 (file)
@@ -1240,6 +1240,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
        return 0;
 
 err_entity:
+       mutex_unlock(&guc->submission_state.lock);
        xe_sched_entity_fini(&ge->entity);
 err_sched:
        xe_sched_fini(&ge->sched);
index 9f6e9b7f11c848502ebba3b819f3ff829f22304a..65e5a3f4c340f0f6bb3597f94efc099d9ff23e26 100644 (file)
@@ -34,7 +34,6 @@
 #include "xe_sync.h"
 #include "xe_trace.h"
 #include "xe_vm.h"
-#include "xe_wa.h"
 
 /**
  * struct xe_migrate - migrate context.
@@ -300,10 +299,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 }
 
 /*
- * Due to workaround 16017236439, odd instance hardware copy engines are
- * faster than even instance ones.
- * This function returns the mask involving all fast copy engines and the
- * reserved copy engine to be used as logical mask for migrate engine.
  * Including the reserved copy engine is required to avoid deadlocks due to
  * migrate jobs servicing the faults gets stuck behind the job that faulted.
  */
@@ -317,8 +312,7 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
                if (hwe->class != XE_ENGINE_CLASS_COPY)
                        continue;
 
-               if (!XE_WA(gt, 16017236439) ||
-                   xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
+               if (xe_gt_is_usm_hwe(gt, hwe))
                        logical_mask |= BIT(hwe->logical_instance);
        }
 
@@ -369,6 +363,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
                if (!hwe || !logical_mask)
                        return ERR_PTR(-EINVAL);
 
+               /*
+                * XXX: Currently only reserving 1 (likely slow) BCS instance on
+                * PVC, may want to revisit if performance is needed.
+                */
                m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
                                            EXEC_QUEUE_FLAG_KERNEL |
                                            EXEC_QUEUE_FLAG_PERMANENT |
index c010ef16fbf57451e55ededd46adc43e107153a5..a5e7da8cf94416669458e21dc8b0ade051fefa51 100644 (file)
@@ -191,7 +191,7 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
        drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
        preempt_disable();
        ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
-                               true, timeout_base_ms * 1000, true);
+                               true, 50 * 1000, true);
        preempt_enable();
 
 out:
index 48a81c64f00d2535d84dd15aca6c081e2f275207..942526bd4775fa50e56a2003d2ba92e341cf766a 100644 (file)
@@ -1545,6 +1545,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
                },
                .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
        },
+       {
+               .ident = "Dell G15 5511",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell G15 5511"),
+               },
+               .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
+       },
        { }
 };
 
index 6500ca548f9c73f1b2c9ebdce6ae59a82e63c55a..ca2dff158925153032b4e0f50086b0cb08f3c971 100644 (file)
@@ -429,7 +429,7 @@ static const struct m10bmc_sdata n6000bmc_curr_tbl[] = {
 };
 
 static const struct m10bmc_sdata n6000bmc_power_tbl[] = {
-       { 0x724, 0x0, 0x0, 0x0, 0x0, 1, "Board Power" },
+       { 0x724, 0x0, 0x0, 0x0, 0x0, 1000, "Board Power" },
 };
 
 static const struct hwmon_channel_info * const n6000bmc_hinfo[] = {
index 229aed15d5caa7d2bf4ee2fa39628d4442b70c38..d4a93223cd3b34716bf50dec258b7d1c04fb0b45 100644 (file)
@@ -876,9 +876,11 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
 
                ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
                if (!ret) {
-                       if (!val)
+                       if (!val) {
+                               fwnode_handle_put(child);
                                return dev_err_probe(&st->client->dev, -EINVAL,
                                                     "shunt resistor value cannot be zero\n");
+                       }
                        st->r_sense_uohm[addr] = val;
                }
        }
index 1f96e94967ee8db09371860519d98a65ed022d7a..439dd3dba5fc81fc41c29c332e4023e716089cfa 100644 (file)
@@ -238,7 +238,7 @@ static int shtc1_probe(struct i2c_client *client)
 
        if (np) {
                data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
-               data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision");
+               data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
        } else {
                if (client->dev.platform_data)
                        data->setup = *(struct shtc1_platform_data *)dev->platform_data;
index 31ecb2c7e9783517e7a3a7d2bd3ce415e857cda3..4eccbcd0fbfc00b7f67875e880054bdd06515be4 100644 (file)
@@ -138,7 +138,6 @@ struct synquacer_i2c {
        int                     irq;
        struct device           *dev;
        void __iomem            *base;
-       struct clk              *pclk;
        u32                     pclkrate;
        u32                     speed_khz;
        u32                     timeout_ms;
@@ -535,6 +534,7 @@ static const struct i2c_adapter synquacer_i2c_ops = {
 static int synquacer_i2c_probe(struct platform_device *pdev)
 {
        struct synquacer_i2c *i2c;
+       struct clk *pclk;
        u32 bus_speed;
        int ret;
 
@@ -550,13 +550,12 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
        device_property_read_u32(&pdev->dev, "socionext,pclk-rate",
                                 &i2c->pclkrate);
 
-       i2c->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
-       if (IS_ERR(i2c->pclk))
-               return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk),
+       pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
+       if (IS_ERR(pclk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(pclk),
                                     "failed to get and enable clock\n");
 
-       dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
-       i2c->pclkrate = clk_get_rate(i2c->pclk);
+       i2c->pclkrate = clk_get_rate(pclk);
 
        if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE ||
            i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE)
index bbd366dcb69af95e4622793430392335cbbb6e6b..6a42b27c459901255de37acef43097607e686d65 100644 (file)
@@ -71,7 +71,6 @@ struct silead_ts_data {
        struct regulator_bulk_data regulators[2];
        char fw_name[64];
        struct touchscreen_properties prop;
-       u32 max_fingers;
        u32 chip_id;
        struct input_mt_pos pos[SILEAD_MAX_FINGERS];
        int slots[SILEAD_MAX_FINGERS];
@@ -136,7 +135,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data)
        touchscreen_parse_properties(data->input, true, &data->prop);
        silead_apply_efi_fw_min_max(data);
 
-       input_mt_init_slots(data->input, data->max_fingers,
+       input_mt_init_slots(data->input, SILEAD_MAX_FINGERS,
                            INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED |
                            INPUT_MT_TRACK);
 
@@ -256,10 +255,10 @@ static void silead_ts_read_data(struct i2c_client *client)
                return;
        }
 
-       if (buf[0] > data->max_fingers) {
+       if (buf[0] > SILEAD_MAX_FINGERS) {
                dev_warn(dev, "More touches reported then supported %d > %d\n",
-                        buf[0], data->max_fingers);
-               buf[0] = data->max_fingers;
+                        buf[0], SILEAD_MAX_FINGERS);
+               buf[0] = SILEAD_MAX_FINGERS;
        }
 
        if (silead_ts_handle_pen_data(data, buf))
@@ -315,7 +314,6 @@ sync:
 
 static int silead_ts_init(struct i2c_client *client)
 {
-       struct silead_ts_data *data = i2c_get_clientdata(client);
        int error;
 
        error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
@@ -325,7 +323,7 @@ static int silead_ts_init(struct i2c_client *client)
        usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
 
        error = i2c_smbus_write_byte_data(client, SILEAD_REG_TOUCH_NR,
-                                       data->max_fingers);
+                                         SILEAD_MAX_FINGERS);
        if (error)
                goto i2c_write_err;
        usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
@@ -591,13 +589,6 @@ static void silead_ts_read_props(struct i2c_client *client)
        const char *str;
        int error;
 
-       error = device_property_read_u32(dev, "silead,max-fingers",
-                                        &data->max_fingers);
-       if (error) {
-               dev_dbg(dev, "Max fingers read error %d\n", error);
-               data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
-       }
-
        error = device_property_read_string(dev, "firmware-name", &str);
        if (!error)
                snprintf(data->fw_name, sizeof(data->fw_name),
index 2fde1302a5843ce5610731855998c9bc60039940..2d5945c982bde5077674eb5ec8a35f6493e37761 100644 (file)
@@ -129,7 +129,8 @@ static inline int check_feature_gpt_level(void)
 static inline bool amd_iommu_gt_ppr_supported(void)
 {
        return (check_feature(FEATURE_GT) &&
-               check_feature(FEATURE_PPR));
+               check_feature(FEATURE_PPR) &&
+               check_feature(FEATURE_EPHSUP));
 }
 
 static inline u64 iommu_virt_to_phys(void *vaddr)
index a18e74878f68a0815965fb324b60467b0853c688..27e2937270950bf3066f0759248c4eefba43e62a 100644 (file)
@@ -1626,8 +1626,17 @@ static void __init free_pci_segments(void)
        }
 }
 
+static void __init free_sysfs(struct amd_iommu *iommu)
+{
+       if (iommu->iommu.dev) {
+               iommu_device_unregister(&iommu->iommu);
+               iommu_device_sysfs_remove(&iommu->iommu);
+       }
+}
+
 static void __init free_iommu_one(struct amd_iommu *iommu)
 {
+       free_sysfs(iommu);
        free_cwwb_sem(iommu);
        free_command_buffer(iommu);
        free_event_buffer(iommu);
index 52d83730a22ad4aa3a5feb48cd1be21f503e6cac..c2703599bb16684aa7f47f90f0dbc695498e01a9 100644 (file)
@@ -2032,7 +2032,6 @@ static int do_attach(struct iommu_dev_data *dev_data,
                     struct protection_domain *domain)
 {
        struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
-       struct pci_dev *pdev;
        int ret = 0;
 
        /* Update data structures */
@@ -2047,30 +2046,13 @@ static int do_attach(struct iommu_dev_data *dev_data,
        domain->dev_iommu[iommu->index] += 1;
        domain->dev_cnt                 += 1;
 
-       pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
+       /* Setup GCR3 table */
        if (pdom_is_sva_capable(domain)) {
                ret = init_gcr3_table(dev_data, domain);
                if (ret)
                        return ret;
-
-               if (pdev) {
-                       pdev_enable_caps(pdev);
-
-                       /*
-                        * Device can continue to function even if IOPF
-                        * enablement failed. Hence in error path just
-                        * disable device PRI support.
-                        */
-                       if (amd_iommu_iopf_add_device(iommu, dev_data))
-                               pdev_disable_cap_pri(pdev);
-               }
-       } else if (pdev) {
-               pdev_enable_cap_ats(pdev);
        }
 
-       /* Update device table */
-       amd_iommu_dev_update_dte(dev_data, true);
-
        return ret;
 }
 
@@ -2163,6 +2145,11 @@ static void detach_device(struct device *dev)
 
        do_detach(dev_data);
 
+out:
+       spin_unlock(&dev_data->lock);
+
+       spin_unlock_irqrestore(&domain->lock, flags);
+
        /* Remove IOPF handler */
        if (ppr)
                amd_iommu_iopf_remove_device(iommu, dev_data);
@@ -2170,10 +2157,6 @@ static void detach_device(struct device *dev)
        if (dev_is_pci(dev))
                pdev_disable_caps(to_pci_dev(dev));
 
-out:
-       spin_unlock(&dev_data->lock);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 static struct iommu_device *amd_iommu_probe_device(struct device *dev)
@@ -2485,6 +2468,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
        struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
        struct protection_domain *domain = to_pdomain(dom);
        struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
+       struct pci_dev *pdev;
        int ret;
 
        /*
@@ -2517,7 +2501,23 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
        }
 #endif
 
-       iommu_completion_wait(iommu);
+       pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
+       if (pdev && pdom_is_sva_capable(domain)) {
+               pdev_enable_caps(pdev);
+
+               /*
+                * Device can continue to function even if IOPF
+                * enablement failed. Hence in error path just
+                * disable device PRI support.
+                */
+               if (amd_iommu_iopf_add_device(iommu, dev_data))
+                       pdev_disable_cap_pri(pdev);
+       } else if (pdev) {
+               pdev_enable_cap_ats(pdev);
+       }
+
+       /* Update device table */
+       amd_iommu_dev_update_dte(dev_data, true);
 
        return ret;
 }
index 091423bb8aac88d2f6030bb88ac96e5084d11414..7c67d69f0b8cad4aeecec6343dbc46b680b55c46 100644 (file)
@@ -222,8 +222,7 @@ int amd_iommu_iopf_init(struct amd_iommu *iommu)
        if (iommu->iopf_queue)
                return ret;
 
-       snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
-                "amdiommu-%#x-iopfq",
+       snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), "amdvi-%#x",
                 PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid));
 
        iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name);
@@ -249,40 +248,26 @@ void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
 int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
                              struct iommu_dev_data *dev_data)
 {
-       unsigned long flags;
        int ret = 0;
 
        if (!dev_data->pri_enabled)
                return ret;
 
-       raw_spin_lock_irqsave(&iommu->lock, flags);
-
-       if (!iommu->iopf_queue) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
+       if (!iommu->iopf_queue)
+               return -EINVAL;
 
        ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
        if (ret)
-               goto out_unlock;
+               return ret;
 
        dev_data->ppr = true;
-
-out_unlock:
-       raw_spin_unlock_irqrestore(&iommu->lock, flags);
-       return ret;
+       return 0;
 }
 
 /* Its assumed that caller has verified that device was added to iopf queue */
 void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
                                  struct iommu_dev_data *dev_data)
 {
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&iommu->lock, flags);
-
        iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
        dev_data->ppr = false;
-
-       raw_spin_unlock_irqrestore(&iommu->lock, flags);
 }
index f731e4b2a41724e76459c767efe78ab7d1a41e7f..43520e7275cc12818c2a056cc00d0a539792f1da 100644 (file)
@@ -686,15 +686,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
 
        /* Check the domain allows at least some access to the device... */
        if (map) {
-               dma_addr_t base = dma_range_map_min(map);
-               if (base > domain->geometry.aperture_end ||
+               if (dma_range_map_min(map) > domain->geometry.aperture_end ||
                    dma_range_map_max(map) < domain->geometry.aperture_start) {
                        pr_warn("specified DMA range outside IOMMU capability\n");
                        return -EFAULT;
                }
-               /* ...then finally give it a kicking to make sure it fits */
-               base_pfn = max(base, domain->geometry.aperture_start) >> order;
        }
+       /* ...then finally give it a kicking to make sure it fits */
+       base_pfn = max_t(unsigned long, base_pfn,
+                        domain->geometry.aperture_start >> order);
 
        /* start_pfn is always nonzero for an already-initialised domain */
        mutex_lock(&cookie->mutex);
index 7c90bac3de216a34af3be01df548d98278dd3c7d..4acf5612487cfd77fd5ee4d3556123bfb051fc46 100644 (file)
@@ -850,7 +850,6 @@ static int xlnx_mbox_init_sgi(struct platform_device *pdev,
                return ret;
        }
 
-       irq_to_desc(pdata->virq_sgi);
        irq_set_status_flags(pdata->virq_sgi, IRQ_PER_CPU);
 
        /* Setup function for the CPU hot-plug cases */
index 1e0085cd9a9ad5fb3dccde133bb0b1251bad237a..2818e24e2a51dd39073ee1bc8d51ea7dfa2dc1e0 100644 (file)
@@ -3142,7 +3142,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
                else
                        interface = PHY_INTERFACE_MODE_MII;
        } else if (val == bitval[P_RMII_SEL]) {
-               interface = PHY_INTERFACE_MODE_RGMII;
+               interface = PHY_INTERFACE_MODE_RMII;
        } else {
                interface = PHY_INTERFACE_MODE_RGMII;
                if (data8 & P_RGMII_ID_EG_ENABLE)
index 2d8a66ea82fab7f0a023ab469ccc33321d0b4ba3..713a595370bffa5a615073874bd3a99a4a8f286d 100644 (file)
@@ -312,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                              struct ena_com_io_sq *io_sq)
 {
        size_t size;
-       int dev_node = 0;
 
        memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
 
@@ -325,12 +324,9 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
        size = io_sq->desc_entry_size * io_sq->q_depth;
 
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
-               dev_node = dev_to_node(ena_dev->dmadev);
-               set_dev_node(ena_dev->dmadev, ctx->numa_node);
                io_sq->desc_addr.virt_addr =
                        dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
                                           GFP_KERNEL);
-               set_dev_node(ena_dev->dmadev, dev_node);
                if (!io_sq->desc_addr.virt_addr) {
                        io_sq->desc_addr.virt_addr =
                                dma_alloc_coherent(ena_dev->dmadev, size,
@@ -354,10 +350,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
                        io_sq->bounce_buf_ctrl.buffers_num;
 
-               dev_node = dev_to_node(ena_dev->dmadev);
-               set_dev_node(ena_dev->dmadev, ctx->numa_node);
                io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
-               set_dev_node(ena_dev->dmadev, dev_node);
                if (!io_sq->bounce_buf_ctrl.base_buffer)
                        io_sq->bounce_buf_ctrl.base_buffer =
                                devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
@@ -397,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
                              struct ena_com_io_cq *io_cq)
 {
        size_t size;
-       int prev_node = 0;
 
        memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
 
@@ -409,11 +401,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
 
        size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 
-       prev_node = dev_to_node(ena_dev->dmadev);
-       set_dev_node(ena_dev->dmadev, ctx->numa_node);
        io_cq->cdesc_addr.virt_addr =
                dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
-       set_dev_node(ena_dev->dmadev, prev_node);
        if (!io_cq->cdesc_addr.virt_addr) {
                io_cq->cdesc_addr.virt_addr =
                        dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
index f604119efc8098d511258679e48bbcc633611cba..5f26fc3ad65556d772def1d6c39de0b25989f88d 100644 (file)
@@ -1117,18 +1117,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
        pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
 
        if (port[IFLA_PORT_PROFILE]) {
+               if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
+                       memcpy(pp, &prev_pp, sizeof(*pp));
+                       return -EINVAL;
+               }
                pp->set |= ENIC_SET_NAME;
                memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
                        PORT_PROFILE_MAX);
        }
 
        if (port[IFLA_PORT_INSTANCE_UUID]) {
+               if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
+                       memcpy(pp, &prev_pp, sizeof(*pp));
+                       return -EINVAL;
+               }
                pp->set |= ENIC_SET_INSTANCE;
                memcpy(pp->instance_uuid,
                        nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
        }
 
        if (port[IFLA_PORT_HOST_UUID]) {
+               if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
+                       memcpy(pp, &prev_pp, sizeof(*pp));
+                       return -EINVAL;
+               }
                pp->set |= ENIC_SET_HOST;
                memcpy(pp->host_uuid,
                        nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
index a72d8a2eb0b315a505c61f9663844fa133120ccf..881ece735dcf1470fed863fdbb765e231ca2dbbf 100644 (file)
@@ -4130,6 +4130,14 @@ free_queue_mem:
        return ret;
 }
 
+static void fec_enet_deinit(struct net_device *ndev)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       netif_napi_del(&fep->napi);
+       fec_enet_free_queue(ndev);
+}
+
 #ifdef CONFIG_OF
 static int fec_reset_phy(struct platform_device *pdev)
 {
@@ -4524,6 +4532,7 @@ failed_register:
        fec_enet_mii_remove(fep);
 failed_mii_init:
 failed_irq:
+       fec_enet_deinit(ndev);
 failed_init:
        fec_ptp_stop(pdev);
 failed_reset:
@@ -4587,6 +4596,7 @@ fec_drv_remove(struct platform_device *pdev)
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
+       fec_enet_deinit(ndev);
        free_netdev(ndev);
 }
 
index f9e94be36e97f20b5aeeccf43dfe580bf7c01847..2e98a2a0bead951e714668c91a699d6bdf5fd72b 100644 (file)
@@ -1225,6 +1225,28 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
        }
 
 release:
+       /* Switching PHY interface always returns MDI error
+        * so disable retry mechanism to avoid wasting time
+        */
+       e1000e_disable_phy_retry(hw);
+
+       /* Force SMBus mode in PHY */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+       if (ret_val) {
+               e1000e_enable_phy_retry(hw);
+               hw->phy.ops.release(hw);
+               goto out;
+       }
+       phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+       e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+       e1000e_enable_phy_retry(hw);
+
+       /* Force SMBus mode in MAC */
+       mac_reg = er32(CTRL_EXT);
+       mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+       ew32(CTRL_EXT, mac_reg);
+
        hw->phy.ops.release(hw);
 out:
        if (ret_val)
index 220d62fca55d1b2ba405003773f79396a9dcdb42..da5c59daf8ba94e48d6dc7cf31a61b37f86de37e 100644 (file)
@@ -6623,7 +6623,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, ctrl_ext, rctl, status, wufc;
        int retval = 0;
-       u16 smb_ctrl;
 
        /* Runtime suspend should only enable wakeup for link changes */
        if (runtime)
@@ -6697,23 +6696,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
                        if (retval)
                                return retval;
                }
-
-               /* Force SMBUS to allow WOL */
-               /* Switching PHY interface always returns MDI error
-                * so disable retry mechanism to avoid wasting time
-                */
-               e1000e_disable_phy_retry(hw);
-
-               e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
-               smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
-               e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
-
-               e1000e_enable_phy_retry(hw);
-
-               /* Force SMBus mode in MAC */
-               ctrl_ext = er32(CTRL_EXT);
-               ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
-               ew32(CTRL_EXT, ctrl_ext);
        }
 
        /* Ensure that the appropriate bits are set in LPI_CTRL
index 1f188c052828b47b37ebb72059dfba61be96d00a..284c3fad5a6e4f757097ecaf5e6a72816c09f67b 100644 (file)
@@ -11171,6 +11171,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
        ret = i40e_reset(pf);
        if (!ret)
                i40e_rebuild(pf, reinit, lock_acquired);
+       else
+               dev_err(&pf->pdev->dev, "%s: i40e_reset() FAILED", __func__);
 }
 
 /**
@@ -16334,6 +16336,139 @@ unmap:
        pci_disable_device(pdev);
 }
 
+/**
+ * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
+ * using the mac_address_write admin q function
+ * @pf: pointer to i40e_pf struct
+ **/
+static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+{
+       struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+       struct i40e_hw *hw = &pf->hw;
+       u8 mac_addr[6];
+       u16 flags = 0;
+       int ret;
+
+       /* Get current MAC address in case it's an LAA */
+       if (main_vsi && main_vsi->netdev) {
+               ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr);
+       } else {
+               dev_err(&pf->pdev->dev,
+                       "Failed to retrieve MAC address; using default\n");
+               ether_addr_copy(mac_addr, hw->mac.addr);
+       }
+
+       /* The FW expects the mac address write cmd to first be called with
+        * one of these flags before calling it again with the multicast
+        * enable flags.
+        */
+       flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
+
+       if (hw->func_caps.flex10_enable && hw->partition_id != 1)
+               flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
+
+       ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+       if (ret) {
+               dev_err(&pf->pdev->dev,
+                       "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
+               return;
+       }
+
+       flags = I40E_AQC_MC_MAG_EN
+                       | I40E_AQC_WOL_PRESERVE_ON_PFR
+                       | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
+       ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+       if (ret)
+               dev_err(&pf->pdev->dev,
+                       "Failed to enable Multicast Magic Packet wake up\n");
+}
+
+/**
+ * i40e_io_suspend - suspend all IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ **/
+static int i40e_io_suspend(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+
+       set_bit(__I40E_DOWN, pf->state);
+
+       /* Ensure service task will not be running */
+       del_timer_sync(&pf->service_timer);
+       cancel_work_sync(&pf->service_task);
+
+       /* Client close must be called explicitly here because the timer
+        * has been stopped.
+        */
+       i40e_notify_client_of_netdev_close(pf, false);
+
+       if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+           pf->wol_en)
+               i40e_enable_mc_magic_wake(pf);
+
+       /* Since we're going to destroy queues during the
+        * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
+        * whole section
+        */
+       rtnl_lock();
+
+       i40e_prep_for_reset(pf);
+
+       wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+       wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+       /* Clear the interrupt scheme and release our IRQs so that the system
+        * can safely hibernate even when there are a large number of CPUs.
+        * Otherwise hibernation might fail when mapping all the vectors back
+        * to CPU0.
+        */
+       i40e_clear_interrupt_scheme(pf);
+
+       rtnl_unlock();
+
+       return 0;
+}
+
+/**
+ * i40e_io_resume - resume IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ **/
+static int i40e_io_resume(struct i40e_pf *pf)
+{
+       struct device *dev = &pf->pdev->dev;
+       int err;
+
+       /* We need to hold the RTNL lock prior to restoring interrupt schemes,
+        * since we're going to be restoring queues
+        */
+       rtnl_lock();
+
+       /* We cleared the interrupt scheme when we suspended, so we need to
+        * restore it now to resume device functionality.
+        */
+       err = i40e_restore_interrupt_scheme(pf);
+       if (err) {
+               dev_err(dev, "Cannot restore interrupt scheme: %d\n",
+                       err);
+       }
+
+       clear_bit(__I40E_DOWN, pf->state);
+       i40e_reset_and_rebuild(pf, false, true);
+
+       rtnl_unlock();
+
+       /* Clear suspended state last after everything is recovered */
+       clear_bit(__I40E_SUSPENDED, pf->state);
+
+       /* Restart the service task */
+       mod_timer(&pf->service_timer,
+                 round_jiffies(jiffies + pf->service_timer_period));
+
+       return 0;
+}
+
 /**
  * i40e_pci_error_detected - warning that something funky happened in PCI land
  * @pdev: PCI device information struct
@@ -16358,7 +16493,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
 
        /* shutdown all operations */
        if (!test_bit(__I40E_SUSPENDED, pf->state))
-               i40e_prep_for_reset(pf);
+               i40e_io_suspend(pf);
 
        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
@@ -16380,7 +16515,8 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
        u32 reg;
 
        dev_dbg(&pdev->dev, "%s\n", __func__);
-       if (pci_enable_device_mem(pdev)) {
+       /* enable I/O and memory of the device  */
+       if (pci_enable_device(pdev)) {
                dev_info(&pdev->dev,
                         "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
@@ -16443,54 +16579,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
        if (test_bit(__I40E_SUSPENDED, pf->state))
                return;
 
-       i40e_handle_reset_warning(pf, false);
-}
-
-/**
- * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
- * using the mac_address_write admin q function
- * @pf: pointer to i40e_pf struct
- **/
-static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
-{
-       struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
-       struct i40e_hw *hw = &pf->hw;
-       u8 mac_addr[6];
-       u16 flags = 0;
-       int ret;
-
-       /* Get current MAC address in case it's an LAA */
-       if (main_vsi && main_vsi->netdev) {
-               ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr);
-       } else {
-               dev_err(&pf->pdev->dev,
-                       "Failed to retrieve MAC address; using default\n");
-               ether_addr_copy(mac_addr, hw->mac.addr);
-       }
-
-       /* The FW expects the mac address write cmd to first be called with
-        * one of these flags before calling it again with the multicast
-        * enable flags.
-        */
-       flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
-
-       if (hw->func_caps.flex10_enable && hw->partition_id != 1)
-               flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
-
-       ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
-       if (ret) {
-               dev_err(&pf->pdev->dev,
-                       "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
-               return;
-       }
-
-       flags = I40E_AQC_MC_MAG_EN
-                       | I40E_AQC_WOL_PRESERVE_ON_PFR
-                       | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
-       ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
-       if (ret)
-               dev_err(&pf->pdev->dev,
-                       "Failed to enable Multicast Magic Packet wake up\n");
+       i40e_io_resume(pf);
 }
 
 /**
@@ -16552,48 +16641,11 @@ static void i40e_shutdown(struct pci_dev *pdev)
 static int i40e_suspend(struct device *dev)
 {
        struct i40e_pf *pf = dev_get_drvdata(dev);
-       struct i40e_hw *hw = &pf->hw;
 
        /* If we're already suspended, then there is nothing to do */
        if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
                return 0;
-
-       set_bit(__I40E_DOWN, pf->state);
-
-       /* Ensure service task will not be running */
-       del_timer_sync(&pf->service_timer);
-       cancel_work_sync(&pf->service_task);
-
-       /* Client close must be called explicitly here because the timer
-        * has been stopped.
-        */
-       i40e_notify_client_of_netdev_close(pf, false);
-
-       if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
-           pf->wol_en)
-               i40e_enable_mc_magic_wake(pf);
-
-       /* Since we're going to destroy queues during the
-        * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
-        * whole section
-        */
-       rtnl_lock();
-
-       i40e_prep_for_reset(pf);
-
-       wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
-       wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
-
-       /* Clear the interrupt scheme and release our IRQs so that the system
-        * can safely hibernate even when there are a large number of CPUs.
-        * Otherwise hibernation might fail when mapping all the vectors back
-        * to CPU0.
-        */
-       i40e_clear_interrupt_scheme(pf);
-
-       rtnl_unlock();
-
-       return 0;
+       return i40e_io_suspend(pf);
 }
 
 /**
@@ -16603,39 +16655,11 @@ static int i40e_suspend(struct device *dev)
 static int i40e_resume(struct device *dev)
 {
        struct i40e_pf *pf = dev_get_drvdata(dev);
-       int err;
 
        /* If we're not suspended, then there is nothing to do */
        if (!test_bit(__I40E_SUSPENDED, pf->state))
                return 0;
-
-       /* We need to hold the RTNL lock prior to restoring interrupt schemes,
-        * since we're going to be restoring queues
-        */
-       rtnl_lock();
-
-       /* We cleared the interrupt scheme when we suspended, so we need to
-        * restore it now to resume device functionality.
-        */
-       err = i40e_restore_interrupt_scheme(pf);
-       if (err) {
-               dev_err(dev, "Cannot restore interrupt scheme: %d\n",
-                       err);
-       }
-
-       clear_bit(__I40E_DOWN, pf->state);
-       i40e_reset_and_rebuild(pf, false, true);
-
-       rtnl_unlock();
-
-       /* Clear suspended state last after everything is recovered */
-       clear_bit(__I40E_SUSPENDED, pf->state);
-
-       /* Restart the service task */
-       mod_timer(&pf->service_timer,
-                 round_jiffies(jiffies + pf->service_timer_period));
-
-       return 0;
+       return i40e_io_resume(pf);
 }
 
 static const struct pci_error_handlers i40e_err_handler = {
index c4b69655cdf57db4a5b2613755fe68e147e23605..704e9ad5144e8c2844b6343c63126d1724ad0f08 100644 (file)
@@ -1388,7 +1388,7 @@ enum ice_param_id {
        ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
 };
 
-static const struct devlink_param ice_devlink_params[] = {
+static const struct devlink_param ice_dvl_rdma_params[] = {
        DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                              ice_devlink_enable_roce_get,
                              ice_devlink_enable_roce_set,
@@ -1397,6 +1397,9 @@ static const struct devlink_param ice_devlink_params[] = {
                              ice_devlink_enable_iw_get,
                              ice_devlink_enable_iw_set,
                              ice_devlink_enable_iw_validate),
+};
+
+static const struct devlink_param ice_dvl_sched_params[] = {
        DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
                             "tx_scheduling_layers",
                             DEVLINK_PARAM_TYPE_U8,
@@ -1464,21 +1467,31 @@ int ice_devlink_register_params(struct ice_pf *pf)
 {
        struct devlink *devlink = priv_to_devlink(pf);
        struct ice_hw *hw = &pf->hw;
-       size_t params_size;
+       int status;
 
-       params_size =  ARRAY_SIZE(ice_devlink_params);
+       status = devl_params_register(devlink, ice_dvl_rdma_params,
+                                     ARRAY_SIZE(ice_dvl_rdma_params));
+       if (status)
+               return status;
 
-       if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
-               params_size--;
+       if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+               status = devl_params_register(devlink, ice_dvl_sched_params,
+                                             ARRAY_SIZE(ice_dvl_sched_params));
 
-       return devl_params_register(devlink, ice_devlink_params,
-                                   params_size);
+       return status;
 }
 
 void ice_devlink_unregister_params(struct ice_pf *pf)
 {
-       devl_params_unregister(priv_to_devlink(pf), ice_devlink_params,
-                              ARRAY_SIZE(ice_devlink_params));
+       struct devlink *devlink = priv_to_devlink(pf);
+       struct ice_hw *hw = &pf->hw;
+
+       devl_params_unregister(devlink, ice_dvl_rdma_params,
+                              ARRAY_SIZE(ice_dvl_rdma_params));
+
+       if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+               devl_params_unregister(devlink, ice_dvl_sched_params,
+                                      ARRAY_SIZE(ice_dvl_sched_params));
 }
 
 #define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
index 6ad8002b22e14a9a86f2ed994a6798d67627b5f3..99a75a59078ef3e6c71c46696b694d00fd082ea1 100644 (file)
@@ -409,7 +409,6 @@ struct ice_vsi {
        struct ice_tc_cfg tc_cfg;
        struct bpf_prog *xdp_prog;
        struct ice_tx_ring **xdp_rings;  /* XDP ring array */
-       unsigned long *af_xdp_zc_qps;    /* tracks AF_XDP ZC enabled qps */
        u16 num_xdp_txq;                 /* Used XDP queues */
        u8 xdp_mapping_mode;             /* ICE_MAP_MODE_[CONTIG|SCATTER] */
 
@@ -746,6 +745,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
        ring->flags |= ICE_TX_FLAGS_RING_XDP;
 }
 
+/**
+ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
+ * attached and configured as zero-copy, NULL otherwise.
+ */
+static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+                                                       u16 qid)
+{
+       struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+       if (!ice_is_xdp_ena_vsi(vsi))
+               return NULL;
+
+       return (pool && pool->dev) ? pool : NULL;
+}
+
 /**
  * ice_xsk_pool - get XSK buffer pool bound to a ring
  * @ring: Rx ring to use
@@ -758,10 +776,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
        struct ice_vsi *vsi = ring->vsi;
        u16 qid = ring->q_index;
 
-       if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
-               return NULL;
-
-       return xsk_get_pool_from_qid(vsi->netdev, qid);
+       return ice_get_xp_from_qid(vsi, qid);
 }
 
 /**
@@ -786,12 +801,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
        if (!ring)
                return;
 
-       if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
-               ring->xsk_pool = NULL;
-               return;
-       }
-
-       ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+       ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
 }
 
 /**
@@ -920,9 +930,17 @@ int ice_down(struct ice_vsi *vsi);
 int ice_down_up(struct ice_vsi *vsi);
 int ice_vsi_cfg_lan(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+
+enum ice_xdp_cfg {
+       ICE_XDP_CFG_FULL,       /* Fully apply new config in .ndo_bpf() */
+       ICE_XDP_CFG_PART,       /* Save/use part of config in VSI rebuild */
+};
+
 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+                         enum ice_xdp_cfg cfg_type);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
 int
 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
             u32 flags);
index 687f6cb2b917afc55de7020c401c5095c6163825..5d396c1a7731482f725561a8eff709ecd3cc793e 100644 (file)
@@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
                }
                rx_rings_rem -= rx_rings_per_v;
        }
+
+       if (ice_is_xdp_ena_vsi(vsi))
+               ice_map_xdp_rings(vsi);
 }
 
 /**
index 5649b257e6312e57e0040962f7213a606a2581dc..24716a3b494cd09ecf8b7562b67dd036ffc00d7f 100644 (file)
@@ -3148,6 +3148,16 @@ ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
        case ICE_PHY_TYPE_HIGH_100G_AUI2:
                speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
                break;
+       case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
+       case ICE_PHY_TYPE_HIGH_200G_SR4:
+       case ICE_PHY_TYPE_HIGH_200G_FR4:
+       case ICE_PHY_TYPE_HIGH_200G_LR4:
+       case ICE_PHY_TYPE_HIGH_200G_DR4:
+       case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
+       case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
+       case ICE_PHY_TYPE_HIGH_200G_AUI4:
+               speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
+               break;
        default:
                speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
                break;
index 5371e91f6bbb4b48f4065421ac9a68d82fda17d1..7629b0190578b3d4bf1fc8d54b54af570f1648d4 100644 (file)
@@ -114,14 +114,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
        if (!vsi->q_vectors)
                goto err_vectors;
 
-       vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
-       if (!vsi->af_xdp_zc_qps)
-               goto err_zc_qps;
-
        return 0;
 
-err_zc_qps:
-       devm_kfree(dev, vsi->q_vectors);
 err_vectors:
        devm_kfree(dev, vsi->rxq_map);
 err_rxq_map:
@@ -309,8 +303,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 
        dev = ice_pf_to_dev(pf);
 
-       bitmap_free(vsi->af_xdp_zc_qps);
-       vsi->af_xdp_zc_qps = NULL;
        /* free the ring and vector containers */
        devm_kfree(dev, vsi->q_vectors);
        vsi->q_vectors = NULL;
@@ -2282,22 +2274,23 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
                if (ret)
                        goto unroll_vector_base;
 
-               ice_vsi_map_rings_to_vectors(vsi);
-
-               /* Associate q_vector rings to napi */
-               ice_vsi_set_napi_queues(vsi);
-
-               vsi->stat_offsets_loaded = false;
-
                if (ice_is_xdp_ena_vsi(vsi)) {
                        ret = ice_vsi_determine_xdp_res(vsi);
                        if (ret)
                                goto unroll_vector_base;
-                       ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+                       ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
+                                                   ICE_XDP_CFG_PART);
                        if (ret)
                                goto unroll_vector_base;
                }
 
+               ice_vsi_map_rings_to_vectors(vsi);
+
+               /* Associate q_vector rings to napi */
+               ice_vsi_set_napi_queues(vsi);
+
+               vsi->stat_offsets_loaded = false;
+
                /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
                if (vsi->type != ICE_VSI_CTRL)
                        /* Do not exit if configuring RSS had an issue, at
@@ -2437,7 +2430,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
                /* return value check can be skipped here, it always returns
                 * 0 if reset is in progress
                 */
-               ice_destroy_xdp_rings(vsi);
+               ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
 
        ice_vsi_clear_rings(vsi);
        ice_vsi_free_q_vectors(vsi);
index f60c022f79609695bcad9f8ff581c389de02e35c..1b61ca3a6eb6e15353be17e6d7f72a27708bff8b 100644 (file)
@@ -2707,17 +2707,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
                bpf_prog_put(old_prog);
 }
 
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+       struct ice_q_vector *q_vector;
+       struct ice_tx_ring *ring;
+
+       if (static_key_enabled(&ice_xdp_locking_key))
+               return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+       q_vector = vsi->rx_rings[qid]->q_vector;
+       ice_for_each_tx_ring(ring, q_vector->tx)
+               if (ice_ring_is_xdp(ring))
+                       return ring;
+
+       return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+       int xdp_rings_rem = vsi->num_xdp_txq;
+       int v_idx, q_idx;
+
+       /* follow the logic from ice_vsi_map_rings_to_vectors */
+       ice_for_each_q_vector(vsi, v_idx) {
+               struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+               int xdp_rings_per_v, q_id, q_base;
+
+               xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+                                              vsi->num_q_vectors - v_idx);
+               q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+               for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+                       struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+                       xdp_ring->q_vector = q_vector;
+                       xdp_ring->next = q_vector->tx.tx_ring;
+                       q_vector->tx.tx_ring = xdp_ring;
+               }
+               xdp_rings_rem -= xdp_rings_per_v;
+       }
+
+       ice_for_each_rxq(vsi, q_idx) {
+               vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+                                                                      q_idx);
+               ice_tx_xsk_pool(vsi, q_idx);
+       }
+}
+
 /**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
  * @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
  *
  * Return 0 on success and negative value on error
  */
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+                         enum ice_xdp_cfg cfg_type)
 {
        u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-       int xdp_rings_rem = vsi->num_xdp_txq;
        struct ice_pf *pf = vsi->back;
        struct ice_qs_cfg xdp_qs_cfg = {
                .qs_mutex = &pf->avail_q_mutex,
@@ -2730,8 +2785,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
                .mapping_mode = ICE_VSI_MAP_CONTIG
        };
        struct device *dev;
-       int i, v_idx;
-       int status;
+       int status, i;
 
        dev = ice_pf_to_dev(pf);
        vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2750,49 +2804,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
        if (ice_xdp_alloc_setup_rings(vsi))
                goto clear_xdp_rings;
 
-       /* follow the logic from ice_vsi_map_rings_to_vectors */
-       ice_for_each_q_vector(vsi, v_idx) {
-               struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-               int xdp_rings_per_v, q_id, q_base;
-
-               xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
-                                              vsi->num_q_vectors - v_idx);
-               q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
-               for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
-                       struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
-                       xdp_ring->q_vector = q_vector;
-                       xdp_ring->next = q_vector->tx.tx_ring;
-                       q_vector->tx.tx_ring = xdp_ring;
-               }
-               xdp_rings_rem -= xdp_rings_per_v;
-       }
-
-       ice_for_each_rxq(vsi, i) {
-               if (static_key_enabled(&ice_xdp_locking_key)) {
-                       vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-               } else {
-                       struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
-                       struct ice_tx_ring *ring;
-
-                       ice_for_each_tx_ring(ring, q_vector->tx) {
-                               if (ice_ring_is_xdp(ring)) {
-                                       vsi->rx_rings[i]->xdp_ring = ring;
-                                       break;
-                               }
-                       }
-               }
-               ice_tx_xsk_pool(vsi, i);
-       }
-
        /* omit the scheduler update if in reset path; XDP queues will be
         * taken into account at the end of ice_vsi_rebuild, where
         * ice_cfg_vsi_lan is being called
         */
-       if (ice_is_reset_in_progress(pf->state))
+       if (cfg_type == ICE_XDP_CFG_PART)
                return 0;
 
+       ice_map_xdp_rings(vsi);
+
        /* tell the Tx scheduler that right now we have
         * additional queues
         */
@@ -2842,22 +2862,21 @@ err_map_xdp:
 /**
  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
  * @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
  *
  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
  * resources
  */
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 {
        u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
        struct ice_pf *pf = vsi->back;
        int i, v_idx;
 
        /* q_vectors are freed in reset path so there's no point in detaching
-        * rings; in case of rebuild being triggered not from reset bits
-        * in pf->state won't be set, so additionally check first q_vector
-        * against NULL
+        * rings
         */
-       if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+       if (cfg_type == ICE_XDP_CFG_PART)
                goto free_qmap;
 
        ice_for_each_q_vector(vsi, v_idx) {
@@ -2898,7 +2917,7 @@ free_qmap:
        if (static_key_enabled(&ice_xdp_locking_key))
                static_branch_dec(&ice_xdp_locking_key);
 
-       if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+       if (cfg_type == ICE_XDP_CFG_PART)
                return 0;
 
        ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -3009,7 +3028,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                if (xdp_ring_err) {
                        NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
                } else {
-                       xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+                       xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+                                                            ICE_XDP_CFG_FULL);
                        if (xdp_ring_err)
                                NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
                }
@@ -3020,7 +3040,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                        NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
        } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
                xdp_features_clear_redirect_target(vsi->netdev);
-               xdp_ring_err = ice_destroy_xdp_rings(vsi);
+               xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
                if (xdp_ring_err)
                        NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
                /* reallocate Rx queues that were used for zero-copy */
index 84eab92dc03cfe99f791518f3a21358dd7a91822..59e8879ac0598a8d6e7fac474ba0acbcb9981a02 100644 (file)
@@ -374,11 +374,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
  *
  * Read the specified word from the copy of the Shadow RAM found in the
  * specified NVM module.
+ *
+ * Note that the Shadow RAM copy is always located after the CSS header, and
+ * is aligned to 64-byte (32-word) offsets.
  */
 static int
 ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
 {
-       return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
+       u32 sr_copy;
+
+       switch (bank) {
+       case ICE_ACTIVE_FLASH_BANK:
+               sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32);
+               break;
+       case ICE_INACTIVE_FLASH_BANK:
+               sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32);
+               break;
+       }
+
+       return ice_read_nvm_module(hw, bank, sr_copy + offset, data);
 }
 
 /**
@@ -440,8 +454,7 @@ int
 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
                       u16 module_type)
 {
-       u16 pfa_len, pfa_ptr;
-       u16 next_tlv;
+       u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
        int status;
 
        status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
@@ -454,11 +467,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
                ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
                return status;
        }
+
+       /* The Preserved Fields Area contains a sequence of Type-Length-Value
+        * structures which define its contents. The PFA length includes all
+        * of the TLVs, plus the initial length word itself, *and* one final
+        * word at the end after all of the TLVs.
+        */
+       if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
+               dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
+                        pfa_ptr, pfa_len);
+               return -EINVAL;
+       }
+
        /* Starting with first TLV after PFA length, iterate through the list
         * of TLVs to find the requested one.
         */
        next_tlv = pfa_ptr + 1;
-       while (next_tlv < pfa_ptr + pfa_len) {
+       while (next_tlv < max_tlv) {
                u16 tlv_sub_module_type;
                u16 tlv_len;
 
@@ -482,10 +507,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
                        }
                        return -EINVAL;
                }
-               /* Check next TLV, i.e. current TLV pointer + length + 2 words
-                * (for current TLV's type and length)
-                */
-               next_tlv = next_tlv + tlv_len + 2;
+
+               if (check_add_overflow(next_tlv, 2, &next_tlv) ||
+                   check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
+                       dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
+                                tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
+                       return -EINVAL;
+               }
        }
        /* Module does not exist */
        return -ENOENT;
@@ -1009,6 +1037,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
        return 0;
 }
 
+/**
+ * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the Authentication
+ * header size, and then convert to words.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int
+ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
+                       u32 *hdr_len)
+{
+       u16 hdr_len_l, hdr_len_h;
+       u32 hdr_len_dword;
+       int status;
+
+       status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
+                                    &hdr_len_l);
+       if (status)
+               return status;
+
+       status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
+                                    &hdr_len_h);
+       if (status)
+               return status;
+
+       /* CSS header length is in DWORD, so convert to words and add
+        * authentication header size
+        */
+       hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+       *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
+
+       return 0;
+}
+
+/**
+ * ice_determine_css_hdr_len - Discover CSS header length for the device
+ * @hw: pointer to the HW struct
+ *
+ * Determine the size of the CSS header at the start of the NVM module. This
+ * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is
+ * always located just after the CSS header.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ice_determine_css_hdr_len(struct ice_hw *hw)
+{
+       struct ice_bank_info *banks = &hw->flash.banks;
+       int status;
+
+       status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK,
+                                        &banks->active_css_hdr_len);
+       if (status)
+               return status;
+
+       status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK,
+                                        &banks->inactive_css_hdr_len);
+       if (status)
+               return status;
+
+       return 0;
+}
+
 /**
  * ice_init_nvm - initializes NVM setting
  * @hw: pointer to the HW struct
@@ -1055,6 +1149,12 @@ int ice_init_nvm(struct ice_hw *hw)
                return status;
        }
 
+       status = ice_determine_css_hdr_len(hw);
+       if (status) {
+               ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n");
+               return status;
+       }
+
        status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
index f0796a93f4287e22e490f24a0dc1260cf5d7c777..eef397e5baa07d7fba682cd7262e216329e10689 100644 (file)
@@ -482,6 +482,8 @@ struct ice_bank_info {
        u32 orom_size;                          /* Size of OROM bank */
        u32 netlist_ptr;                        /* Pointer to 1st Netlist bank */
        u32 netlist_size;                       /* Size of Netlist bank */
+       u32 active_css_hdr_len;                 /* Active CSS header length */
+       u32 inactive_css_hdr_len;               /* Inactive CSS header length */
        enum ice_flash_bank nvm_bank;           /* Active NVM bank */
        enum ice_flash_bank orom_bank;          /* Active OROM bank */
        enum ice_flash_bank netlist_bank;       /* Active Netlist bank */
@@ -1087,17 +1089,13 @@ struct ice_aq_get_set_rss_lut_params {
 #define ICE_SR_SECTOR_SIZE_IN_WORDS    0x800
 
 /* CSS Header words */
+#define ICE_NVM_CSS_HDR_LEN_L                  0x02
+#define ICE_NVM_CSS_HDR_LEN_H                  0x03
 #define ICE_NVM_CSS_SREV_L                     0x14
 #define ICE_NVM_CSS_SREV_H                     0x15
 
-/* Length of CSS header section in words */
-#define ICE_CSS_HEADER_LENGTH                  330
-
-/* Offset of Shadow RAM copy in the NVM bank area. */
-#define ICE_NVM_SR_COPY_WORD_OFFSET            roundup(ICE_CSS_HEADER_LENGTH, 32)
-
-/* Size in bytes of Option ROM trailer */
-#define ICE_NVM_OROM_TRAILER_LENGTH            (2 * ICE_CSS_HEADER_LENGTH)
+/* Length of Authentication header section in words */
+#define ICE_NVM_AUTH_HEADER_LEN                        0x08
 
 /* The Link Topology Netlist section is stored as a series of words. It is
  * stored in the NVM as a TLV, with the first two words containing the type
index 2e9ad27cb9d13e8f5faa1d28a5d777de8fe0c2d3..6e8f2aab6080153082f1fde3e58effabc5df720a 100644 (file)
@@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
                return -EINVAL;
 
        err = ice_fltr_add_vlan(vsi, vlan);
-       if (err && err != -EEXIST) {
+       if (!err)
+               vsi->num_vlan++;
+       else if (err == -EEXIST)
+               err = 0;
+       else
                dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
                        vlan->vid, vsi->vsi_num, err);
-               return err;
-       }
 
-       vsi->num_vlan++;
-       return 0;
+       return err;
 }
 
 /**
index 7541f223bf4f69cb63f985f553e4e30e8036e9c8..a65955eb23c0bd85adc7f0f8f1a4b39867da8907 100644 (file)
@@ -269,7 +269,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
        if (!pool)
                return -EINVAL;
 
-       clear_bit(qid, vsi->af_xdp_zc_qps);
        xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
 
        return 0;
@@ -300,8 +299,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
        if (err)
                return err;
 
-       set_bit(qid, vsi->af_xdp_zc_qps);
-
        return 0;
 }
 
@@ -349,11 +346,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
 int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
 {
        struct ice_rx_ring *rx_ring;
-       unsigned long q;
+       uint i;
+
+       ice_for_each_rxq(vsi, i) {
+               rx_ring = vsi->rx_rings[i];
+               if (!rx_ring->xsk_pool)
+                       continue;
 
-       for_each_set_bit(q, vsi->af_xdp_zc_qps,
-                        max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
-               rx_ring = vsi->rx_rings[q];
                if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
                        return -ENOMEM;
        }
index 52ceda6306a3d0317044234f6798fe0f6c8e5f83..f1ee5584e8fa20556ace18afbc78c00549cf8c24 100644 (file)
@@ -1394,6 +1394,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
        }
 
        idpf_rx_init_buf_tail(vport);
+       idpf_vport_intr_ena(vport);
 
        err = idpf_send_config_queues_msg(vport);
        if (err) {
index 285da2177ee446e7607b8c0206ea16241e0cf3cf..b023704bbbdab8018bfcbed2a0619da63b1b608c 100644 (file)
@@ -3746,9 +3746,9 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
  */
 void idpf_vport_intr_deinit(struct idpf_vport *vport)
 {
+       idpf_vport_intr_dis_irq_all(vport);
        idpf_vport_intr_napi_dis_all(vport);
        idpf_vport_intr_napi_del_all(vport);
-       idpf_vport_intr_dis_irq_all(vport);
        idpf_vport_intr_rel_irq(vport);
 }
 
@@ -4179,7 +4179,6 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
 
        idpf_vport_intr_map_vector_to_qs(vport);
        idpf_vport_intr_napi_add_all(vport);
-       idpf_vport_intr_napi_ena_all(vport);
 
        err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
        if (err)
@@ -4193,17 +4192,20 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
        if (err)
                goto unroll_vectors_alloc;
 
-       idpf_vport_intr_ena_irq_all(vport);
-
        return 0;
 
 unroll_vectors_alloc:
-       idpf_vport_intr_napi_dis_all(vport);
        idpf_vport_intr_napi_del_all(vport);
 
        return err;
 }
 
+void idpf_vport_intr_ena(struct idpf_vport *vport)
+{
+       idpf_vport_intr_napi_ena_all(vport);
+       idpf_vport_intr_ena_irq_all(vport);
+}
+
 /**
  * idpf_config_rss - Send virtchnl messages to configure RSS
  * @vport: virtual port
index 3d046b81e507a1d29ce038a85ff542a5835dab01..551391e20464709c6cc566d8b02a0b395c04b702 100644 (file)
@@ -990,6 +990,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport);
 void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
 void idpf_vport_intr_deinit(struct idpf_vport *vport);
 int idpf_vport_intr_init(struct idpf_vport *vport);
+void idpf_vport_intr_ena(struct idpf_vport *vport);
 enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
 int idpf_config_rss(struct idpf_vport *vport);
 int idpf_init_rss(struct idpf_vport *vport);
index f2c4f1966bb041629411c252ca7c7f06b5fdf51e..0cd2bd695db1dfddd64d2c82027f4206b6609268 100644 (file)
@@ -1629,12 +1629,17 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
        struct igc_hw *hw = &adapter->hw;
        u32 eeer;
 
+       linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+                        edata->supported);
+       linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+                        edata->supported);
+       linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+                        edata->supported);
+
        if (hw->dev_spec._base.eee_enable)
                mii_eee_cap1_mod_linkmode_t(edata->advertised,
                                            adapter->eee_advert);
 
-       *edata = adapter->eee;
-
        eeer = rd32(IGC_EEER);
 
        /* EEE status on negotiated link */
index 12f004f46082cdfdd68493f43ef2e1653b2e815e..305e05294a26595fdc361851e2c75dd9355b9e20 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/bpf_trace.h>
 #include <net/xdp_sock_drv.h>
 #include <linux/pci.h>
+#include <linux/mdio.h>
 
 #include <net/ipv6.h>
 
@@ -4975,6 +4976,9 @@ void igc_up(struct igc_adapter *adapter)
        /* start the watchdog. */
        hw->mac.get_link_status = true;
        schedule_work(&adapter->watchdog_task);
+
+       adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T |
+                             MDIO_EEE_2_5GT;
 }
 
 /**
index e8b73b9d75e3118f56ee42a322d05491b0c325f0..97722ce8c4cb34e2fac06adfb08c189d18989a5b 100644 (file)
@@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
         * - when available free entries are less.
         * Lower priority ones out of avaialble free entries are always
         * chosen when 'high vs low' question arises.
+        *
+        * For a VF base MCAM match rule is set by its PF. And all the
+        * further MCAM rules installed by VF on its own are
+        * concatenated with the base rule set by its PF. Hence PF entries
+        * should be at lower priority compared to VF entries. Otherwise
+        * base rule is hit always and rules installed by VF will be of
+        * no use. Hence if the request is from PF then allocate low
+        * priority entries.
         */
+       if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+               goto lprio_alloc;
 
        /* Get the search range for priority allocation request */
        if (req->priority) {
@@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
                goto alloc;
        }
 
-       /* For a VF base MCAM match rule is set by its PF. And all the
-        * further MCAM rules installed by VF on its own are
-        * concatenated with the base rule set by its PF. Hence PF entries
-        * should be at lower priority compared to VF entries. Otherwise
-        * base rule is hit always and rules installed by VF will be of
-        * no use. Hence if the request is from PF and NOT a priority
-        * allocation request then allocate low priority entries.
-        */
-       if (!(pcifunc & RVU_PFVF_FUNC_MASK))
-               goto lprio_alloc;
-
        /* Find out the search range for non-priority allocation request
         *
         * Get MCAM free entry count in middle zone.
@@ -2568,6 +2567,18 @@ lprio_alloc:
                reverse = true;
                start = 0;
                end = mcam->bmap_entries;
+               /* Ensure PF requests are always at bottom and if PF requests
+                * for higher/lower priority entry wrt reference entry then
+                * honour that criteria and start search for entries from bottom
+                * and not in mid zone.
+                */
+               if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+                   req->priority == NPC_MCAM_HIGHER_PRIO)
+                       end = req->ref_entry;
+
+               if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+                   req->priority == NPC_MCAM_LOWER_PRIO)
+                       start = req->ref_entry;
        }
 
 alloc:
index 070711df612ece7978b7e6cd0310203b635e0439..edac008099c0c41def858e1bca01d7d22e8ffe10 100644 (file)
@@ -1422,7 +1422,10 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
        otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
 
        /* delete the txschq nodes allocated for this node */
+       otx2_qos_disable_sq(pfvf, qid);
+       otx2_qos_free_hw_node_schq(pfvf, node);
        otx2_qos_free_sw_node_schq(pfvf, node);
+       pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
 
        /* mark this node as htb inner node */
        WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
@@ -1632,6 +1635,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
                dwrr_del_node = true;
 
        /* destroy the leaf node */
+       otx2_qos_disable_sq(pfvf, qid);
        otx2_qos_destroy_node(pfvf, node);
        pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
 
index cae46290a7aee28a2d4feb4ab65ec5340dc91bf8..c84ce54a84a00e88fd463723fd66dbdd6a86c44a 100644 (file)
@@ -1131,9 +1131,9 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
        const struct mtk_soc_data *soc = eth->soc;
        dma_addr_t phy_ring_tail;
-       int cnt = MTK_QDMA_RING_SIZE;
+       int cnt = soc->tx.fq_dma_size;
        dma_addr_t dma_addr;
-       int i;
+       int i, j, len;
 
        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
                eth->scratch_ring = eth->sram_base;
@@ -1142,40 +1142,46 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
                                                       cnt * soc->tx.desc_size,
                                                       &eth->phy_scratch_ring,
                                                       GFP_KERNEL);
+
        if (unlikely(!eth->scratch_ring))
                return -ENOMEM;
 
-       eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
-       if (unlikely(!eth->scratch_head))
-               return -ENOMEM;
+       phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
 
-       dma_addr = dma_map_single(eth->dma_dev,
-                                 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
-                                 DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
-               return -ENOMEM;
+       for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
+               len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
+               eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
 
-       phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+               if (unlikely(!eth->scratch_head[j]))
+                       return -ENOMEM;
 
-       for (i = 0; i < cnt; i++) {
-               dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
-               struct mtk_tx_dma_v2 *txd;
+               dma_addr = dma_map_single(eth->dma_dev,
+                                         eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
+                                         DMA_FROM_DEVICE);
 
-               txd = eth->scratch_ring + i * soc->tx.desc_size;
-               txd->txd1 = addr;
-               if (i < cnt - 1)
-                       txd->txd2 = eth->phy_scratch_ring +
-                                   (i + 1) * soc->tx.desc_size;
+               if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+                       return -ENOMEM;
 
-               txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
-               if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
-                       txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
-               txd->txd4 = 0;
-               if (mtk_is_netsys_v2_or_greater(eth)) {
-                       txd->txd5 = 0;
-                       txd->txd6 = 0;
-                       txd->txd7 = 0;
-                       txd->txd8 = 0;
+               for (i = 0; i < cnt; i++) {
+                       struct mtk_tx_dma_v2 *txd;
+
+                       txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
+                       txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+                       if (j * MTK_FQ_DMA_LENGTH + i < cnt)
+                               txd->txd2 = eth->phy_scratch_ring +
+                                           (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
+
+                       txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+                       if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+                               txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
+
+                       txd->txd4 = 0;
+                       if (mtk_is_netsys_v2_or_greater(eth)) {
+                               txd->txd5 = 0;
+                               txd->txd6 = 0;
+                               txd->txd7 = 0;
+                               txd->txd8 = 0;
+                       }
                }
        }
 
@@ -2457,7 +2463,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
        if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
                ring_size = MTK_QDMA_RING_SIZE;
        else
-               ring_size = MTK_DMA_SIZE;
+               ring_size = soc->tx.dma_size;
 
        ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
                               GFP_KERNEL);
@@ -2465,8 +2471,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
                goto no_tx_mem;
 
        if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
-               ring->dma = eth->sram_base + ring_size * sz;
-               ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
+               ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
+               ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
        } else {
                ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
                                               &ring->phys, GFP_KERNEL);
@@ -2588,6 +2594,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 {
        const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+       const struct mtk_soc_data *soc = eth->soc;
        struct mtk_rx_ring *ring;
        int rx_data_len, rx_dma_size, tx_ring_size;
        int i;
@@ -2595,7 +2602,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
                tx_ring_size = MTK_QDMA_RING_SIZE;
        else
-               tx_ring_size = MTK_DMA_SIZE;
+               tx_ring_size = soc->tx.dma_size;
 
        if (rx_flag == MTK_RX_FLAGS_QDMA) {
                if (ring_no)
@@ -2610,7 +2617,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
                rx_dma_size = MTK_HW_LRO_DMA_SIZE;
        } else {
                rx_data_len = ETH_DATA_LEN;
-               rx_dma_size = MTK_DMA_SIZE;
+               rx_dma_size = soc->rx.dma_size;
        }
 
        ring->frag_size = mtk_max_frag_size(rx_data_len);
@@ -3139,7 +3146,10 @@ static void mtk_dma_free(struct mtk_eth *eth)
                        mtk_rx_clean(eth, &eth->rx_ring[i], false);
        }
 
-       kfree(eth->scratch_head);
+       for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
+               kfree(eth->scratch_head[i]);
+               eth->scratch_head[i] = NULL;
+       }
 }
 
 static bool mtk_hw_reset_check(struct mtk_eth *eth)
@@ -5052,11 +5062,14 @@ static const struct mtk_soc_data mt2701_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
                .irq_done_mask = MTK_RX_DONE_INT,
                .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_size = MTK_DMA_SIZE(2K),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@ -5076,11 +5089,14 @@ static const struct mtk_soc_data mt7621_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
                .irq_done_mask = MTK_RX_DONE_INT,
                .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_size = MTK_DMA_SIZE(2K),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@ -5102,11 +5118,14 @@ static const struct mtk_soc_data mt7622_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
                .irq_done_mask = MTK_RX_DONE_INT,
                .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_size = MTK_DMA_SIZE(2K),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@ -5127,11 +5146,14 @@ static const struct mtk_soc_data mt7623_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
                .irq_done_mask = MTK_RX_DONE_INT,
                .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_size = MTK_DMA_SIZE(2K),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@ -5150,11 +5172,14 @@ static const struct mtk_soc_data mt7629_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
                .irq_done_mask = MTK_RX_DONE_INT,
                .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_size = MTK_DMA_SIZE(2K),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@ -5176,6 +5201,8 @@ static const struct mtk_soc_data mt7981_data = {
                .desc_size = sizeof(struct mtk_tx_dma_v2),
                .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
                .dma_len_offset = 8,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
@@ -5183,6 +5210,7 @@ static const struct mtk_soc_data mt7981_data = {
                .dma_l4_valid = RX_DMA_L4_VALID_V2,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
        },
 };
 
@@ -5202,6 +5230,8 @@ static const struct mtk_soc_data mt7986_data = {
                .desc_size = sizeof(struct mtk_tx_dma_v2),
                .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
                .dma_len_offset = 8,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
@@ -5209,6 +5239,7 @@ static const struct mtk_soc_data mt7986_data = {
                .dma_l4_valid = RX_DMA_L4_VALID_V2,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
        },
 };
 
@@ -5228,6 +5259,8 @@ static const struct mtk_soc_data mt7988_data = {
                .desc_size = sizeof(struct mtk_tx_dma_v2),
                .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
                .dma_len_offset = 8,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(4K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma_v2),
@@ -5235,6 +5268,7 @@ static const struct mtk_soc_data mt7988_data = {
                .dma_l4_valid = RX_DMA_L4_VALID_V2,
                .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
                .dma_len_offset = 8,
+               .dma_size = MTK_DMA_SIZE(2K),
        },
 };
 
@@ -5249,6 +5283,7 @@ static const struct mtk_soc_data rt5350_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
@@ -5256,6 +5291,7 @@ static const struct mtk_soc_data rt5350_data = {
                .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
        },
 };
 
index 4eab30b44070633a2357f8517a6ad1e5c8340149..f5174f6cb1bbec5bfc525f31e6b7359b8e900634 100644 (file)
@@ -32,7 +32,9 @@
 #define MTK_TX_DMA_BUF_LEN     0x3fff
 #define MTK_TX_DMA_BUF_LEN_V2  0xffff
 #define MTK_QDMA_RING_SIZE     2048
-#define MTK_DMA_SIZE           512
+#define MTK_DMA_SIZE(x)                (SZ_##x)
+#define MTK_FQ_DMA_HEAD                32
+#define MTK_FQ_DMA_LENGTH      2048
 #define MTK_RX_ETH_HLEN                (ETH_HLEN + ETH_FCS_LEN)
 #define MTK_RX_HLEN            (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
 #define MTK_DMA_DUMMY_DESC     0xffffffff
@@ -1176,6 +1178,8 @@ struct mtk_soc_data {
                u32     desc_size;
                u32     dma_max_len;
                u32     dma_len_offset;
+               u32     dma_size;
+               u32     fq_dma_size;
        } tx;
        struct {
                u32     desc_size;
@@ -1183,6 +1187,7 @@ struct mtk_soc_data {
                u32     dma_l4_valid;
                u32     dma_max_len;
                u32     dma_len_offset;
+               u32     dma_size;
        } rx;
 };
 
@@ -1264,7 +1269,7 @@ struct mtk_eth {
        struct napi_struct              rx_napi;
        void                            *scratch_ring;
        dma_addr_t                      phy_scratch_ring;
-       void                            *scratch_head;
+       void                            *scratch_head[MTK_FQ_DMA_HEAD];
        struct clk                      *clks[MTK_CLK_MAX];
 
        struct mii_bus                  *mii_bus;
index caa34b9c161e5188a9ac1607a01e8c390d95d568..33e32584b07f57e631a5ee801caacb0b4298af1d 100644 (file)
@@ -102,8 +102,14 @@ static inline void
 mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
 {
        int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+       struct udphdr *udphdr;
 
-       udp_hdr(skb)->len = htons(payload_len);
+       if (skb->encapsulation)
+               udphdr = (struct udphdr *)skb_inner_transport_header(skb);
+       else
+               udphdr = udp_hdr(skb);
+
+       udphdr->len = htons(payload_len);
 }
 
 struct mlx5e_accel_tx_state {
index 41a2543a52cda094dc15a1f863edb4ffa8b6d492..e51b03d4c717f1a740d3536fda4d492b3f57f198 100644 (file)
@@ -750,8 +750,7 @@ err_fs:
 err_fs_ft:
        if (rx->allow_tunnel_mode)
                mlx5_eswitch_unblock_encap(mdev);
-       mlx5_del_flow_rules(rx->status.rule);
-       mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+       mlx5_ipsec_rx_status_destroy(ipsec, rx);
 err_add:
        mlx5_destroy_flow_table(rx->ft.status);
 err_fs_ft_status:
index 82064614846f5f88646b12a887bf548ccbe47639..359050f0b54ddc25980bfe7b2ebeb6a9f43c9fde 100644 (file)
@@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
                if (!x || !x->xso.offload_handle)
                        goto out_disable;
 
-               if (xo->inner_ipproto) {
-                       /* Cannot support tunnel packet over IPsec tunnel mode
-                        * because we cannot offload three IP header csum
-                        */
-                       if (x->props.mode == XFRM_MODE_TUNNEL)
-                               goto out_disable;
-
-                       /* Only support UDP or TCP L4 checksum */
-                       if (xo->inner_ipproto != IPPROTO_UDP &&
-                           xo->inner_ipproto != IPPROTO_TCP)
-                               goto out_disable;
-               }
+               /* Only support UDP or TCP L4 checksum */
+               if (xo->inner_ipproto &&
+                   xo->inner_ipproto != IPPROTO_UDP &&
+                   xo->inner_ipproto != IPPROTO_TCP)
+                       goto out_disable;
 
                return features;
 
index b758bc72ac36bb61b40da488659368548c825005..c53c99dde558727190e7cad76d42efaabf2ecdcf 100644 (file)
@@ -3886,7 +3886,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
                mlx5e_fold_sw_stats64(priv, stats);
        }
 
-       stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+       stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
 
        stats->rx_length_errors =
                PPORT_802_3_GET(pstats, a_in_range_length_errors) +
index e211c41cec06a8bd4bc776af80dba5b342f7e3f4..e1ed214e86517a21af4cf6f9f2e75a9eaaf4b2b8 100644 (file)
@@ -1186,6 +1186,9 @@ void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
                ts_stats->err = 0;
                ts_stats->lost = 0;
 
+               if (!ptp)
+                       goto out;
+
                /* Aggregate stats across all TCs */
                for (i = 0; i < ptp->num_tc; i++) {
                        struct mlx5e_ptp_cq_stats *stats =
@@ -1214,6 +1217,7 @@ void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
                }
        }
 
+out:
        mutex_unlock(&priv->state_lock);
 }
 
index 099bf10788899018ab0fa0236c0cbeff39a43941..b09e9abd39f37ffa309bd71fb018c6fadaaf48dd 100644 (file)
@@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
 
        *hopbyhop = 0;
        if (skb->encapsulation) {
-               ihs = skb_inner_tcp_all_headers(skb);
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+                       ihs = skb_inner_transport_offset(skb) +
+                             sizeof(struct udphdr);
+               else
+                       ihs = skb_inner_tcp_all_headers(skb);
                stats->tso_inner_packets++;
                stats->tso_inner_bytes += skb->len - ihs;
        } else {
index 2d95a9b7b44e197f67a0a6c2e6c3a1c15379a7bb..b61b7d96611413deda2c987282326d84a019531c 100644 (file)
@@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
        do {
                if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
                        break;
+               if (pci_channel_offline(dev->pdev)) {
+                       mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+                       return -EACCES;
+               }
 
                cond_resched();
        } while (!time_after(jiffies, end));
index ad38e31822df10bbf72ab9a3416bc7143efb6b63..a6329ca2d9bffbda0b5a69b4973f061144a9fa67 100644 (file)
@@ -248,6 +248,10 @@ recover_from_sw_reset:
        do {
                if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
                        break;
+               if (pci_channel_offline(dev->pdev)) {
+                       mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+                       goto unlock;
+               }
 
                msleep(20);
        } while (!time_after(jiffies, end));
@@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
                        mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
                        return -ENODEV;
                }
+               if (pci_channel_offline(dev->pdev)) {
+                       mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
+                       return -EACCES;
+               }
                msleep(100);
        }
        return 0;
index f7f0476a4a58d350b52b9966a0efaa0d6964a5a8..d0871c46b8c54376154cdf36e90c13636b7582ef 100644 (file)
@@ -719,6 +719,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
        struct mlx5_core_dev *dev;
        u8 mode;
 #endif
+       bool roce_support;
        int i;
 
        for (i = 0; i < ldev->ports; i++)
@@ -743,6 +744,11 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
                if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
                        return false;
 #endif
+       roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
+       for (i = 1; i < ldev->ports; i++)
+               if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
+                       return false;
+
        return true;
 }
 
@@ -910,8 +916,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
                } else if (roce_lag) {
                        dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
                        mlx5_rescan_drivers_locked(dev0);
-                       for (i = 1; i < ldev->ports; i++)
-                               mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+                       for (i = 1; i < ldev->ports; i++) {
+                               if (mlx5_get_roce_state(ldev->pf[i].dev))
+                                       mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+                       }
                } else if (shared_fdb) {
                        int i;
 
index c16b462ddedf7e6914f0c0b796f68ee2ae2bf189..ab2717012b79b5bd115b73a5cd71e245c6045979 100644 (file)
@@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
                                                                      &dest, 1);
                        if (IS_ERR(lag_definer->rules[idx])) {
                                err = PTR_ERR(lag_definer->rules[idx]);
-                               while (i--)
-                                       while (j--)
+                               do {
+                                       while (j--) {
+                                               idx = i * ldev->buckets + j;
                                                mlx5_del_flow_rules(lag_definer->rules[idx]);
+                                       }
+                                       j = ldev->buckets;
+                               } while (i--);
                                goto destroy_fg;
                        }
                }
index 6b774e0c2766594250271a2931b77b4540e7ba7c..d0b595ba611014bbfe16712506daf035a012fd7e 100644 (file)
@@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
                        ret = -EBUSY;
                        goto pci_unlock;
                }
+               if (pci_channel_offline(dev->pdev)) {
+                       ret = -EACCES;
+                       goto pci_unlock;
+               }
 
                /* Check if semaphore is already locked */
                ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
index dd5d186dc6148f065b986ee5d2363940314816db..f6deb5a3f82024e528436af8fd66d238518a6588 100644 (file)
@@ -100,10 +100,6 @@ static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
 
 static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
 {
-       /* Feature is currently implemented for PFs only */
-       if (!mlx5_core_is_pf(dev))
-               return false;
-
        /* Honor the SW implementation limit */
        if (host_buses > MLX5_SD_MAX_GROUP_SZ)
                return false;
@@ -162,6 +158,14 @@ static int sd_init(struct mlx5_core_dev *dev)
        bool sdm;
        int err;
 
+       /* Feature is currently implemented for PFs only */
+       if (!mlx5_core_is_pf(dev))
+               return 0;
+
+       /* Block on embedded CPU PFs */
+       if (mlx5_core_is_ecpf(dev))
+               return 0;
+
        if (!MLX5_CAP_MCAM_REG(dev, mpir))
                return 0;
 
index 6574c145dc1e2da27d2d3a6071028dc010d0a825..459a836a5d9c15321409dc75ebc6a84e66c3fac7 100644 (file)
@@ -1298,6 +1298,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
 
        if (!err)
                mlx5_function_disable(dev, boot);
+       else
+               mlx5_stop_health_poll(dev, boot);
+
        return err;
 }
 
index 5dba6d2d633cb6d26487b9e6db87307ab12ddef0..2427610f4306d97191f20ac6d25476e304b34870 100644 (file)
@@ -586,6 +586,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
                        netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
                        goto out_xdp_abort;
                }
+               buf_info->page = NULL;
                stats->xdp_tx++;
 
                /* the Tx completion will free the buffers */
index 79ba47bb3602ec21c435cceac66bdc291fa270a3..f7d21da1a0fb62b0112019b6600f21971ce98adb 100644 (file)
@@ -455,7 +455,7 @@ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
 {
        const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
 
-       rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+       rx_class_ft1_set_start_len(miig_rt, slice, ETH_ALEN, ETH_ALEN);
        rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
        rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
        rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
index 2d5b021b4ea6053eeb055a76fa4c7d9380cd2a53..fef4eff7753a7acb1e11d9712abd669de7740df6 100644 (file)
@@ -439,7 +439,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
 
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 
-       err = ip_local_out(net, skb->sk, skb);
+       err = ip_local_out(net, NULL, skb);
        if (unlikely(net_xmit_eval(err)))
                DEV_STATS_INC(dev, tx_errors);
        else
@@ -494,7 +494,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
 
        memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 
-       err = ip6_local_out(dev_net(dev), skb->sk, skb);
+       err = ip6_local_out(dev_net(dev), NULL, skb);
        if (unlikely(net_xmit_eval(err)))
                DEV_STATS_INC(dev, tx_errors);
        else
index a4d2e76a8d587cc6ce7ad7f98e382a1c81f76e67..16789cd446e9e47fd738f12a9cde4e1eeef55fcf 100644 (file)
@@ -55,6 +55,7 @@ static void netkit_prep_forward(struct sk_buff *skb, bool xnet)
        skb_scrub_packet(skb, xnet);
        skb->priority = 0;
        nf_skip_egress(skb, true);
+       skb_reset_mac_header(skb);
 }
 
 static struct netkit *netkit_priv(const struct net_device *dev)
@@ -78,6 +79,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
                     skb_orphan_frags(skb, GFP_ATOMIC)))
                goto drop;
        netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)));
+       eth_skb_pkt_type(skb, peer);
        skb->dev = peer;
        entry = rcu_dereference(nk->active);
        if (entry)
@@ -85,7 +87,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
        switch (ret) {
        case NETKIT_NEXT:
        case NETKIT_PASS:
-               skb->protocol = eth_type_trans(skb, skb->dev);
+               eth_skb_pull_mac(skb);
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
                if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
                        dev_sw_netstats_tx_add(dev, 1, len);
@@ -155,6 +157,16 @@ static void netkit_set_multicast(struct net_device *dev)
        /* Nothing to do, we receive whatever gets pushed to us! */
 }
 
+static int netkit_set_macaddr(struct net_device *dev, void *sa)
+{
+       struct netkit *nk = netkit_priv(dev);
+
+       if (nk->mode != NETKIT_L2)
+               return -EOPNOTSUPP;
+
+       return eth_mac_addr(dev, sa);
+}
+
 static void netkit_set_headroom(struct net_device *dev, int headroom)
 {
        struct netkit *nk = netkit_priv(dev), *nk2;
@@ -198,6 +210,7 @@ static const struct net_device_ops netkit_netdev_ops = {
        .ndo_start_xmit         = netkit_xmit,
        .ndo_set_rx_mode        = netkit_set_multicast,
        .ndo_set_rx_headroom    = netkit_set_headroom,
+       .ndo_set_mac_address    = netkit_set_macaddr,
        .ndo_get_iflink         = netkit_get_iflink,
        .ndo_get_peer_dev       = netkit_peer_dev,
        .ndo_get_stats64        = netkit_get_stats,
@@ -300,9 +313,11 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
 
        if (!attr)
                return 0;
-       NL_SET_ERR_MSG_ATTR(extack, attr,
-                           "Setting Ethernet address is not supported");
-       return -EOPNOTSUPP;
+       if (nla_len(attr) != ETH_ALEN)
+               return -EINVAL;
+       if (!is_valid_ether_addr(nla_data(attr)))
+               return -EADDRNOTAVAIL;
+       return 0;
 }
 
 static struct rtnl_link_ops netkit_link_ops;
@@ -365,6 +380,9 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
                strscpy(ifname, "nk%d", IFNAMSIZ);
                ifname_assign_type = NET_NAME_ENUM;
        }
+       if (mode != NETKIT_L2 &&
+           (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
+               return -EOPNOTSUPP;
 
        net = rtnl_link_get_net(src_net, tbp);
        if (IS_ERR(net))
@@ -379,7 +397,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
 
        netif_inherit_tso_max(peer, dev);
 
-       if (mode == NETKIT_L2)
+       if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
                eth_hw_addr_random(peer);
        if (ifmp && dev->ifindex)
                peer->ifindex = ifmp->ifi_index;
@@ -402,7 +420,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
        if (err < 0)
                goto err_configure_peer;
 
-       if (mode == NETKIT_L2)
+       if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
                eth_hw_addr_random(dev);
        if (tb[IFLA_IFNAME])
                nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
index 13e30ea7eec5d1e8cbfca6811d09a347660e06dd..5aada7cf3da72632510612ac5863953f9d964808 100644 (file)
@@ -866,6 +866,17 @@ static int ksz8061_config_init(struct phy_device *phydev)
 {
        int ret;
 
+       /* Chip can be powered down by the bootstrap code. */
+       ret = phy_read(phydev, MII_BMCR);
+       if (ret < 0)
+               return ret;
+       if (ret & BMCR_PDOWN) {
+               ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN);
+               if (ret < 0)
+                       return ret;
+               usleep_range(1000, 2000);
+       }
+
        ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
        if (ret)
                return ret;
@@ -1939,7 +1950,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
        {0x1c, 0x20, 0xeeee},
 };
 
-static int ksz9477_config_init(struct phy_device *phydev)
+static int ksz9477_phy_errata(struct phy_device *phydev)
 {
        int err;
        int i;
@@ -1967,16 +1978,30 @@ static int ksz9477_config_init(struct phy_device *phydev)
                        return err;
        }
 
+       err = genphy_restart_aneg(phydev);
+       if (err)
+               return err;
+
+       return err;
+}
+
+static int ksz9477_config_init(struct phy_device *phydev)
+{
+       int err;
+
+       /* Only KSZ9897 family of switches needs this fix. */
+       if ((phydev->phy_id & 0xf) == 1) {
+               err = ksz9477_phy_errata(phydev);
+               if (err)
+                       return err;
+       }
+
        /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
         * in this switch shall be regarded as broken.
         */
        if (phydev->dev_flags & MICREL_NO_EEE)
                phydev->eee_broken_modes = -1;
 
-       err = genphy_restart_aneg(phydev);
-       if (err)
-               return err;
-
        return kszphy_config_init(phydev);
 }
 
@@ -2085,6 +2110,71 @@ static int kszphy_resume(struct phy_device *phydev)
        return 0;
 }
 
+static int ksz9477_resume(struct phy_device *phydev)
+{
+       int ret;
+
+       /* No need to initialize registers if not powered down. */
+       ret = phy_read(phydev, MII_BMCR);
+       if (ret < 0)
+               return ret;
+       if (!(ret & BMCR_PDOWN))
+               return 0;
+
+       genphy_resume(phydev);
+
+       /* After switching from power-down to normal mode, an internal global
+        * reset is automatically generated. Wait a minimum of 1 ms before
+        * read/write access to the PHY registers.
+        */
+       usleep_range(1000, 2000);
+
+       /* Only KSZ9897 family of switches needs this fix. */
+       if ((phydev->phy_id & 0xf) == 1) {
+               ret = ksz9477_phy_errata(phydev);
+               if (ret)
+                       return ret;
+       }
+
+       /* Enable PHY Interrupts */
+       if (phy_interrupt_is_valid(phydev)) {
+               phydev->interrupts = PHY_INTERRUPT_ENABLED;
+               if (phydev->drv->config_intr)
+                       phydev->drv->config_intr(phydev);
+       }
+
+       return 0;
+}
+
+static int ksz8061_resume(struct phy_device *phydev)
+{
+       int ret;
+
+       /* This function can be called twice when the Ethernet device is on. */
+       ret = phy_read(phydev, MII_BMCR);
+       if (ret < 0)
+               return ret;
+       if (!(ret & BMCR_PDOWN))
+               return 0;
+
+       genphy_resume(phydev);
+       usleep_range(1000, 2000);
+
+       /* Re-program the value after chip is reset. */
+       ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+       if (ret)
+               return ret;
+
+       /* Enable PHY Interrupts */
+       if (phy_interrupt_is_valid(phydev)) {
+               phydev->interrupts = PHY_INTERRUPT_ENABLED;
+               if (phydev->drv->config_intr)
+                       phydev->drv->config_intr(phydev);
+       }
+
+       return 0;
+}
+
 static int kszphy_probe(struct phy_device *phydev)
 {
        const struct kszphy_type *type = phydev->drv->driver_data;
@@ -4029,7 +4119,7 @@ static int lan8841_config_intr(struct phy_device *phydev)
 
        if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
                err = phy_read(phydev, LAN8814_INTS);
-               if (err)
+               if (err < 0)
                        return err;
 
                /* Enable / disable interrupts. It is OK to enable PTP interrupt
@@ -4045,6 +4135,14 @@ static int lan8841_config_intr(struct phy_device *phydev)
                        return err;
 
                err = phy_read(phydev, LAN8814_INTS);
+               if (err < 0)
+                       return err;
+
+               /* Getting a positive value doesn't mean that is an error, it
+                * just indicates what was the status. Therefore make sure to
+                * clear the value and say that there is no error.
+                */
+               err = 0;
        }
 
        return err;
@@ -5327,10 +5425,11 @@ static struct phy_driver ksphy_driver[] = {
        /* PHY_BASIC_FEATURES */
        .probe          = kszphy_probe,
        .config_init    = ksz8061_config_init,
+       .soft_reset     = genphy_soft_reset,
        .config_intr    = kszphy_config_intr,
        .handle_interrupt = kszphy_handle_interrupt,
        .suspend        = kszphy_suspend,
-       .resume         = kszphy_resume,
+       .resume         = ksz8061_resume,
 }, {
        .phy_id         = PHY_ID_KSZ9021,
        .phy_id_mask    = 0x000ffffe,
@@ -5484,7 +5583,7 @@ static struct phy_driver ksphy_driver[] = {
        .config_intr    = kszphy_config_intr,
        .handle_interrupt = kszphy_handle_interrupt,
        .suspend        = genphy_suspend,
-       .resume         = genphy_resume,
+       .resume         = ksz9477_resume,
        .get_features   = ksz9477_get_features,
 } };
 
index cbea246664795f27618908838b37384e8f3b67d0..8e82184be5e7d9954b83aae904a37bb7eda175bb 100644 (file)
@@ -879,7 +879,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
 static int smsc95xx_reset(struct usbnet *dev)
 {
        struct smsc95xx_priv *pdata = dev->driver_priv;
-       u32 read_buf, write_buf, burst_cap;
+       u32 read_buf, burst_cap;
        int ret = 0, timeout;
 
        netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
@@ -1003,10 +1003,13 @@ static int smsc95xx_reset(struct usbnet *dev)
                return ret;
        netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
 
+       ret = smsc95xx_read_reg(dev, LED_GPIO_CFG, &read_buf);
+       if (ret < 0)
+               return ret;
        /* Configure GPIO pins as LED outputs */
-       write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
-               LED_GPIO_CFG_FDX_LED;
-       ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
+       read_buf |= LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
+                   LED_GPIO_CFG_FDX_LED;
+       ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, read_buf);
        if (ret < 0)
                return ret;
 
index 4a802c0ea2cbc381ecc8a48c7ca28bc75c4808f8..61a57d134544f958682f1b2caa66e3548fc199db 100644 (file)
@@ -2686,6 +2686,7 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
 {
        struct scatterlist *sgs[5], hdr, stat;
        u32 out_num = 0, tmp, in_num = 0;
+       bool ok;
        int ret;
 
        /* Caller should know better */
@@ -2731,8 +2732,9 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
        }
 
 unlock:
+       ok = vi->ctrl->status == VIRTIO_NET_OK;
        mutex_unlock(&vi->cvq_lock);
-       return vi->ctrl->status == VIRTIO_NET_OK;
+       return ok;
 }
 
 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
@@ -4257,7 +4259,6 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
        struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
        bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
        struct scatterlist sgs_rx;
-       int ret = 0;
        int i;
 
        if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
@@ -4267,27 +4268,27 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
                               ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
                return -EINVAL;
 
-       /* Acquire all queues dim_locks */
-       for (i = 0; i < vi->max_queue_pairs; i++)
-               mutex_lock(&vi->rq[i].dim_lock);
-
        if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
                vi->rx_dim_enabled = true;
-               for (i = 0; i < vi->max_queue_pairs; i++)
+               for (i = 0; i < vi->max_queue_pairs; i++) {
+                       mutex_lock(&vi->rq[i].dim_lock);
                        vi->rq[i].dim_enabled = true;
-               goto unlock;
+                       mutex_unlock(&vi->rq[i].dim_lock);
+               }
+               return 0;
        }
 
        coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
-       if (!coal_rx) {
-               ret = -ENOMEM;
-               goto unlock;
-       }
+       if (!coal_rx)
+               return -ENOMEM;
 
        if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
                vi->rx_dim_enabled = false;
-               for (i = 0; i < vi->max_queue_pairs; i++)
+               for (i = 0; i < vi->max_queue_pairs; i++) {
+                       mutex_lock(&vi->rq[i].dim_lock);
                        vi->rq[i].dim_enabled = false;
+                       mutex_unlock(&vi->rq[i].dim_lock);
+               }
        }
 
        /* Since the per-queue coalescing params can be set,
@@ -4300,22 +4301,19 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
                                  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
-                                 &sgs_rx)) {
-               ret = -EINVAL;
-               goto unlock;
-       }
+                                 &sgs_rx))
+               return -EINVAL;
 
        vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
        vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
        for (i = 0; i < vi->max_queue_pairs; i++) {
+               mutex_lock(&vi->rq[i].dim_lock);
                vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
                vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
-       }
-unlock:
-       for (i = vi->max_queue_pairs - 1; i >= 0; i--)
                mutex_unlock(&vi->rq[i].dim_lock);
+       }
 
-       return ret;
+       return 0;
 }
 
 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
@@ -4417,9 +4415,9 @@ static void virtnet_rx_dim_work(struct work_struct *work)
                if (err)
                        pr_debug("%s: Failed to send dim parameters on rxq%d\n",
                                 dev->name, qnum);
-               dim->state = DIM_START_MEASURE;
        }
 out:
+       dim->state = DIM_START_MEASURE;
        mutex_unlock(&rq->dim_lock);
 }
 
index 89ca6e75fcc6b066bad0cb3c2b18734df3a0dec3..63822d454c00c765e2860b50a2a67487bcbafdae 100644 (file)
@@ -2034,8 +2034,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
                                          rq->data_ring.base,
                                          rq->data_ring.basePA);
                        rq->data_ring.base = NULL;
-                       rq->data_ring.desc_size = 0;
                }
+               rq->data_ring.desc_size = 0;
        }
 }
 
index f78dd0438843b151d15d343d6d2e713d57ab1b3a..567cb3faab709c43ae7deff0fd5a155327899e12 100644 (file)
@@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev,
        struct vxlan_fdb *f;
        u32 ifindex = 0;
 
+       /* Ignore packets from invalid src-address */
+       if (!is_valid_ether_addr(src_mac))
+               return true;
+
 #if IS_ENABLED(CONFIG_IPV6)
        if (src_ip->sa.sa_family == AF_INET6 &&
            (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
@@ -1616,10 +1620,6 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
        if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
                return false;
 
-       /* Ignore packets from invalid src-address */
-       if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
-               return false;
-
        /* Get address from the outer IP header */
        if (vxlan_get_sk_family(vs) == AF_INET) {
                saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
index e6ea884cafc190ac1bcbbf15d80823896e259572..4f385f4a8cef2acfc6f7aece9f703c6b8b46eabc 100644 (file)
@@ -45,6 +45,7 @@ config ATH10K_SNOC
        depends on ATH10K
        depends on ARCH_QCOM || COMPILE_TEST
        depends on QCOM_SMEM
+       depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n
        select QCOM_SCM
        select QCOM_QMI_HELPERS
        help
index 3cc817a3b4a4047600591804bbc57aa45cba866f..b82e8fb2854130b1b4ebf109f4712f6f8c586590 100644 (file)
@@ -604,7 +604,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .coldboot_cal_ftm = true,
                .cbcal_restart_fw = false,
                .fw_mem_mode = 0,
-               .num_vdevs = 16 + 1,
+               .num_vdevs = 3,
                .num_peers = 512,
                .supports_suspend = false,
                .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
index 4f62e38ba48b3a144ba658ce11f4c8193024d89a..9b96dbb21d8336fd5bf8264514cfd3386e74537c 100644 (file)
@@ -7988,8 +7988,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
        struct ath11k_base *ab = ar->ab;
        struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
        int ret;
-       struct cur_regulatory_info *reg_info;
-       enum ieee80211_ap_reg_power power_type;
 
        mutex_lock(&ar->conf_mutex);
 
@@ -8000,17 +7998,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
        if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
            ctx->def.chan->band == NL80211_BAND_6GHZ &&
            arvif->vdev_type == WMI_VDEV_TYPE_STA) {
-               reg_info = &ab->reg_info_store[ar->pdev_idx];
-               power_type = vif->bss_conf.power_type;
-
-               ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type);
-
-               if (power_type == IEEE80211_REG_UNSET_AP) {
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               ath11k_reg_handle_chan_list(ab, reg_info, power_type);
                arvif->chanctx = *ctx;
                ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
        }
@@ -9626,6 +9613,8 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
        struct ath11k *ar = hw->priv;
        struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
        struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+       enum ieee80211_ap_reg_power power_type;
+       struct cur_regulatory_info *reg_info;
        struct ath11k_peer *peer;
        int ret = 0;
 
@@ -9705,6 +9694,29 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
                                ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
                                            sta->addr, arvif->vdev_id, ret);
                }
+
+               if (!ret &&
+                   ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+                   arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+                   arvif->chanctx.def.chan &&
+                   arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) {
+                       reg_info = &ar->ab->reg_info_store[ar->pdev_idx];
+                       power_type = vif->bss_conf.power_type;
+
+                       if (power_type == IEEE80211_REG_UNSET_AP) {
+                               ath11k_warn(ar->ab, "invalid power type %d\n",
+                                           power_type);
+                               ret = -EINVAL;
+                       } else {
+                               ret = ath11k_reg_handle_chan_list(ar->ab,
+                                                                 reg_info,
+                                                                 power_type);
+                               if (ret)
+                                       ath11k_warn(ar->ab,
+                                                   "failed to handle chan list with power type %d\n",
+                                                   power_type);
+                       }
+               }
        } else if (old_state == IEEE80211_STA_AUTHORIZED &&
                   new_state == IEEE80211_STA_ASSOC) {
                spin_lock_bh(&ar->ab->base_lock);
index 79eb3f9c902f4b77f3eb4481ed9d07e23038f54a..debe7c5919ef006ee911c94882c82ea3bcf61548 100644 (file)
@@ -561,6 +561,7 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
 {
        int i, j, n, ret, num_vectors = 0;
        u32 user_base_data = 0, base_vector = 0;
+       struct ath11k_ext_irq_grp *irq_grp;
        unsigned long irq_flags;
 
        ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
@@ -574,14 +575,16 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
                irq_flags |= IRQF_NOBALANCING;
 
        for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
-               struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+               irq_grp = &ab->ext_irq_grp[i];
                u32 num_irq = 0;
 
                irq_grp->ab = ab;
                irq_grp->grp_id = i;
                irq_grp->napi_ndev = alloc_netdev_dummy(0);
-               if (!irq_grp->napi_ndev)
-                       return -ENOMEM;
+               if (!irq_grp->napi_ndev) {
+                       ret = -ENOMEM;
+                       goto fail_allocate;
+               }
 
                netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
                               ath11k_pcic_ext_grp_napi_poll);
@@ -606,11 +609,8 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
                        int irq = ath11k_pcic_get_msi_irq(ab, vector);
 
                        if (irq < 0) {
-                               for (n = 0; n <= i; n++) {
-                                       irq_grp = &ab->ext_irq_grp[n];
-                                       free_netdev(irq_grp->napi_ndev);
-                               }
-                               return irq;
+                               ret = irq;
+                               goto fail_irq;
                        }
 
                        ab->irq_num[irq_idx] = irq;
@@ -635,6 +635,15 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
        }
 
        return 0;
+fail_irq:
+       /* i ->napi_ndev was properly allocated. Free it also */
+       i += 1;
+fail_allocate:
+       for (n = 0; n < i; n++) {
+               irq_grp = &ab->ext_irq_grp[n];
+               free_netdev(irq_grp->napi_ndev);
+       }
+       return ret;
 }
 
 int ath11k_pcic_config_irq(struct ath11k_base *ab)
index 33654f228ee871f7b8bbe1463351cc677904662a..d156a9c6419404b46995068f9589b2f156d8e01a 100644 (file)
@@ -1815,8 +1815,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
 err_fw:
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        debugfs_remove_recursive(drv->dbgfs_drv);
-       iwl_dbg_tlv_free(drv->trans);
 #endif
+       iwl_dbg_tlv_free(drv->trans);
        kfree(drv);
 err:
        return ERR_PTR(ret);
index 71e6b06481a93b6bf6f6eb14b1b1656510cdbc92..54f4acbbd05bd4d2f7e8a25e748c02c432025de1 100644 (file)
@@ -595,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
                                         void *_data)
 {
        struct wowlan_key_gtk_type_iter *data = _data;
+       __le32 *cipher = NULL;
+
+       if (key->keyidx == 4 || key->keyidx == 5)
+               cipher = &data->kek_kck_cmd->igtk_cipher;
+       if (key->keyidx == 6 || key->keyidx == 7)
+               cipher = &data->kek_kck_cmd->bigtk_cipher;
 
        switch (key->cipher) {
        default:
@@ -606,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
                return;
        case WLAN_CIPHER_SUITE_BIP_GMAC_256:
        case WLAN_CIPHER_SUITE_BIP_GMAC_128:
-               data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+               if (cipher)
+                       *cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
                return;
        case WLAN_CIPHER_SUITE_AES_CMAC:
-               data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               if (cipher)
+                       *cipher = cpu_to_le32(STA_KEY_FLG_CCM);
                return;
        case WLAN_CIPHER_SUITE_CCMP:
                if (!sta)
@@ -2341,7 +2350,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
 
 out:
        if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
-                                   WOWLAN_GET_STATUSES, 0) < 10) {
+                                   WOWLAN_GET_STATUSES,
+                                   IWL_FW_CMD_VER_UNKNOWN) < 10) {
                mvmvif->seqno_valid = true;
                /* +0x10 because the set API expects next-to-use, not last-used */
                mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
index 79f4ac8cbc729e9a6caaf393080fe99a4320892e..8101ecbb478b6ff5b38107aa0c5a3975d878c914 100644 (file)
@@ -1617,6 +1617,15 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
                                         &beacon_cmd.tim_size,
                                         beacon->data, beacon->len);
 
+               if (iwl_fw_lookup_cmd_ver(mvm->fw,
+                                         BEACON_TEMPLATE_CMD, 0) >= 14) {
+                       u32 offset = iwl_mvm_find_ie_offset(beacon->data,
+                                                           WLAN_EID_S1G_TWT,
+                                                           beacon->len);
+
+                       beacon_cmd.btwt_offset = cpu_to_le32(offset);
+               }
+
                iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
                                                 sizeof(beacon_cmd));
        }
index e7f5978ef2d71950e2d8bf9a006c054fe910a3b9..f4937a100cbe9b95b1d8b5758d501a47346aa50e 100644 (file)
@@ -94,20 +94,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
-       __le32 *dump_data = mfu_dump_notif->data;
-       int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
-       int i;
 
        if (mfu_dump_notif->index_num == 0)
                IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
                         le32_to_cpu(mfu_dump_notif->assert_id));
-
-       for (i = 0; i < n_words; i++)
-               IWL_DEBUG_INFO(mvm,
-                              "MFUART assert dump, dword %u: 0x%08x\n",
-                              le16_to_cpu(mfu_dump_notif->index_num) *
-                              n_words + i,
-                              le32_to_cpu(dump_data[i]));
 }
 
 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
@@ -895,8 +885,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
        int ret;
        u16 len = 0;
        u32 n_subbands;
-       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
-                                          IWL_FW_CMD_VER_UNKNOWN);
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+
        if (cmd_ver >= 7) {
                len = sizeof(cmd.v7);
                n_subbands = IWL_NUM_SUB_BANDS_V2;
index 5a06f887769a6ad2de8f73c59afb1b75aa37aa1f..5144fa0f96b0e047a5feea0d2e8f564ec6d1cb94 100644 (file)
@@ -873,7 +873,7 @@ void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
        }
 }
 
-static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
 {
        struct ieee80211_mgmt *mgmt = (void *)beacon;
        const u8 *ie;
index 486a6b8f3c97f7540e95e68fa7dfc0fee8fb2af9..de9f0b446545625b51288b7565871c6ff5bdcf6c 100644 (file)
@@ -1128,6 +1128,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
        RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
 }
 
+static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
+{
+       struct iwl_mvm *mvm = data;
+       struct iwl_mvm_sta *mvm_sta;
+       struct ieee80211_vif *vif;
+       int link_id;
+
+       mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+       vif = mvm_sta->vif;
+
+       if (!sta->valid_links)
+               return;
+
+       for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
+               struct iwl_mvm_link_sta *mvm_link_sta;
+
+               mvm_link_sta =
+                       rcu_dereference_check(mvm_sta->link[link_id],
+                                             lockdep_is_held(&mvm->mutex));
+               if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
+                       /*
+                        * We have a link STA but the link is inactive in
+                        * mac80211. This will happen if we failed to
+                        * deactivate the link but mac80211 roll back the
+                        * deactivation of the link.
+                        * Delete the stale data to avoid issues later on.
+                        */
+                       iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
+                                                 link_id, false);
+               }
+       }
+}
+
 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 {
        iwl_mvm_stop_device(mvm);
@@ -1150,6 +1183,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
         */
        ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
 
+       /* cleanup stations as links may be gone after restart */
+       ieee80211_iterate_stations_atomic(mvm->hw,
+                                         iwl_mvm_cleanup_sta_iterator, mvm);
+
        mvm->p2p_device_vif = NULL;
 
        iwl_mvm_reset_phy_ctxts(mvm);
@@ -6348,7 +6385,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
                .len[0] = sizeof(cmd),
                .data[1] = data,
                .len[1] = size,
-               .flags = sync ? 0 : CMD_ASYNC,
+               .flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
        };
        int ret;
 
index 0a3b7284eeddf9f382752e7d12a88da6a0b0d654..fcfd2dd7568e502633b7a0c4cee5808bec0e7024 100644 (file)
@@ -75,8 +75,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
                goto out_free_bf;
 
        iwl_mvm_tcm_add_vif(mvm, vif);
-       INIT_DELAYED_WORK(&mvmvif->csa_work,
-                         iwl_mvm_channel_switch_disconnect_wk);
 
        if (vif->type == NL80211_IFTYPE_MONITOR) {
                mvm->monitor_on = true;
index b7a461dba41ee3fe87a09f3ca10d5a1cdded0360..9d139b56e1527cdeb21e87433f9e54f169b76dd3 100644 (file)
@@ -515,11 +515,11 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
 }
 
-static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
-                                     struct iwl_mvm_sta *mvm_sta,
-                                     struct iwl_mvm_link_sta *mvm_sta_link,
-                                     unsigned int link_id,
-                                     bool is_in_fw)
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+                              struct iwl_mvm_sta *mvm_sta,
+                              struct iwl_mvm_link_sta *mvm_sta_link,
+                              unsigned int link_id,
+                              bool is_in_fw)
 {
        RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
                         is_in_fw ? ERR_PTR(-EINVAL) : NULL);
@@ -1014,7 +1014,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
 
                cmd.modify.tid = cpu_to_le32(data->tid);
 
-               ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+               ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL,
+                                          sizeof(cmd), &cmd);
                data->sta_mask = new_sta_mask;
                if (ret)
                        return ret;
index 1f58c727fa632df016241ed1a64ba5731d0b1d79..0a1959bd4079997854d9f3c2c444adc2f2f945a5 100644 (file)
@@ -1758,6 +1758,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
 void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
                           u64 *boottime, ktime_t *realtime);
 u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size);
 
 /* Tx / Host Commands */
 int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
index 376b23b409dcad2c3a9fffd9c1a4ce0d50b71e28..6cd4ec4d8f34411b7c4f53a2f03f468d1f1498b9 100644 (file)
@@ -122,13 +122,8 @@ enum {
 
 #define LINK_QUAL_AGG_FRAME_LIMIT_DEF  (63)
 #define LINK_QUAL_AGG_FRAME_LIMIT_MAX  (63)
-/*
- * FIXME - various places in firmware API still use u8,
- * e.g. LQ command and SCD config command.
- * This should be 256 instead.
- */
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF     (255)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX     (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF     (64)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX     (64)
 #define LINK_QUAL_AGG_FRAME_LIMIT_MIN  (0)
 
 #define LQ_SIZE                2       /* 2 mode tables:  "Active" and "Search" */
index d78af29281522ee756b3a9a3d412e673d70106ed..489cfb0a4ab1ecb77672cb22a2c1277c52266406 100644 (file)
@@ -2450,8 +2450,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
         *
         * We mark it as mac header, for upper layers to know where
         * all radio tap header ends.
+        *
+        * Since data doesn't move data while putting data on skb and that is
+        * the only way we use, data + len is the next place that hdr would be put
         */
-       skb_reset_mac_header(skb);
+       skb_set_mac_header(skb, skb->len);
 
        /*
         * Override the nss from the rx_vec since the rate_n_flags has
index a7ec172eeade8513f5c3a63d6c650a3473a9594b..b5f664ae5a17d0677e10aa9e4d79bf3152130e1c 100644 (file)
@@ -1313,7 +1313,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
                if (IWL_MVM_ADWELL_MAX_BUDGET)
                        cmd->v7.adwell_max_budget =
                                cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
-               else if (params->ssids && params->ssids[0].ssid_len)
+               else if (params->n_ssids && params->ssids[0].ssid_len)
                        cmd->v7.adwell_max_budget =
                                cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
                else
@@ -1418,7 +1418,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
        if (IWL_MVM_ADWELL_MAX_BUDGET)
                general_params->adwell_max_budget =
                        cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
-       else if (params->ssids && params->ssids[0].ssid_len)
+       else if (params->n_ssids && params->ssids[0].ssid_len)
                general_params->adwell_max_budget =
                        cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
        else
@@ -1730,7 +1730,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
                                break;
                }
 
-               if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
+               if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
+                   !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
+                              "scan: invalid BSSID at index %u, index_b=%u\n",
+                              j, idex_b)) {
                        memcpy(&pp->bssid_array[idex_b++],
                               scan_6ghz_params[j].bssid, ETH_ALEN);
                }
@@ -3319,10 +3322,11 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 
        ret = iwl_mvm_send_cmd_pdu(mvm,
                                   WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
-                                  0, sizeof(cmd), &cmd);
+                                  CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd);
        if (!ret)
                mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
 
+       IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret);
        return ret;
 }
 
index 20d4968d692a3664387b14951ebdf01af147cae8..cc79fe991c2633586d13701ebabb82c646a8f36e 100644 (file)
@@ -2848,7 +2848,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
                .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
                                  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
        };
-       u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+       struct iwl_host_cmd hcmd = {
+               .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
+               .flags = CMD_SEND_IN_RFKILL,
+               .len[0] = sizeof(cmd),
+               .data[0] = &cmd,
+       };
        int ret;
 
        BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
@@ -2860,7 +2865,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
                cmd.alloc.ssn = cpu_to_le16(ssn);
                cmd.alloc.win_size = cpu_to_le16(buf_size);
                baid = -EIO;
-       } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+       } else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
                cmd.remove_v1.baid = cpu_to_le32(baid);
                BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
        } else {
@@ -2869,8 +2874,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
                cmd.remove.tid = cpu_to_le32(tid);
        }
 
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
-                                         &cmd, &baid);
+       ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
        if (ret)
                return ret;
 
index 264f1f9394b6de26af044f5d7dc971ad73d7e7c7..754a05a8c189bcb7e2af33bbe98d549e8547dc5a 100644 (file)
@@ -662,6 +662,11 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta);
 int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+                              struct iwl_mvm_sta *mvm_sta,
+                              struct iwl_mvm_link_sta *mvm_sta_link,
+                              unsigned int link_id,
+                              bool is_in_fw);
 int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
 int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
                                 struct ieee80211_vif *vif,
index 0971c164b57e926d2d22dd1ef4f0559d45420db2..c27acaf0eb1cf7e9698adec73868867f349620e8 100644 (file)
@@ -1326,6 +1326,10 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
 #endif /* CONFIG_PM */
 
 const struct ieee80211_ops mt7615_ops = {
+       .add_chanctx = ieee80211_emulate_add_chanctx,
+       .remove_chanctx = ieee80211_emulate_remove_chanctx,
+       .change_chanctx = ieee80211_emulate_change_chanctx,
+       .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
        .tx = mt7615_tx,
        .start = mt7615_start,
        .stop = mt7615_stop,
index 7d9fb9f2d52799b1e1c92ae285943bdd921114a0..089102ed9ae51b8fa42c0d49cdaacf34b7ed670b 100644 (file)
@@ -237,11 +237,12 @@ static int set_channel(struct wiphy *wiphy,
        struct wilc_vif *vif;
        u32 channelnum;
        int result;
+       int srcu_idx;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        vif = wilc_get_wl_to_vif(wl);
        if (IS_ERR(vif)) {
-               rcu_read_unlock();
+               srcu_read_unlock(&wl->srcu, srcu_idx);
                return PTR_ERR(vif);
        }
 
@@ -252,7 +253,7 @@ static int set_channel(struct wiphy *wiphy,
        if (result)
                netdev_err(vif->ndev, "Error in setting channel\n");
 
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
        return result;
 }
 
@@ -805,8 +806,9 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
        struct wilc *wl = wiphy_priv(wiphy);
        struct wilc_vif *vif;
        struct wilc_priv *priv;
+       int srcu_idx;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        vif = wilc_get_wl_to_vif(wl);
        if (IS_ERR(vif))
                goto out;
@@ -861,7 +863,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
                netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n");
 
 out:
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
        return ret;
 }
 
@@ -1537,19 +1539,20 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
 
        if (type == NL80211_IFTYPE_MONITOR) {
                struct net_device *ndev;
+               int srcu_idx;
 
-               rcu_read_lock();
+               srcu_idx = srcu_read_lock(&wl->srcu);
                vif = wilc_get_vif_from_type(wl, WILC_AP_MODE);
                if (!vif) {
                        vif = wilc_get_vif_from_type(wl, WILC_GO_MODE);
                        if (!vif) {
-                               rcu_read_unlock();
+                               srcu_read_unlock(&wl->srcu, srcu_idx);
                                goto validate_interface;
                        }
                }
 
                if (vif->monitor_flag) {
-                       rcu_read_unlock();
+                       srcu_read_unlock(&wl->srcu, srcu_idx);
                        goto validate_interface;
                }
 
@@ -1557,12 +1560,12 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
                if (ndev) {
                        vif->monitor_flag = 1;
                } else {
-                       rcu_read_unlock();
+                       srcu_read_unlock(&wl->srcu, srcu_idx);
                        return ERR_PTR(-EINVAL);
                }
 
                wdev = &vif->priv.wdev;
-               rcu_read_unlock();
+               srcu_read_unlock(&wl->srcu, srcu_idx);
                return wdev;
        }
 
@@ -1610,7 +1613,7 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
        list_del_rcu(&vif->list);
        wl->vif_num--;
        mutex_unlock(&wl->vif_mutex);
-       synchronize_rcu();
+       synchronize_srcu(&wl->srcu);
        return 0;
 }
 
@@ -1635,23 +1638,25 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
 {
        struct wilc *wl = wiphy_priv(wiphy);
        struct wilc_vif *vif;
+       int srcu_idx;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        vif = wilc_get_wl_to_vif(wl);
        if (IS_ERR(vif)) {
-               rcu_read_unlock();
+               srcu_read_unlock(&wl->srcu, srcu_idx);
                return;
        }
 
        netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
        wilc_set_wowlan_trigger(vif, enabled);
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
 }
 
 static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
                        enum nl80211_tx_power_setting type, int mbm)
 {
        int ret;
+       int srcu_idx;
        s32 tx_power = MBM_TO_DBM(mbm);
        struct wilc *wl = wiphy_priv(wiphy);
        struct wilc_vif *vif;
@@ -1659,10 +1664,10 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
        if (!wl->initialized)
                return -EIO;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        vif = wilc_get_wl_to_vif(wl);
        if (IS_ERR(vif)) {
-               rcu_read_unlock();
+               srcu_read_unlock(&wl->srcu, srcu_idx);
                return -EINVAL;
        }
 
@@ -1674,7 +1679,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
        ret = wilc_set_tx_power(vif, tx_power);
        if (ret)
                netdev_err(vif->ndev, "Failed to set tx power\n");
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
 
        return ret;
 }
@@ -1757,6 +1762,7 @@ static void wlan_init_locks(struct wilc *wl)
        init_completion(&wl->cfg_event);
        init_completion(&wl->sync_event);
        init_completion(&wl->txq_thread_started);
+       init_srcu_struct(&wl->srcu);
 }
 
 void wlan_deinit_locks(struct wilc *wilc)
@@ -1767,6 +1773,7 @@ void wlan_deinit_locks(struct wilc *wilc)
        mutex_destroy(&wilc->txq_add_to_head_cs);
        mutex_destroy(&wilc->vif_mutex);
        mutex_destroy(&wilc->deinit_lock);
+       cleanup_srcu_struct(&wilc->srcu);
 }
 
 int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
index 919de6ffb8217c54375df09f8ecde8e1bfa997bf..f1085ccb7eedc025aba6a6cf45ecd1bc4edef66c 100644 (file)
@@ -1570,11 +1570,12 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
        struct host_if_drv *hif_drv;
        struct host_if_msg *msg;
        struct wilc_vif *vif;
+       int srcu_idx;
        int result;
        int id;
 
        id = get_unaligned_le32(&buffer[length - 4]);
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        vif = wilc_get_vif_from_idx(wilc, id);
        if (!vif)
                goto out;
@@ -1593,7 +1594,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
        msg->body.net_info.rssi = buffer[8];
        msg->body.net_info.mgmt = kmemdup(&buffer[9],
                                          msg->body.net_info.frame_len,
-                                         GFP_ATOMIC);
+                                         GFP_KERNEL);
        if (!msg->body.net_info.mgmt) {
                kfree(msg);
                goto out;
@@ -1606,7 +1607,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
                kfree(msg);
        }
 out:
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 }
 
 void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
@@ -1614,13 +1615,14 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
        struct host_if_drv *hif_drv;
        struct host_if_msg *msg;
        struct wilc_vif *vif;
+       int srcu_idx;
        int result;
        int id;
 
        mutex_lock(&wilc->deinit_lock);
 
        id = get_unaligned_le32(&buffer[length - 4]);
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        vif = wilc_get_vif_from_idx(wilc, id);
        if (!vif)
                goto out;
@@ -1647,7 +1649,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
                kfree(msg);
        }
 out:
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
        mutex_unlock(&wilc->deinit_lock);
 }
 
@@ -1655,11 +1657,12 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
 {
        struct host_if_drv *hif_drv;
        struct wilc_vif *vif;
+       int srcu_idx;
        int result;
        int id;
 
        id = get_unaligned_le32(&buffer[length - 4]);
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        vif = wilc_get_vif_from_idx(wilc, id);
        if (!vif)
                goto out;
@@ -1684,7 +1687,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
                }
        }
 out:
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 }
 
 int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan,
index 73f56f7b002bf373277804db28c18cc8a4338a67..710e29bea5605843a8c2e78406de3d8242bbb16c 100644 (file)
@@ -127,28 +127,30 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
 
 int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
 {
+       int srcu_idx;
        u8 ret_val = 0;
        struct wilc_vif *vif;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        wilc_for_each_vif(wilc, vif) {
                if (!is_zero_ether_addr(vif->bssid))
                        ret_val++;
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
        return ret_val;
 }
 
 static void wilc_wake_tx_queues(struct wilc *wl)
 {
+       int srcu_idx;
        struct wilc_vif *ifc;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        wilc_for_each_vif(wl, ifc) {
                if (ifc->mac_opened && netif_queue_stopped(ifc->ndev))
                        netif_wake_queue(ifc->ndev);
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
 }
 
 static int wilc_txq_task(void *vp)
@@ -653,6 +655,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
        struct sockaddr *addr = (struct sockaddr *)p;
        unsigned char mac_addr[ETH_ALEN];
        struct wilc_vif *tmp_vif;
+       int srcu_idx;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
@@ -664,19 +667,19 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
 
        /* Verify MAC Address is not already in use: */
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        wilc_for_each_vif(wilc, tmp_vif) {
                wilc_get_mac_address(tmp_vif, mac_addr);
                if (ether_addr_equal(addr->sa_data, mac_addr)) {
                        if (vif != tmp_vif) {
-                               rcu_read_unlock();
+                               srcu_read_unlock(&wilc->srcu, srcu_idx);
                                return -EADDRNOTAVAIL;
                        }
-                       rcu_read_unlock();
+                       srcu_read_unlock(&wilc->srcu, srcu_idx);
                        return 0;
                }
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 
        result = wilc_set_mac_address(vif, (u8 *)addr->sa_data);
        if (result)
@@ -764,14 +767,15 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
                                                wilc_tx_complete);
 
        if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) {
+               int srcu_idx;
                struct wilc_vif *vif;
 
-               rcu_read_lock();
+               srcu_idx = srcu_read_lock(&wilc->srcu);
                wilc_for_each_vif(wilc, vif) {
                        if (vif->mac_opened)
                                netif_stop_queue(vif->ndev);
                }
-               rcu_read_unlock();
+               srcu_read_unlock(&wilc->srcu, srcu_idx);
        }
 
        return NETDEV_TX_OK;
@@ -815,12 +819,13 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
        unsigned int frame_len = 0;
        struct wilc_vif *vif;
        struct sk_buff *skb;
+       int srcu_idx;
        int stats;
 
        if (!wilc)
                return;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        wilc_netdev = get_if_handler(wilc, buff);
        if (!wilc_netdev)
                goto out;
@@ -848,14 +853,15 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
                netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats);
        }
 out:
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 }
 
 void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
 {
+       int srcu_idx;
        struct wilc_vif *vif;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        wilc_for_each_vif(wilc, vif) {
                struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff;
                u16 type = le16_to_cpup((__le16 *)buff);
@@ -876,7 +882,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
                if (vif->monitor_flag)
                        wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size);
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 }
 
 static const struct net_device_ops wilc_netdev_ops = {
@@ -906,7 +912,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
                list_del_rcu(&vif->list);
                wilc->vif_num--;
                mutex_unlock(&wilc->vif_mutex);
-               synchronize_rcu();
+               synchronize_srcu(&wilc->srcu);
                if (vif->ndev)
                        unregister_netdev(vif->ndev);
        }
@@ -925,15 +931,16 @@ static u8 wilc_get_available_idx(struct wilc *wl)
 {
        int idx = 0;
        struct wilc_vif *vif;
+       int srcu_idx;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        wilc_for_each_vif(wl, vif) {
                if (vif->idx == 0)
                        idx = 1;
                else
                        idx = 0;
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
        return idx;
 }
 
@@ -983,7 +990,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
        list_add_tail_rcu(&vif->list, &wl->vif_list);
        wl->vif_num += 1;
        mutex_unlock(&wl->vif_mutex);
-       synchronize_rcu();
+       synchronize_srcu(&wl->srcu);
 
        return vif;
 
index eecee3973d6a420cce81f721ea1c9a24de0bcccd..fde8610a9c84ba0c82b61a214643f2f9b2d43da8 100644 (file)
@@ -32,8 +32,8 @@
 
 #define wilc_for_each_vif(w, v) \
        struct wilc *_w = w; \
-       list_for_each_entry_rcu(v, &_w->vif_list, list, \
-                                rcu_read_lock_held())
+       list_for_each_entry_srcu(v, &_w->vif_list, list, \
+                                srcu_read_lock_held(&_w->srcu))
 
 struct wilc_wfi_stats {
        unsigned long rx_packets;
@@ -220,6 +220,14 @@ struct wilc {
 
        /* protect vif list */
        struct mutex vif_mutex;
+       /* Sleepable RCU struct to manipulate vif list. Sleepable version is
+        * needed over the classic RCU version because the driver's current
+        * design involves some sleeping code while manipulating a vif
+        * retrieved from vif list (so in a SRCU critical section), like:
+        * - sending commands to the chip, using info from retrieved vif
+        * - registering a new monitoring net device
+        */
+       struct srcu_struct srcu;
        u8 open_ifcs;
 
        /* protect head of transmit queue */
index 37c32d17856ea70e4aa15146463c08807da59e84..a9e872a7b2c38b59b55cb617af08d6914970baf5 100644 (file)
@@ -712,6 +712,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
        u32 *vmm_table = wilc->vmm_table;
        u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
        const struct wilc_hif_func *func;
+       int srcu_idx;
        u8 *txb = wilc->tx_buffer;
        struct wilc_vif *vif;
 
@@ -723,10 +724,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
 
        mutex_lock(&wilc->txq_add_to_head_cs);
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        wilc_for_each_vif(wilc, vif)
                wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev);
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 
        for (ac = 0; ac < NQUEUES; ac++)
                tqe_q[ac] = wilc_wlan_txq_get_first(wilc, ac);
index 2e60a6991ca1665db8c4d5c3730500e9dc53eded..42b7db12b1bd41d0c3abbaea60bb2218cbab4894 100644 (file)
@@ -633,21 +633,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
                }
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
-               rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
-                       "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
-                       hw->conf.long_frame_max_tx_count);
-               /* brought up everything changes (changed == ~0) indicates first
-                * open, so use our default value instead of that of wiphy.
-                */
-               if (changed != ~0) {
-                       mac->retry_long = hw->conf.long_frame_max_tx_count;
-                       mac->retry_short = hw->conf.long_frame_max_tx_count;
-                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
-                               (u8 *)(&hw->conf.long_frame_max_tx_count));
-               }
-       }
-
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
            !rtlpriv->proximity.proxim_on) {
                struct ieee80211_channel *channel = hw->conf.chandef.chan;
index bef6819986e93914edcf659e7e2591e3058ba8a2..33d6342124bc339ab403c3046c32db10573aabf9 100644 (file)
@@ -211,7 +211,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink)
                        rc = PTR_ERR(devlink->cd_regions[i]);
                        dev_err(devlink->dev, "Devlink region fail,err %d", rc);
                        /* Delete previously created regions */
-                       for ( ; i >= 0; i--)
+                       for (i--; i >= 0; i--)
                                devlink_region_destroy(devlink->cd_regions[i]);
                        goto region_create_fail;
                }
index 590b038e449e5c61c9950ce6074fe94020915207..6b89d596ba9afe7f5d1225ec2681be6705137532 100644 (file)
@@ -125,6 +125,10 @@ static ssize_t virtual_ncidev_write(struct file *file,
                kfree_skb(skb);
                return -EFAULT;
        }
+       if (strnlen(skb->data, count) != count) {
+               kfree_skb(skb);
+               return -EINVAL;
+       }
 
        nci_recv_frame(vdev->ndev, skb);
        return count;
index 174900072c18cd7c255fe603c68966a1ca7ea4fb..462375b293e47577a68ab79a51d00101d59b1e9d 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 
+#include "of_private.h"
+
 /**
  * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
  * @dev: Device node of the device whose interrupt is to be mapped
@@ -96,6 +98,57 @@ static const char * const of_irq_imap_abusers[] = {
        NULL,
 };
 
+const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_phandle_args *out_irq)
+{
+       u32 intsize, addrsize;
+       struct device_node *np;
+
+       /* Get the interrupt parent */
+       if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
+               np = of_node_get(of_irq_dflt_pic);
+       else
+               np = of_find_node_by_phandle(be32_to_cpup(imap));
+       imap++;
+
+       /* Check if not found */
+       if (!np) {
+               pr_debug(" -> imap parent not found !\n");
+               return NULL;
+       }
+
+       /* Get #interrupt-cells and #address-cells of new parent */
+       if (of_property_read_u32(np, "#interrupt-cells",
+                                       &intsize)) {
+               pr_debug(" -> parent lacks #interrupt-cells!\n");
+               of_node_put(np);
+               return NULL;
+       }
+       if (of_property_read_u32(np, "#address-cells",
+                                       &addrsize))
+               addrsize = 0;
+
+       pr_debug(" -> intsize=%d, addrsize=%d\n",
+               intsize, addrsize);
+
+       /* Check for malformed properties */
+       if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS)
+               || (len < (addrsize + intsize))) {
+               of_node_put(np);
+               return NULL;
+       }
+
+       pr_debug(" -> imaplen=%d\n", len);
+
+       imap += addrsize + intsize;
+
+       out_irq->np = np;
+       for (int i = 0; i < intsize; i++)
+               out_irq->args[i] = be32_to_cpup(imap - intsize + i);
+       out_irq->args_count = intsize;
+
+       return imap;
+}
+
 /**
  * of_irq_parse_raw - Low level interrupt tree parsing
  * @addr:      address specifier (start of "reg" property of the device) in be32 format
@@ -112,12 +165,12 @@ static const char * const of_irq_imap_abusers[] = {
  */
 int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 {
-       struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
+       struct device_node *ipar, *tnode, *old = NULL;
        __be32 initial_match_array[MAX_PHANDLE_ARGS];
        const __be32 *match_array = initial_match_array;
-       const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
-       u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
-       int imaplen, match, i, rc = -EINVAL;
+       const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
+       u32 intsize = 1, addrsize;
+       int i, rc = -EINVAL;
 
 #ifdef DEBUG
        of_print_phandle_args("of_irq_parse_raw: ", out_irq);
@@ -176,6 +229,9 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 
        /* Now start the actual "proper" walk of the interrupt tree */
        while (ipar != NULL) {
+               int imaplen, match;
+               const __be32 *imap, *oldimap, *imask;
+               struct device_node *newpar;
                /*
                 * Now check if cursor is an interrupt-controller and
                 * if it is then we are done, unless there is an
@@ -216,7 +272,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 
                /* Parse interrupt-map */
                match = 0;
-               while (imaplen > (addrsize + intsize + 1) && !match) {
+               while (imaplen > (addrsize + intsize + 1)) {
                        /* Compare specifiers */
                        match = 1;
                        for (i = 0; i < (addrsize + intsize); i++, imaplen--)
@@ -224,48 +280,17 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 
                        pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
 
-                       /* Get the interrupt parent */
-                       if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
-                               newpar = of_node_get(of_irq_dflt_pic);
-                       else
-                               newpar = of_find_node_by_phandle(be32_to_cpup(imap));
-                       imap++;
-                       --imaplen;
-
-                       /* Check if not found */
-                       if (newpar == NULL) {
-                               pr_debug(" -> imap parent not found !\n");
-                               goto fail;
-                       }
-
-                       if (!of_device_is_available(newpar))
-                               match = 0;
-
-                       /* Get #interrupt-cells and #address-cells of new
-                        * parent
-                        */
-                       if (of_property_read_u32(newpar, "#interrupt-cells",
-                                                &newintsize)) {
-                               pr_debug(" -> parent lacks #interrupt-cells!\n");
-                               goto fail;
-                       }
-                       if (of_property_read_u32(newpar, "#address-cells",
-                                                &newaddrsize))
-                               newaddrsize = 0;
-
-                       pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
-                           newintsize, newaddrsize);
-
-                       /* Check for malformed properties */
-                       if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS)
-                           || (imaplen < (newaddrsize + newintsize))) {
-                               rc = -EFAULT;
+                       oldimap = imap;
+                       imap = of_irq_parse_imap_parent(oldimap, imaplen, out_irq);
+                       if (!imap)
                                goto fail;
-                       }
 
-                       imap += newaddrsize + newintsize;
-                       imaplen -= newaddrsize + newintsize;
+                       match &= of_device_is_available(out_irq->np);
+                       if (match)
+                               break;
 
+                       of_node_put(out_irq->np);
+                       imaplen -= imap - oldimap;
                        pr_debug(" -> imaplen=%d\n", imaplen);
                }
                if (!match) {
@@ -287,11 +312,11 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
                 * Successfully parsed an interrupt-map translation; copy new
                 * interrupt specifier into the out_irq structure
                 */
-               match_array = imap - newaddrsize - newintsize;
-               for (i = 0; i < newintsize; i++)
-                       out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
-               out_irq->args_count = intsize = newintsize;
-               addrsize = newaddrsize;
+               match_array = oldimap + 1;
+
+               newpar = out_irq->np;
+               intsize = out_irq->args_count;
+               addrsize = (imap - match_array) - intsize;
 
                if (ipar == newpar) {
                        pr_debug("%pOF interrupt-map entry to self\n", ipar);
@@ -300,7 +325,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 
        skiplevel:
                /* Iterate again with new parent */
-               out_irq->np = newpar;
                pr_debug(" -> new parent: %pOF\n", newpar);
                of_node_put(ipar);
                ipar = newpar;
@@ -310,7 +334,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 
  fail:
        of_node_put(ipar);
-       of_node_put(newpar);
 
        return rc;
 }
index 94fc0aa07af9e383a1e0d844ac620f26a025bfd1..04aa2a91f851acd9b2da47c5b53edb025b9a1f8f 100644 (file)
@@ -159,6 +159,9 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np,
 extern int of_bus_n_addr_cells(struct device_node *np);
 extern int of_bus_n_size_cells(struct device_node *np);
 
+const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len,
+                                      struct of_phandle_args *out_irq);
+
 struct bus_dma_region;
 #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA)
 int of_dma_get_range(struct device_node *np,
index a9301d293f014cb2e088b1826397c2001331866e..c85a258bc6ae64b83b76fdfb99533914b02ff8b3 100644 (file)
@@ -54,4 +54,5 @@ static struct kunit_suite of_dtb_suite = {
 kunit_test_suites(
        &of_dtb_suite,
 );
+MODULE_DESCRIPTION("KUnit tests for OF APIs");
 MODULE_LICENSE("GPL");
index 1c83e68f805baa50af98da953a6434740e8892c9..164d77cb944585d6da7d4931a9fe5a121892841e 100644 (file)
@@ -1306,10 +1306,10 @@ static struct device_node *parse_interrupts(struct device_node *np,
 static struct device_node *parse_interrupt_map(struct device_node *np,
                                               const char *prop_name, int index)
 {
-       const __be32 *imap, *imap_end, *addr;
+       const __be32 *imap, *imap_end;
        struct of_phandle_args sup_args;
        u32 addrcells, intcells;
-       int i, imaplen;
+       int imaplen;
 
        if (!IS_ENABLED(CONFIG_OF_IRQ))
                return NULL;
@@ -1322,33 +1322,23 @@ static struct device_node *parse_interrupt_map(struct device_node *np,
        addrcells = of_bus_n_addr_cells(np);
 
        imap = of_get_property(np, "interrupt-map", &imaplen);
-       if (!imap || imaplen <= (addrcells + intcells))
+       imaplen /= sizeof(*imap);
+       if (!imap)
                return NULL;
-       imap_end = imap + imaplen;
 
-       while (imap < imap_end) {
-               addr = imap;
-               imap += addrcells;
+       imap_end = imap + imaplen;
 
-               sup_args.np = np;
-               sup_args.args_count = intcells;
-               for (i = 0; i < intcells; i++)
-                       sup_args.args[i] = be32_to_cpu(imap[i]);
-               imap += intcells;
+       for (int i = 0; imap + addrcells + intcells + 1 < imap_end; i++) {
+               imap += addrcells + intcells;
 
-               /*
-                * Upon success, the function of_irq_parse_raw() returns
-                * interrupt controller DT node pointer in sup_args.np.
-                */
-               if (of_irq_parse_raw(addr, &sup_args))
+               imap = of_irq_parse_imap_parent(imap, imap_end - imap, &sup_args);
+               if (!imap)
                        return NULL;
 
-               if (!index)
+               if (i == index)
                        return sup_args.np;
 
                of_node_put(sup_args.np);
-               imap += sup_args.args_count + 1;
-               index--;
        }
 
        return NULL;
index 30f031de9cfe8c879a702a534d985bf8204f57a2..b123da16b63ba309940c4b42b0346a8d24eb5380 100644 (file)
@@ -289,8 +289,6 @@ void pci_cfg_access_lock(struct pci_dev *dev)
 {
        might_sleep();
 
-       lock_map_acquire(&dev->cfg_access_lock);
-
        raw_spin_lock_irq(&pci_lock);
        if (dev->block_cfg_access)
                pci_wait_cfg(dev);
@@ -345,8 +343,6 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
        raw_spin_unlock_irqrestore(&pci_lock, flags);
 
        wake_up_all(&pci_cfg_wait);
-
-       lock_map_release(&dev->cfg_access_lock);
 }
 EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
 
index 59e0949fb079d5f62edb5f93bd19a22f8b8d3d48..35fb1f17a589c164daff9127f1cd1412cc52df9f 100644 (file)
@@ -4883,7 +4883,6 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
  */
 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
 {
-       lock_map_assert_held(&dev->cfg_access_lock);
        pcibios_reset_secondary_bus(dev);
 
        return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
index 8e696e547565c3bbb7700c8c61196d3e435f66f2..5fbabb4e3425fb2514a2f2fcced2d469f020eac3 100644 (file)
@@ -2546,9 +2546,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
        dev->dev.dma_mask = &dev->dma_mask;
        dev->dev.dma_parms = &dev->dma_parms;
        dev->dev.coherent_dma_mask = 0xffffffffull;
-       lockdep_register_key(&dev->cfg_access_key);
-       lockdep_init_map(&dev->cfg_access_lock, dev_name(&dev->dev),
-                        &dev->cfg_access_key, 0);
 
        dma_set_max_seg_size(&dev->dev, 65536);
        dma_set_seg_boundary(&dev->dev, 0xffffffff);
index 0ec952b5d03e770fbc1c6cf6427e9cf535762359..665fa9524986595b02826b3645a2f4172b5c048e 100644 (file)
@@ -136,6 +136,7 @@ config YOGABOOK
 config YT2_1380
        tristate "Lenovo Yoga Tablet 2 1380 fast charge driver"
        depends on SERIAL_DEV_BUS
+       depends on EXTCON
        depends on ACPI
        help
          Say Y here to enable support for the custom fast charging protocol
@@ -515,6 +516,7 @@ config THINKPAD_ACPI
        select NVRAM
        select NEW_LEDS
        select LEDS_CLASS
+       select INPUT_SPARSEKMAP
        help
          This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
          support for Fn-Fx key combinations, Bluetooth control, video
index d84ea66eecc6b65656f5af7b17085b35a2ce6bc2..8fcf38eed7f00ee01aade6e3e55e20402458d5aa 100644 (file)
@@ -907,16 +907,44 @@ static int hsmp_plat_dev_register(void)
        return ret;
 }
 
+/*
+ * This check is only needed for backward compatibility of previous platforms.
+ * All new platforms are expected to support ACPI based probing.
+ */
+static bool legacy_hsmp_support(void)
+{
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+               return false;
+
+       switch (boot_cpu_data.x86) {
+       case 0x19:
+               switch (boot_cpu_data.x86_model) {
+               case 0x00 ... 0x1F:
+               case 0x30 ... 0x3F:
+               case 0x90 ... 0x9F:
+               case 0xA0 ... 0xAF:
+                       return true;
+               default:
+                       return false;
+               }
+       case 0x1A:
+               switch (boot_cpu_data.x86_model) {
+               case 0x00 ... 0x1F:
+                       return true;
+               default:
+                       return false;
+               }
+       default:
+               return false;
+       }
+
+       return false;
+}
+
 static int __init hsmp_plt_init(void)
 {
        int ret = -ENODEV;
 
-       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
-               pr_err("HSMP is not supported on Family:%x model:%x\n",
-                      boot_cpu_data.x86, boot_cpu_data.x86_model);
-               return ret;
-       }
-
        /*
         * amd_nb_num() returns number of SMN/DF interfaces present in the system
         * if we have N SMN/DF interfaces that ideally means N sockets
@@ -930,7 +958,15 @@ static int __init hsmp_plt_init(void)
                return ret;
 
        if (!plat_dev.is_acpi_device) {
-               ret = hsmp_plat_dev_register();
+               if (legacy_hsmp_support()) {
+                       /* Not ACPI device, but supports HSMP, register a plat_dev */
+                       ret = hsmp_plat_dev_register();
+               } else {
+                       /* Not ACPI, Does not support HSMP */
+                       pr_info("HSMP is not supported on Family:%x model:%x\n",
+                               boot_cpu_data.x86, boot_cpu_data.x86_model);
+                       ret = -ENODEV;
+               }
                if (ret)
                        platform_driver_unregister(&amd_hsmp_driver);
        }
index e61bfaf8b5c48faf8fdf2df15cf0faa140b5869e..b562ed99ec4e7964d83dfbbab688299be47dd839 100644 (file)
@@ -11,6 +11,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/container_of.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/capability.h>
@@ -25,11 +26,16 @@ static u32 da_supported_commands;
 static int da_num_tokens;
 static struct platform_device *platform_device;
 static struct calling_interface_token *da_tokens;
-static struct device_attribute *token_location_attrs;
-static struct device_attribute *token_value_attrs;
+static struct token_sysfs_data *token_entries;
 static struct attribute **token_attrs;
 static DEFINE_MUTEX(smbios_mutex);
 
+struct token_sysfs_data {
+       struct device_attribute location_attr;
+       struct device_attribute value_attr;
+       struct calling_interface_token *token;
+};
+
 struct smbios_device {
        struct list_head list;
        struct device *device;
@@ -416,47 +422,26 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
        }
 }
 
-static int match_attribute(struct device *dev,
-                          struct device_attribute *attr)
-{
-       int i;
-
-       for (i = 0; i < da_num_tokens * 2; i++) {
-               if (!token_attrs[i])
-                       continue;
-               if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
-                       return i/2;
-       }
-       dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
-       return -EINVAL;
-}
-
 static ssize_t location_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
 {
-       int i;
+       struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, location_attr);
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       i = match_attribute(dev, attr);
-       if (i > 0)
-               return sysfs_emit(buf, "%08x", da_tokens[i].location);
-       return 0;
+       return sysfs_emit(buf, "%08x", data->token->location);
 }
 
 static ssize_t value_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
 {
-       int i;
+       struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, value_attr);
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       i = match_attribute(dev, attr);
-       if (i > 0)
-               return sysfs_emit(buf, "%08x", da_tokens[i].value);
-       return 0;
+       return sysfs_emit(buf, "%08x", data->token->value);
 }
 
 static struct attribute_group smbios_attribute_group = {
@@ -473,22 +458,15 @@ static int build_tokens_sysfs(struct platform_device *dev)
 {
        char *location_name;
        char *value_name;
-       size_t size;
        int ret;
        int i, j;
 
-       /* (number of tokens  + 1 for null terminated */
-       size = sizeof(struct device_attribute) * (da_num_tokens + 1);
-       token_location_attrs = kzalloc(size, GFP_KERNEL);
-       if (!token_location_attrs)
+       token_entries = kcalloc(da_num_tokens, sizeof(*token_entries), GFP_KERNEL);
+       if (!token_entries)
                return -ENOMEM;
-       token_value_attrs = kzalloc(size, GFP_KERNEL);
-       if (!token_value_attrs)
-               goto out_allocate_value;
 
        /* need to store both location and value + terminator*/
-       size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
-       token_attrs = kzalloc(size, GFP_KERNEL);
+       token_attrs = kcalloc((2 * da_num_tokens) + 1, sizeof(*token_attrs), GFP_KERNEL);
        if (!token_attrs)
                goto out_allocate_attrs;
 
@@ -496,32 +474,34 @@ static int build_tokens_sysfs(struct platform_device *dev)
                /* skip empty */
                if (da_tokens[i].tokenID == 0)
                        continue;
+
+               token_entries[i].token = &da_tokens[i];
+
                /* add location */
                location_name = kasprintf(GFP_KERNEL, "%04x_location",
                                          da_tokens[i].tokenID);
                if (location_name == NULL)
                        goto out_unwind_strings;
-               sysfs_attr_init(&token_location_attrs[i].attr);
-               token_location_attrs[i].attr.name = location_name;
-               token_location_attrs[i].attr.mode = 0444;
-               token_location_attrs[i].show = location_show;
-               token_attrs[j++] = &token_location_attrs[i].attr;
+
+               sysfs_attr_init(&token_entries[i].location_attr.attr);
+               token_entries[i].location_attr.attr.name = location_name;
+               token_entries[i].location_attr.attr.mode = 0444;
+               token_entries[i].location_attr.show = location_show;
+               token_attrs[j++] = &token_entries[i].location_attr.attr;
 
                /* add value */
                value_name = kasprintf(GFP_KERNEL, "%04x_value",
                                       da_tokens[i].tokenID);
-               if (value_name == NULL)
-                       goto loop_fail_create_value;
-               sysfs_attr_init(&token_value_attrs[i].attr);
-               token_value_attrs[i].attr.name = value_name;
-               token_value_attrs[i].attr.mode = 0444;
-               token_value_attrs[i].show = value_show;
-               token_attrs[j++] = &token_value_attrs[i].attr;
-               continue;
-
-loop_fail_create_value:
-               kfree(location_name);
-               goto out_unwind_strings;
+               if (!value_name) {
+                       kfree(location_name);
+                       goto out_unwind_strings;
+               }
+
+               sysfs_attr_init(&token_entries[i].value_attr.attr);
+               token_entries[i].value_attr.attr.name = value_name;
+               token_entries[i].value_attr.attr.mode = 0444;
+               token_entries[i].value_attr.show = value_show;
+               token_attrs[j++] = &token_entries[i].value_attr.attr;
        }
        smbios_attribute_group.attrs = token_attrs;
 
@@ -532,14 +512,12 @@ loop_fail_create_value:
 
 out_unwind_strings:
        while (i--) {
-               kfree(token_location_attrs[i].attr.name);
-               kfree(token_value_attrs[i].attr.name);
+               kfree(token_entries[i].location_attr.attr.name);
+               kfree(token_entries[i].value_attr.attr.name);
        }
        kfree(token_attrs);
 out_allocate_attrs:
-       kfree(token_value_attrs);
-out_allocate_value:
-       kfree(token_location_attrs);
+       kfree(token_entries);
 
        return -ENOMEM;
 }
@@ -551,12 +529,11 @@ static void free_group(struct platform_device *pdev)
        sysfs_remove_group(&pdev->dev.kobj,
                                &smbios_attribute_group);
        for (i = 0; i < da_num_tokens; i++) {
-               kfree(token_location_attrs[i].attr.name);
-               kfree(token_value_attrs[i].attr.name);
+               kfree(token_entries[i].location_attr.attr.name);
+               kfree(token_entries[i].value_attr.attr.name);
        }
        kfree(token_attrs);
-       kfree(token_value_attrs);
-       kfree(token_location_attrs);
+       kfree(token_entries);
 }
 
 static int __init dell_smbios_init(void)
index 7bac7841ff0abaa101252428601b23752beace07..7fa360073f6ef48c2d6607c7f75c022ce9dbdf4e 100644 (file)
@@ -1610,8 +1610,8 @@ void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
        tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition);
        /* Free the package instance when the all partitions are removed */
        if (!tpmi_sst->partition_mask_current) {
-               kfree(tpmi_sst);
                isst_common.sst_inst[tpmi_sst->package_id] = NULL;
+               kfree(tpmi_sst);
        }
        mutex_unlock(&isst_tpmi_dev_lock);
 }
index c6a10ec2c83f666b936247abbc17fe4bf2d87304..f74af0a689f20c456cf8b2415aaa37d3490618fa 100644 (file)
@@ -9,10 +9,13 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/ctype.h>
 #include <linux/device.h>
 #include <linux/dmi.h>
 #include <linux/efi_embedded_fw.h>
 #include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kstrtox.h>
 #include <linux/notifier.h>
 #include <linux/property.h>
 #include <linux/string.h>
@@ -31,7 +34,6 @@ static const struct property_entry archos_101_cesium_educ_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-archos-101-cesium-educ.fw"),
        { }
@@ -46,7 +48,6 @@ static const struct property_entry bush_bush_windows_tablet_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1850),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-bush-bush-windows-tablet.fw"),
        { }
@@ -76,7 +77,6 @@ static const struct property_entry chuwi_hi8_air_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-hi8-air.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -92,7 +92,6 @@ static const struct property_entry chuwi_hi8_pro_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -120,7 +119,6 @@ static const struct property_entry chuwi_hi10_air_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 5),
        PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 4),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-air.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -136,7 +134,6 @@ static const struct property_entry chuwi_hi10_plus_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        PROPERTY_ENTRY_BOOL("silead,pen-supported"),
        PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
@@ -168,7 +165,6 @@ static const struct property_entry chuwi_hi10_pro_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"),
        PROPERTY_ENTRY_U32_ARRAY("silead,efi-fw-min-max", chuwi_hi10_pro_efi_min_max),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        PROPERTY_ENTRY_BOOL("silead,pen-supported"),
        PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
@@ -198,7 +194,6 @@ static const struct property_entry chuwi_hibook_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -224,7 +219,6 @@ static const struct property_entry chuwi_vi8_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-vi8.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -252,7 +246,6 @@ static const struct property_entry chuwi_vi10_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1858),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-vi10.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -268,7 +261,6 @@ static const struct property_entry chuwi_surbook_mini_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 2040),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1524),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-surbook-mini.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        { }
 };
@@ -286,7 +278,6 @@ static const struct property_entry connect_tablet9_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-connect-tablet9.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -303,7 +294,6 @@ static const struct property_entry csl_panther_tab_hd_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -319,7 +309,6 @@ static const struct property_entry cube_iwork8_air_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 896),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -343,7 +332,6 @@ static const struct property_entry cube_knote_i1101_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1961),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1513),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-cube-knote-i1101.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -357,7 +345,6 @@ static const struct property_entry dexp_ursus_7w_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 890),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 630),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -373,7 +360,6 @@ static const struct property_entry dexp_ursus_kx210i_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -388,7 +374,6 @@ static const struct property_entry digma_citi_e200_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -447,7 +432,6 @@ static const struct property_entry irbis_tw90_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-irbis_tw90.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -463,7 +447,6 @@ static const struct property_entry irbis_tw118_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1960),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1510),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-irbis-tw118.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -480,7 +463,6 @@ static const struct property_entry itworks_tw891_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -493,7 +475,6 @@ static const struct property_entry jumper_ezpad_6_pro_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -508,7 +489,6 @@ static const struct property_entry jumper_ezpad_6_pro_b_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro-b.fw"),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -524,7 +504,6 @@ static const struct property_entry jumper_ezpad_6_m4_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1950),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1525),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-m4.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -541,7 +520,6 @@ static const struct property_entry jumper_ezpad_7_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-jumper-ezpad-7.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,stuck-controller-bug"),
        { }
 };
@@ -558,7 +536,6 @@ static const struct property_entry jumper_ezpad_mini3_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1138),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -575,7 +552,6 @@ static const struct property_entry mpman_converter9_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-mpman-converter9.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -591,7 +567,6 @@ static const struct property_entry mpman_mpwin895cl_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-mpman-mpwin895cl.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -608,7 +583,6 @@ static const struct property_entry myria_my8307_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-myria-my8307.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -625,7 +599,6 @@ static const struct property_entry onda_obook_20_plus_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-obook-20-plus.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -642,7 +615,6 @@ static const struct property_entry onda_v80_plus_v3_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -666,7 +638,6 @@ static const struct property_entry onda_v820w_32g_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -684,7 +655,6 @@ static const struct property_entry onda_v891_v5_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name",
                              "gsl3676-onda-v891-v5.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -700,7 +670,6 @@ static const struct property_entry onda_v891w_v1_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -717,7 +686,6 @@ static const struct property_entry onda_v891w_v3_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -756,7 +724,6 @@ static const struct property_entry pipo_w11_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1532),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -772,7 +739,6 @@ static const struct property_entry positivo_c4128b_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1915),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1269),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -788,7 +754,6 @@ static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -805,7 +770,6 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -822,7 +786,6 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -839,7 +802,6 @@ static const struct property_entry predia_basic_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1144),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-predia-basic.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -856,7 +818,6 @@ static const struct property_entry rca_cambio_w101_v2_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 874),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -871,7 +832,6 @@ static const struct property_entry rwc_nanote_p8_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -887,7 +847,6 @@ static const struct property_entry schneider_sct101ctm_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -897,6 +856,21 @@ static const struct ts_dmi_data schneider_sct101ctm_data = {
        .properties     = schneider_sct101ctm_props,
 };
 
+static const struct property_entry globalspace_solt_ivw116_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 7),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1723),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1077),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-globalspace-solt-ivw116.fw"),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct ts_dmi_data globalspace_solt_ivw116_data = {
+       .acpi_name      = "MSSL1680:00",
+       .properties     = globalspace_solt_ivw116_props,
+};
+
 static const struct property_entry techbite_arc_11_6_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
@@ -904,7 +878,6 @@ static const struct property_entry techbite_arc_11_6_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-techbite-arc-11-6.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -920,7 +893,6 @@ static const struct property_entry teclast_tbook11_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-tbook11.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -946,7 +918,6 @@ static const struct property_entry teclast_x16_plus_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -969,7 +940,6 @@ static const struct property_entry teclast_x3_plus_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-teclast-x3-plus.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -985,7 +955,6 @@ static const struct property_entry teclast_x98plus2_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
 
@@ -999,7 +968,6 @@ static const struct property_entry trekstor_primebook_c11_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -1013,7 +981,6 @@ static const struct property_entry trekstor_primebook_c13_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -1027,7 +994,6 @@ static const struct property_entry trekstor_primetab_t13b_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        { }
@@ -1055,7 +1021,6 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
        PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -1071,7 +1036,6 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -1088,7 +1052,6 @@ static const struct property_entry viglen_connect_10_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 6),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-viglen-connect-10.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -1102,7 +1065,6 @@ static const struct property_entry vinga_twizzle_j116_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1920),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-vinga-twizzle_j116.fw"),
-       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
@@ -1385,6 +1347,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
                },
        },
+       {
+               /* Jumper EZpad 6s Pro */
+               .driver_data = (void *)&jumper_ezpad_6_pro_b_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Ezpad"),
+                       /* Above matches are too generic, add bios match */
+                       DMI_MATCH(DMI_BIOS_VERSION, "E.WSA116_8.E1.042.bin"),
+                       DMI_MATCH(DMI_BIOS_DATE, "01/08/2020"),
+               },
+       },
        {
                /* Jumper EZpad 6 m4 */
                .driver_data = (void *)&jumper_ezpad_6_m4_data,
@@ -1624,6 +1597,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "SCT101CTM"),
                },
        },
+       {
+               /* GlobalSpace SoLT IVW 11.6" */
+               .driver_data = (void *)&globalspace_solt_ivw116_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Globalspace Tech Pvt Ltd"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "SolTIVW"),
+                       DMI_MATCH(DMI_PRODUCT_SKU, "PN20170413488"),
+               },
+       },
        {
                /* Techbite Arc 11.6 */
                .driver_data = (void *)&techbite_arc_11_6_data,
@@ -1817,7 +1799,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
        { }
 };
 
-static const struct ts_dmi_data *ts_data;
+static struct ts_dmi_data *ts_data;
 
 static void ts_dmi_add_props(struct i2c_client *client)
 {
@@ -1852,6 +1834,64 @@ static int ts_dmi_notifier_call(struct notifier_block *nb,
        return 0;
 }
 
+#define MAX_CMDLINE_PROPS 16
+
+static struct property_entry ts_cmdline_props[MAX_CMDLINE_PROPS + 1];
+
+static struct ts_dmi_data ts_cmdline_data = {
+       .properties = ts_cmdline_props,
+};
+
+static int __init ts_parse_props(char *str)
+{
+       /* Save the original str to show it on syntax errors */
+       char orig_str[256];
+       char *name, *value;
+       u32 u32val;
+       int i, ret;
+
+       strscpy(orig_str, str);
+
+       /*
+        * str is part of the static_command_line from init/main.c and poking
+        * holes in that by writing 0 to it is allowed, as is taking long
+        * lasting references to it.
+        */
+       ts_cmdline_data.acpi_name = strsep(&str, ":");
+
+       for (i = 0; i < MAX_CMDLINE_PROPS; i++) {
+               name = strsep(&str, ":");
+               if (!name || !name[0])
+                       break;
+
+               /* Replace '=' with 0 and make value point past '=' or NULL */
+               value = name;
+               strsep(&value, "=");
+               if (!value) {
+                       ts_cmdline_props[i] = PROPERTY_ENTRY_BOOL(name);
+               } else if (isdigit(value[0])) {
+                       ret = kstrtou32(value, 0, &u32val);
+                       if (ret)
+                               goto syntax_error;
+
+                       ts_cmdline_props[i] = PROPERTY_ENTRY_U32(name, u32val);
+               } else {
+                       ts_cmdline_props[i] = PROPERTY_ENTRY_STRING(name, value);
+               }
+       }
+
+       if (!i || str)
+               goto syntax_error;
+
+       ts_data = &ts_cmdline_data;
+       return 1;
+
+syntax_error:
+       pr_err("Invalid '%s' value for 'i2c_touchscreen_props='\n", orig_str);
+       return 1; /* "i2c_touchscreen_props=" is still a known parameter */
+}
+__setup("i2c_touchscreen_props=", ts_parse_props);
+
 static struct notifier_block ts_dmi_notifier = {
        .notifier_call = ts_dmi_notifier_call,
 };
@@ -1859,13 +1899,25 @@ static struct notifier_block ts_dmi_notifier = {
 static int __init ts_dmi_init(void)
 {
        const struct dmi_system_id *dmi_id;
+       struct ts_dmi_data *ts_data_dmi;
        int error;
 
        dmi_id = dmi_first_match(touchscreen_dmi_table);
-       if (!dmi_id)
+       ts_data_dmi = dmi_id ? dmi_id->driver_data : NULL;
+
+       if (ts_data) {
+               /*
+                * Kernel cmdline provided data takes precedence, copy over
+                * DMI efi_embedded_fw info if available.
+                */
+               if (ts_data_dmi)
+                       ts_data->embedded_fw = ts_data_dmi->embedded_fw;
+       } else if (ts_data_dmi) {
+               ts_data = ts_data_dmi;
+       } else {
                return 0; /* Not an error */
+       }
 
-       ts_data = dmi_id->driver_data;
        /* Some dmi table entries only provide an efi_embedded_fw_desc */
        if (!ts_data->properties)
                return 0;
index 6603461d427300ab07e80c3f76698c81cbe12856..b591419de80c301dc22c625dc82c00e1522f8035 100644 (file)
@@ -6,6 +6,8 @@
 config X86_ANDROID_TABLETS
        tristate "X86 Android tablet support"
        depends on I2C && SPI && SERIAL_DEV_BUS && ACPI && EFI && GPIOLIB && PMIC_OPREGION
+       select NEW_LEDS
+       select LEDS_CLASS
        help
          X86 tablets which ship with Android as (part of) the factory image
          typically have various problems with their DSDTs. The factory kernels
index 4b828d74a6064c0d8ba8a1e153c6c3bbea678ddd..856eaac0ec140d9668a814dbf84b8e649cb26abf 100644 (file)
@@ -393,6 +393,17 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
                 * automatically there. Just add a delay and suppose the handshake finish
                 * after that.
                 */
+
+               /*
+                * For some BLK-CTL module (eg. AudioMix on i.MX8MP) doesn't have BUS
+                * clk-en bit, it is better to add delay here, as the BLK-CTL module
+                * doesn't need to care about how it is powered up.
+                *
+                * regmap_read_bypassed() is to make sure the above write IO transaction
+                * already reaches target before udelay()
+                */
+               regmap_read_bypassed(domain->regmap, domain->regs->hsk, &reg_val);
+               udelay(5);
        }
 
        /* Disable reset clocks for all devices in the domain */
index e74a0f6a31572bf53c87f4dfbb34cf7d76208f8f..4e80273dfb1ec9c90053faee5e1330875f3508fe 100644 (file)
@@ -6,6 +6,7 @@
 
 extern struct mutex pnp_lock;
 extern const struct attribute_group *pnp_dev_groups[];
+extern const struct bus_type pnp_bus_type;
 
 int pnp_register_protocol(struct pnp_protocol *protocol);
 void pnp_unregister_protocol(struct pnp_protocol *protocol);
index 0a5d0d8befa8408d02b3eaaf278d6a936a83e102..3483e52e3a81d13d6dbc3e68882fa8b7535e03bf 100644 (file)
@@ -266,6 +266,12 @@ const struct bus_type pnp_bus_type = {
        .dev_groups = pnp_dev_groups,
 };
 
+bool dev_is_pnp(const struct device *dev)
+{
+       return dev->bus == &pnp_bus_type;
+}
+EXPORT_SYMBOL_GPL(dev_is_pnp);
+
 int pnp_register_driver(struct pnp_driver *drv)
 {
        drv->driver.name = drv->name;
index 7513018c9f9ac72d5c1b0055b55ae9ff36e710b0..2067b0120d083d868e0f8fee2119fb0a4cd6a2b4 100644 (file)
@@ -85,7 +85,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
        }
 
        if (info->verify(info, pin, func, chan)) {
-               pr_err("driver cannot use function %u on pin %u\n", func, chan);
+               pr_err("driver cannot use function %u and channel %u on pin %u\n",
+                      func, chan, pin);
                return -EOPNOTSUPP;
        }
 
index b90e53d922d6d186dc05b5b009a903359425f677..c31b6dc3229cabbe436f6efb601be482ce62e06b 100644 (file)
@@ -228,6 +228,11 @@ static const struct regulator_ops rtq2208_regulator_ldo_ops = {
        .set_suspend_disable = rtq2208_set_suspend_disable,
 };
 
+static struct of_regulator_match rtq2208_ldo_match[] = {
+       {.name = "ldo2", },
+       {.name = "ldo1", },
+};
+
 static unsigned int rtq2208_of_map_mode(unsigned int mode)
 {
        switch (mode) {
@@ -322,8 +327,7 @@ static irqreturn_t rtq2208_irq_handler(int irqno, void *devid)
        return IRQ_HANDLED;
 }
 
-static int rtq2208_of_get_fixed_voltage(struct device *dev,
-                                       struct of_regulator_match *rtq2208_ldo_match, int n_fixed)
+static int rtq2208_of_get_ldo_dvs_ability(struct device *dev)
 {
        struct device_node *np;
        struct of_regulator_match *match;
@@ -338,14 +342,14 @@ static int rtq2208_of_get_fixed_voltage(struct device *dev,
        if (!np)
                np = dev->of_node;
 
-       ret = of_regulator_match(dev, np, rtq2208_ldo_match, n_fixed);
+       ret = of_regulator_match(dev, np, rtq2208_ldo_match, ARRAY_SIZE(rtq2208_ldo_match));
 
        of_node_put(np);
 
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < n_fixed; i++) {
+       for (i = 0; i < ARRAY_SIZE(rtq2208_ldo_match); i++) {
                match = rtq2208_ldo_match + i;
                init_data = match->init_data;
                rdesc = (struct rtq2208_regulator_desc *)match->driver_data;
@@ -388,8 +392,7 @@ static const struct linear_range rtq2208_vout_range[] = {
        REGULATOR_LINEAR_RANGE(1310000, 181, 255, 10000),
 };
 
-static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel,
-                                       int idx, struct of_regulator_match *rtq2208_ldo_match, int *ldo_idx)
+static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx)
 {
        struct regulator_desc *desc;
        static const struct {
@@ -461,8 +464,7 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
 static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *regulator_idx_table,
                struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev)
 {
-       struct of_regulator_match rtq2208_ldo_match[2];
-       int mtp_sel, ret, i, idx, ldo_idx = 0;
+       int mtp_sel, i, idx, ret;
 
        /* get mtp_sel0 or mtp_sel1 */
        mtp_sel = device_property_read_bool(dev, "richtek,mtp-sel-high");
@@ -474,7 +476,7 @@ static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *
                if (!rdesc[i])
                        return -ENOMEM;
 
-               rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx, rtq2208_ldo_match, &ldo_idx);
+               rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx);
 
                /* init ldo dvs ability */
                if (idx >= RTQ2208_LDO2)
@@ -482,7 +484,7 @@ static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *
        }
 
        /* init ldo fixed_uV */
-       ret = rtq2208_of_get_fixed_voltage(dev, rtq2208_ldo_match, ldo_idx);
+       ret = rtq2208_of_get_ldo_dvs_ability(dev);
        if (ret)
                return dev_err_probe(dev, ret, "Failed to get ldo fixed_uV\n");
 
index a226dc1b65d715f03addcf638fa52510360b1272..4eb0837298d4d2dddf159fbdfc348a6e5a8386c6 100644 (file)
@@ -414,28 +414,40 @@ static char print_alua_state(unsigned char state)
        }
 }
 
-static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
-                                             struct scsi_sense_hdr *sense_hdr)
+static void alua_handle_state_transition(struct scsi_device *sdev)
 {
        struct alua_dh_data *h = sdev->handler_data;
        struct alua_port_group *pg;
 
+       rcu_read_lock();
+       pg = rcu_dereference(h->pg);
+       if (pg)
+               pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+       rcu_read_unlock();
+       alua_check(sdev, false);
+}
+
+static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
+                                             struct scsi_sense_hdr *sense_hdr)
+{
        switch (sense_hdr->sense_key) {
        case NOT_READY:
                if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
                        /*
                         * LUN Not Accessible - ALUA state transition
                         */
-                       rcu_read_lock();
-                       pg = rcu_dereference(h->pg);
-                       if (pg)
-                               pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
-                       rcu_read_unlock();
-                       alua_check(sdev, false);
+                       alua_handle_state_transition(sdev);
                        return NEEDS_RETRY;
                }
                break;
        case UNIT_ATTENTION:
+               if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
+                       /*
+                        * LUN Not Accessible - ALUA state transition
+                        */
+                       alua_handle_state_transition(sdev);
+                       return NEEDS_RETRY;
+               }
                if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
                        /*
                         * Power On, Reset, or Bus Device Reset.
@@ -502,7 +514,8 @@ static int alua_tur(struct scsi_device *sdev)
 
        retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
                                      ALUA_FAILOVER_RETRIES, &sense_hdr);
-       if (sense_hdr.sense_key == NOT_READY &&
+       if ((sense_hdr.sense_key == NOT_READY ||
+            sense_hdr.sense_key == UNIT_ATTENTION) &&
            sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
                return SCSI_DH_RETRY;
        else if (retval)
index 329cc6ec3b589177f5ea1c870e877200ed428be1..82aa4e418c5a895c692c6c5ce57340e73edda34b 100644 (file)
@@ -1364,7 +1364,7 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
                        continue;
 
                if (i > sizeof(mr_sas_port->phy_mask) * 8) {
-                       ioc_warn(mrioc, "skipping port %u, max allowed value is %lu\n",
+                       ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n",
                            i, sizeof(mr_sas_port->phy_mask) * 8);
                        goto out_fail;
                }
index 258647fc6bddb42b6c484bfb73055f6e40cb1c2a..1320e06727df19188e285ec99276b0f66a0704ab 100644 (file)
@@ -4774,7 +4774,7 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
        char desc[17] = {0};
        u32 iounit_pg1_flags;
 
-       strscpy(desc, ioc->manu_pg0.ChipName, sizeof(desc));
+       memtostr(desc, ioc->manu_pg0.ChipName);
        ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
                 desc,
                 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
index 89ef43a5ef862d67cf81b5fc1a6037a385b3bc9a..12d08d8ba5382d626d25ab9f7e6578e6ca0c84e1 100644 (file)
@@ -302,8 +302,8 @@ struct _scsi_io_transfer {
 
 /**
  * _scsih_set_debug_level - global setting of ioc->logging_level.
- * @val: ?
- * @kp: ?
+ * @val: value of the parameter to be set
+ * @kp: pointer to kernel_param structure
  *
  * Note: The logging levels are defined in mpt3sas_debug.h.
  */
index 76f9a91771985baed3ede562c19c5df830696608..d84413b77d84999d5c3d43490575122123794d5d 100644 (file)
@@ -458,17 +458,13 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
                        goto out;
 
                manufacture_reply = data_out + sizeof(struct rep_manu_request);
-               strscpy(edev->vendor_id, manufacture_reply->vendor_id,
-                       sizeof(edev->vendor_id));
-               strscpy(edev->product_id, manufacture_reply->product_id,
-                       sizeof(edev->product_id));
-               strscpy(edev->product_rev, manufacture_reply->product_rev,
-                       sizeof(edev->product_rev));
+               memtostr(edev->vendor_id, manufacture_reply->vendor_id);
+               memtostr(edev->product_id, manufacture_reply->product_id);
+               memtostr(edev->product_rev, manufacture_reply->product_rev);
                edev->level = manufacture_reply->sas_format & 1;
                if (edev->level) {
-                       strscpy(edev->component_vendor_id,
-                               manufacture_reply->component_vendor_id,
-                               sizeof(edev->component_vendor_id));
+                       memtostr(edev->component_vendor_id,
+                                manufacture_reply->component_vendor_id);
                        tmp = (u8 *)&manufacture_reply->component_id;
                        edev->component_id = tmp[0] << 8 | tmp[1];
                        edev->component_revision_id =
index 5058e01b65a273084a990e1c285038c4da641c3f..98afdfe63600314bdc73152fc1b81910ccb740b7 100644 (file)
@@ -363,6 +363,7 @@ struct qedf_ctx {
 #define QEDF_IN_RECOVERY               5
 #define QEDF_DBG_STOP_IO               6
 #define QEDF_PROBING                   8
+#define QEDF_STAG_IN_PROGRESS          9
        unsigned long flags; /* Miscellaneous state flags */
        int fipvlan_retries;
        u8 num_queues;
index fd12439cbaab6bf6df58d2bc76e1b282b0392ad0..49adddf978cc737cfc4b27e71b0314a86eafbfa6 100644 (file)
@@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
         */
        if (resp == fc_lport_flogi_resp) {
                qedf->flogi_cnt++;
+               qedf->flogi_pending++;
+
+               if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+                       QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+                       qedf->flogi_pending = 0;
+               }
+
                if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
                        schedule_delayed_work(&qedf->stag_work, 2);
                        return NULL;
                }
-               qedf->flogi_pending++;
+
                return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
                    arg, timeout);
        }
@@ -912,13 +919,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
        struct qedf_ctx *qedf;
        struct qed_link_output if_link;
 
+       qedf = lport_priv(lport);
+
        if (lport->vport) {
+               clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
                printk_ratelimited("Cannot issue host reset on NPIV port.\n");
                return;
        }
 
-       qedf = lport_priv(lport);
-
        qedf->flogi_pending = 0;
        /* For host reset, essentially do a soft link up/down */
        atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
@@ -938,6 +946,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
        if (!if_link.link_up) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
                          "Physical link is not up.\n");
+               clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
                return;
        }
        /* Flush and wait to make sure link down is processed */
@@ -950,6 +959,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
                  "Queue link up work.\n");
        queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
            0);
+       clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
 }
 
 /* Reset the host by gracefully logging out and then logging back in */
@@ -3463,6 +3473,7 @@ retry_probe:
        }
 
        /* Start the Slowpath-process */
+       memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params));
        slowpath_params.int_mode = QED_INT_MODE_MSIX;
        slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
        slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
@@ -3721,6 +3732,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
 {
        struct qedf_ctx *qedf;
        int rc;
+       int cnt = 0;
 
        if (!pdev) {
                QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3738,6 +3750,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
                return;
        }
 
+stag_in_prog:
+       if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
+               QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
+               cnt++;
+
+               if (cnt < 5) {
+                       msleep(500);
+                       goto stag_in_prog;
+               }
+       }
+
        if (mode != QEDF_MODE_RECOVERY)
                set_bit(QEDF_UNLOADING, &qedf->flags);
 
@@ -3997,6 +4020,24 @@ void qedf_stag_change_work(struct work_struct *work)
        struct qedf_ctx *qedf =
            container_of(work, struct qedf_ctx, stag_work.work);
 
+       if (!qedf) {
+               QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
+               return;
+       }
+
+       if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "Already is in recovery, hence not calling software context reset.\n");
+               return;
+       }
+
+       if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+               QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+               return;
+       }
+
+       set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+
        printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
                        dev_name(&qedf->pdev->dev), __func__, __LINE__,
                        qedf->dbg_ctx.host_no);
index 3e0c0381277acd7aed4afdbf3fc7433ef2568868..f0464db3f9de99fbae94c0522505a950a484dac0 100644 (file)
@@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
                if (result < SCSI_VPD_HEADER_SIZE)
                        return 0;
 
+               if (result > sizeof(vpd)) {
+                       dev_warn_once(&sdev->sdev_gendev,
+                                     "%s: long VPD page 0 length: %d bytes\n",
+                                     __func__, result);
+                       result = sizeof(vpd);
+               }
+
                result -= SCSI_VPD_HEADER_SIZE;
                if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
                        return 0;
index 1175f2e213b5667e4134a2d42d5dea642cc31237..dc899277b3a4411a78819c7090bcc0a329ca3bcb 100644 (file)
@@ -65,7 +65,7 @@ int sr_disk_status(struct cdrom_device_info *);
 int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *);
 int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
 int sr_reset(struct cdrom_device_info *);
-int sr_select_speed(struct cdrom_device_info *cdi, int speed);
+int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed);
 int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
 
 int sr_is_xa(Scsi_CD *);
index 5b0b35e60e61fe6fd59aa54256a0baf38fd2cad1..a0d2556a27bba32b43cda153bb6adb471c0eef0c 100644 (file)
@@ -425,11 +425,14 @@ int sr_reset(struct cdrom_device_info *cdi)
        return 0;
 }
 
-int sr_select_speed(struct cdrom_device_info *cdi, int speed)
+int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed)
 {
        Scsi_CD *cd = cdi->handle;
        struct packet_command cgc;
 
+       /* avoid exceeding the max speed or overflowing integer bounds */
+       speed = clamp(0, speed, 0xffff / 177);
+
        if (speed == 0)
                speed = 0xffff; /* set to max */
        else
index 2209e9fc378fa94998f0c2fc152d5d472263e146..2e3eacd46b7232e91508453594accb2bb912ab96 100644 (file)
 #define CDNS_XSPI_STIG_DONE_FLAG               BIT(0)
 #define CDNS_XSPI_TRD_STATUS                   0x0104
 
+#define MODE_NO_OF_BYTES                       GENMASK(25, 24)
+#define MODEBYTES_COUNT                        1
+
 /* Helper macros for filling command registers */
 #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
        FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
        FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
        FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
 
-#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, modebytes) ( \
        FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
        FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
+       FIELD_PREP(MODE_NO_OF_BYTES, modebytes) | \
        FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
 
 #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
 #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
        FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
 
-#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes) ( \
        FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
                ((op)->data.nbytes >> 16) & 0xffff) | \
        FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
                  (op)->dummy.buswidth != 0 ? \
-                 (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
+                 (((dummybytes) * 8) / (op)->dummy.buswidth) : \
                  0))
 
 #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
@@ -351,6 +355,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
        u32 cmd_regs[6];
        u32 cmd_status;
        int ret;
+       int dummybytes = op->dummy.nbytes;
 
        ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
        if (ret < 0)
@@ -365,7 +370,12 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
        memset(cmd_regs, 0, sizeof(cmd_regs));
        cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
        cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
-       cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
+       if (dummybytes != 0) {
+               cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 1);
+               dummybytes--;
+       } else {
+               cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 0);
+       }
        cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
                                                       cdns_xspi->cur_cs);
 
@@ -375,7 +385,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
                cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
                cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
                cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
-               cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
+               cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes);
                cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
                                                           cdns_xspi->cur_cs);
 
index 4a68abcdcc35351dc1fab0a79b4180387caaa09b..4c4ff074e3f6f84caa2c9aa617f9808405093b04 100644 (file)
@@ -1016,8 +1016,10 @@ end_irq:
 static irqreturn_t stm32fx_spi_irq_thread(int irq, void *dev_id)
 {
        struct spi_controller *ctrl = dev_id;
+       struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
 
        spi_finalize_current_transfer(ctrl);
+       stm32fx_spi_disable(spi);
 
        return IRQ_HANDLED;
 }
@@ -1055,7 +1057,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
                mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
 
        if (!(sr & mask)) {
-               dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+               dev_vdbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
                         sr, ier);
                spin_unlock_irqrestore(&spi->lock, flags);
                return IRQ_NONE;
@@ -1185,8 +1187,6 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
                         ~clrb) | setb,
                        spi->base + spi->cfg->regs->cpol.reg);
 
-       stm32_spi_enable(spi);
-
        spin_unlock_irqrestore(&spi->lock, flags);
 
        return 0;
@@ -1204,6 +1204,7 @@ static void stm32fx_spi_dma_tx_cb(void *data)
 
        if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
                spi_finalize_current_transfer(spi->ctrl);
+               stm32fx_spi_disable(spi);
        }
 }
 
@@ -1218,6 +1219,7 @@ static void stm32_spi_dma_rx_cb(void *data)
        struct stm32_spi *spi = data;
 
        spi_finalize_current_transfer(spi->ctrl);
+       spi->cfg->disable(spi);
 }
 
 /**
@@ -1305,6 +1307,8 @@ static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi)
 
        stm32_spi_set_bits(spi, STM32FX_SPI_CR2, cr2);
 
+       stm32_spi_enable(spi);
+
        /* starting data transfer when buffer is loaded */
        if (spi->tx_buf)
                spi->cfg->write_tx(spi);
@@ -1341,6 +1345,8 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
 
        spin_lock_irqsave(&spi->lock, flags);
 
+       stm32_spi_enable(spi);
+
        /* Be sure to have data in fifo before starting data transfer */
        if (spi->tx_buf)
                stm32h7_spi_write_txfifo(spi);
@@ -1372,6 +1378,8 @@ static void stm32fx_spi_transfer_one_dma_start(struct stm32_spi *spi)
                 */
                stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_ERRIE);
        }
+
+       stm32_spi_enable(spi);
 }
 
 /**
@@ -1405,6 +1413,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
 
        stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
 
+       stm32_spi_enable(spi);
+
        if (STM32_SPI_HOST_MODE(spi))
                stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
 }
index 289feccca37607a3ffe982ab0ca0dfdd7daf62d6..9bc9fd10d538d2b61643f35608a93e445645968c 100644 (file)
@@ -1220,6 +1220,11 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
        spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
 }
 
+/* Dummy SG for unidirect transfers */
+static struct scatterlist dummy_sg = {
+       .page_link = SG_END,
+};
+
 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
        struct device *tx_dev, *rx_dev;
@@ -1243,6 +1248,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
        else
                rx_dev = ctlr->dev.parent;
 
+       ret = -ENOMSG;
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /* The sync is done before each transfer. */
                unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
@@ -1257,6 +1263,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
                                                attrs);
                        if (ret != 0)
                                return ret;
+               } else {
+                       xfer->tx_sg.sgl = &dummy_sg;
                }
 
                if (xfer->rx_buf != NULL) {
@@ -1270,8 +1278,13 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 
                                return ret;
                        }
+               } else {
+                       xfer->rx_sg.sgl = &dummy_sg;
                }
        }
+       /* No transfer has been mapped, bail out with success */
+       if (ret)
+               return 0;
 
        ctlr->cur_rx_dma_dev = rx_dev;
        ctlr->cur_tx_dma_dev = tx_dev;
@@ -1307,7 +1320,7 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
        return 0;
 }
 
-static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg,
                                    struct spi_transfer *xfer)
 {
        struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1316,11 +1329,14 @@ static void spi_dma_sync_for_device(struct spi_controller *ctlr,
        if (!ctlr->cur_msg_mapped)
                return;
 
+       if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+               return;
+
        dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 }
 
-static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg,
                                 struct spi_transfer *xfer)
 {
        struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1329,6 +1345,9 @@ static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
        if (!ctlr->cur_msg_mapped)
                return;
 
+       if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+               return;
+
        dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
        dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 }
@@ -1346,11 +1365,13 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 }
 
 static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+                                   struct spi_message *msg,
                                    struct spi_transfer *xfer)
 {
 }
 
 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+                                struct spi_message *msg,
                                 struct spi_transfer *xfer)
 {
 }
@@ -1622,10 +1643,10 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
                        reinit_completion(&ctlr->xfer_completion);
 
 fallback_pio:
-                       spi_dma_sync_for_device(ctlr, xfer);
+                       spi_dma_sync_for_device(ctlr, msg, xfer);
                        ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
                        if (ret < 0) {
-                               spi_dma_sync_for_cpu(ctlr, xfer);
+                               spi_dma_sync_for_cpu(ctlr, msg, xfer);
 
                                if (ctlr->cur_msg_mapped &&
                                   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
@@ -1650,7 +1671,7 @@ fallback_pio:
                                        msg->status = ret;
                        }
 
-                       spi_dma_sync_for_cpu(ctlr, xfer);
+                       spi_dma_sync_for_cpu(ctlr, msg, xfer);
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
index 54cce4e523bc510a0b25621317f2a56a31c32e97..30567b4994551b174e2b96bdfe1189816be115e6 100644 (file)
@@ -467,6 +467,21 @@ static void thermal_governor_trip_crossed(struct thermal_governor *governor,
                governor->trip_crossed(tz, trip, crossed_up);
 }
 
+static void thermal_trip_crossed(struct thermal_zone_device *tz,
+                                const struct thermal_trip *trip,
+                                struct thermal_governor *governor,
+                                bool crossed_up)
+{
+       if (crossed_up) {
+               thermal_notify_tz_trip_up(tz, trip);
+               thermal_debug_tz_trip_up(tz, trip);
+       } else {
+               thermal_notify_tz_trip_down(tz, trip);
+               thermal_debug_tz_trip_down(tz, trip);
+       }
+       thermal_governor_trip_crossed(governor, tz, trip, crossed_up);
+}
+
 static int thermal_trip_notify_cmp(void *ascending, const struct list_head *a,
                                   const struct list_head *b)
 {
@@ -506,18 +521,12 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
                handle_thermal_trip(tz, td, &way_up_list, &way_down_list);
 
        list_sort(&way_up_list, &way_up_list, thermal_trip_notify_cmp);
-       list_for_each_entry(td, &way_up_list, notify_list_node) {
-               thermal_notify_tz_trip_up(tz, &td->trip);
-               thermal_debug_tz_trip_up(tz, &td->trip);
-               thermal_governor_trip_crossed(governor, tz, &td->trip, true);
-       }
+       list_for_each_entry(td, &way_up_list, notify_list_node)
+               thermal_trip_crossed(tz, &td->trip, governor, true);
 
        list_sort(NULL, &way_down_list, thermal_trip_notify_cmp);
-       list_for_each_entry(td, &way_down_list, notify_list_node) {
-               thermal_notify_tz_trip_down(tz, &td->trip);
-               thermal_debug_tz_trip_down(tz, &td->trip);
-               thermal_governor_trip_crossed(governor, tz, &td->trip, false);
-       }
+       list_for_each_entry(td, &way_down_list, notify_list_node)
+               thermal_trip_crossed(tz, &td->trip, governor, false);
 
        if (governor->manage)
                governor->manage(tz);
@@ -593,6 +602,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
 }
 EXPORT_SYMBOL_GPL(thermal_zone_device_update);
 
+void thermal_zone_trip_down(struct thermal_zone_device *tz,
+                           const struct thermal_trip *trip)
+{
+       thermal_trip_crossed(tz, trip, thermal_get_tz_governor(tz), false);
+}
+
 int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
                              void *data)
 {
index d9785e5bbb08cde8b2c5a672aa1c14936a4fc06c..20e7b45673d683064209f63ecb0d4cb1b2f5fbdb 100644 (file)
@@ -246,6 +246,8 @@ int thermal_zone_trip_id(const struct thermal_zone_device *tz,
 void thermal_zone_trip_updated(struct thermal_zone_device *tz,
                               const struct thermal_trip *trip);
 int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
+void thermal_zone_trip_down(struct thermal_zone_device *tz,
+                           const struct thermal_trip *trip);
 
 /* sysfs I/F */
 int thermal_zone_create_device_groups(struct thermal_zone_device *tz);
index 91f9c21235a8a9dcb05d51a45ddb4ee0f2ecf983..9424472291570861de5e68a27f8e0d7fa00f8964 100644 (file)
@@ -91,6 +91,8 @@ struct cdev_record {
  *
  * @timestamp: the trip crossing timestamp
  * @duration: total time when the zone temperature was above the trip point
+ * @trip_temp: trip temperature at mitigation start
+ * @trip_hyst: trip hysteresis at mitigation start
  * @count: the number of times the zone temperature was above the trip point
  * @max: maximum recorded temperature above the trip point
  * @min: minimum recorded temperature above the trip point
@@ -99,6 +101,8 @@ struct cdev_record {
 struct trip_stats {
        ktime_t timestamp;
        ktime_t duration;
+       int trip_temp;
+       int trip_hyst;
        int count;
        int max;
        int min;
@@ -574,6 +578,7 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
        struct thermal_debugfs *thermal_dbg = tz->debugfs;
        int trip_id = thermal_zone_trip_id(tz, trip);
        ktime_t now = ktime_get();
+       struct trip_stats *trip_stats;
 
        if (!thermal_dbg)
                return;
@@ -639,7 +644,10 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
        tz_dbg->trips_crossed[tz_dbg->nr_trips++] = trip_id;
 
        tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
-       tze->trip_stats[trip_id].timestamp = now;
+       trip_stats = &tze->trip_stats[trip_id];
+       trip_stats->trip_temp = trip->temperature;
+       trip_stats->trip_hyst = trip->hysteresis;
+       trip_stats->timestamp = now;
 
 unlock:
        mutex_unlock(&thermal_dbg->lock);
@@ -794,10 +802,6 @@ static int tze_seq_show(struct seq_file *s, void *v)
                const struct thermal_trip *trip = &td->trip;
                struct trip_stats *trip_stats;
 
-               /* Skip invalid trips. */
-               if (trip->temperature == THERMAL_TEMP_INVALID)
-                       continue;
-
                /*
                 * There is no possible mitigation happening at the
                 * critical trip point, so the stats will be always
@@ -836,8 +840,8 @@ static int tze_seq_show(struct seq_file *s, void *v)
                seq_printf(s, "| %*d | %*s | %*d | %*d | %c%*lld | %*d | %*d | %*d |\n",
                           4 , trip_id,
                           8, type,
-                          9, trip->temperature,
-                          9, trip->hysteresis,
+                          9, trip_stats->trip_temp,
+                          9, trip_stats->trip_hyst,
                           c, 10, duration_ms,
                           9, trip_stats->avg,
                           9, trip_stats->min,
index d6a6acc78ddb6c1586bd32c878e59a128933464f..49e63db685172e44a7f3c83a2df958bc5d839d90 100644 (file)
@@ -152,17 +152,23 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
        if (trip->temperature == temp)
                return;
 
+       trip->temperature = temp;
+       thermal_notify_tz_trip_change(tz, trip);
+
        if (temp == THERMAL_TEMP_INVALID) {
                struct thermal_trip_desc *td = trip_to_trip_desc(trip);
 
-               if (trip->type == THERMAL_TRIP_PASSIVE &&
-                   tz->temperature >= td->threshold) {
+               if (tz->temperature >= td->threshold) {
                        /*
-                        * The trip has been crossed, so the thermal zone's
-                        * passive count needs to be adjusted.
+                        * The trip has been crossed on the way up, so some
+                        * adjustments are needed to compensate for the lack
+                        * of it going forward.
                         */
-                       tz->passive--;
-                       WARN_ON_ONCE(tz->passive < 0);
+                       if (trip->type == THERMAL_TRIP_PASSIVE) {
+                               tz->passive--;
+                               WARN_ON_ONCE(tz->passive < 0);
+                       }
+                       thermal_zone_trip_down(tz, trip);
                }
                /*
                 * Invalidate the threshold to avoid triggering a spurious
@@ -170,7 +176,5 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
                 */
                td->threshold = INT_MAX;
        }
-       trip->temperature = temp;
-       thermal_notify_tz_trip_change(tz, trip);
 }
 EXPORT_SYMBOL_GPL(thermal_zone_set_trip_temp);
index 005d63ab1f441c1aaf77513449f88267e025650c..8944548c30fa15a00f5abd7a097011b1ab64742e 100644 (file)
@@ -634,20 +634,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
        struct ufshcd_lrb *lrbp = &hba->lrb[tag];
        struct ufs_hw_queue *hwq;
        unsigned long flags;
-       int err = FAILED;
+       int err;
 
        if (!ufshcd_cmd_inflight(lrbp->cmd)) {
                dev_err(hba->dev,
                        "%s: skip abort. cmd at tag %d already completed.\n",
                        __func__, tag);
-               goto out;
+               return FAILED;
        }
 
        /* Skip task abort in case previous aborts failed and report failure */
        if (lrbp->req_abort_skip) {
                dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
                        __func__, tag);
-               goto out;
+               return FAILED;
        }
 
        hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
@@ -659,7 +659,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
                 */
                dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
                        __func__, hwq->id, tag);
-               goto out;
+               return FAILED;
        }
 
        /*
@@ -667,18 +667,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
         * in the completion queue either. Query the device to see if
         * the command is being processed in the device.
         */
-       if (ufshcd_try_to_abort_task(hba, tag)) {
+       err = ufshcd_try_to_abort_task(hba, tag);
+       if (err) {
                dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
                lrbp->req_abort_skip = true;
-               goto out;
+               return FAILED;
        }
 
-       err = SUCCESS;
        spin_lock_irqsave(&hwq->cq_lock, flags);
        if (ufshcd_cmd_inflight(lrbp->cmd))
                ufshcd_release_scsi_cmd(hba, lrbp);
        spin_unlock_irqrestore(&hwq->cq_lock, flags);
 
-out:
-       return err;
+       return SUCCESS;
 }
index f16f7358163490f633f67d3e93a82eb8da327da5..01338d4c2d9e6fde95e8e69816781ce968522ef6 100644 (file)
@@ -48,12 +48,17 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry)
 static void v9fs_dentry_release(struct dentry *dentry)
 {
        struct hlist_node *p, *n;
+       struct hlist_head head;
 
        p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n",
                 dentry, dentry);
-       hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
+
+       spin_lock(&dentry->d_lock);
+       hlist_move_list((struct hlist_head *)&dentry->d_fsdata, &head);
+       spin_unlock(&dentry->d_lock);
+
+       hlist_for_each_safe(p, n, &head)
                p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
-       dentry->d_fsdata = NULL;
 }
 
 static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
index 7a3308d776060e2e2565af09d358f2cf33416b6b..fd72fc38c8f5b1ff971414388f6fab16ce53ac4c 100644 (file)
@@ -348,6 +348,7 @@ void v9fs_evict_inode(struct inode *inode)
        __le32 __maybe_unused version;
 
        if (!is_bad_inode(inode)) {
+               netfs_wait_for_outstanding_io(inode);
                truncate_inode_pages_final(&inode->i_data);
 
                version = cpu_to_le32(v9inode->qid.version);
index 94fc049aff584f43e622d164a13fc30962dd04f1..15bb7989c387ae59a4e1e4fd49c9b9f99e4f0ecf 100644 (file)
@@ -648,6 +648,7 @@ void afs_evict_inode(struct inode *inode)
 
        ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
 
+       netfs_wait_for_outstanding_io(inode);
        truncate_inode_pages_final(&inode->i_data);
 
        afs_set_cache_aux(vnode, &aux);
index 97f50e9fd9eb017384e5790a33519662d3cca978..297487ee832317a43608b38db09186b47cb1d583 100644 (file)
@@ -140,6 +140,11 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
                put_page(page);
                if (ret < 0)
                        return ret;
+
+               /* Don't cross a backup volume mountpoint from a backup volume */
+               if (src_as->volume && src_as->volume->type == AFSVL_BACKVOL &&
+                   ctx->type == AFSVL_BACKVOL)
+                       return -ENODEV;
        }
 
        return 0;
index 692b1c7d5018c876bb41883e8443e3517dbd3be8..4321f9fb73bd9f7e098b663e20f516b0c505f6d4 100644 (file)
@@ -690,7 +690,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
 
        ptrs = bch2_bkey_ptrs_c(k);
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-               struct bpos bucket_pos;
+               struct bpos bucket_pos = POS_MIN;
                struct bch_backpointer bp;
 
                if (p.ptr.cached)
index bc0ea2c4efef25fce089f4b3ca24e3cf6b2f5bdb..2a538eb2af110c6e8bc5fbef4301b7eed29841c0 100644 (file)
@@ -457,6 +457,7 @@ enum bch_time_stats {
 };
 
 #include "alloc_types.h"
+#include "btree_gc_types.h"
 #include "btree_types.h"
 #include "btree_node_scan_types.h"
 #include "btree_write_buffer_types.h"
@@ -488,49 +489,6 @@ enum bch_time_stats {
 
 struct btree;
 
-enum gc_phase {
-       GC_PHASE_NOT_RUNNING,
-       GC_PHASE_START,
-       GC_PHASE_SB,
-
-       GC_PHASE_BTREE_stripes,
-       GC_PHASE_BTREE_extents,
-       GC_PHASE_BTREE_inodes,
-       GC_PHASE_BTREE_dirents,
-       GC_PHASE_BTREE_xattrs,
-       GC_PHASE_BTREE_alloc,
-       GC_PHASE_BTREE_quotas,
-       GC_PHASE_BTREE_reflink,
-       GC_PHASE_BTREE_subvolumes,
-       GC_PHASE_BTREE_snapshots,
-       GC_PHASE_BTREE_lru,
-       GC_PHASE_BTREE_freespace,
-       GC_PHASE_BTREE_need_discard,
-       GC_PHASE_BTREE_backpointers,
-       GC_PHASE_BTREE_bucket_gens,
-       GC_PHASE_BTREE_snapshot_trees,
-       GC_PHASE_BTREE_deleted_inodes,
-       GC_PHASE_BTREE_logged_ops,
-       GC_PHASE_BTREE_rebalance_work,
-       GC_PHASE_BTREE_subvolume_children,
-
-       GC_PHASE_PENDING_DELETE,
-};
-
-struct gc_pos {
-       enum gc_phase           phase;
-       u16                     level;
-       struct bpos             pos;
-};
-
-struct reflink_gc {
-       u64             offset;
-       u32             size;
-       u32             refcount;
-};
-
-typedef GENRADIX(struct reflink_gc) reflink_gc_table;
-
 struct io_count {
        u64                     sectors[2][BCH_DATA_NR];
 };
index d801e19cb4890e014589f44de53e1c1aa6355aaa..90c12fe2a2cd3b030f0c6eb6aee1149262cc8dd1 100644 (file)
@@ -503,16 +503,22 @@ struct bch_sb_field {
 
 #include "alloc_background_format.h"
 #include "extents_format.h"
-#include "reflink_format.h"
 #include "ec_format.h"
-#include "inode_format.h"
 #include "dirent_format.h"
-#include "xattr_format.h"
-#include "quota_format.h"
+#include "disk_groups_format.h"
+#include "inode_format.h"
+#include "journal_seq_blacklist_format.h"
 #include "logged_ops_format.h"
+#include "quota_format.h"
+#include "reflink_format.h"
+#include "replicas_format.h"
 #include "snapshot_format.h"
 #include "subvolume_format.h"
 #include "sb-counters_format.h"
+#include "sb-downgrade_format.h"
+#include "sb-errors_format.h"
+#include "sb-members_format.h"
+#include "xattr_format.h"
 
 enum bch_sb_field_type {
 #define x(f, nr)       BCH_SB_FIELD_##f = nr,
@@ -545,107 +551,6 @@ struct bch_sb_field_journal_v2 {
        }                       d[];
 };
 
-/* BCH_SB_FIELD_members_v1: */
-
-#define BCH_MIN_NR_NBUCKETS    (1 << 6)
-
-#define BCH_IOPS_MEASUREMENTS()                        \
-       x(seqread,      0)                      \
-       x(seqwrite,     1)                      \
-       x(randread,     2)                      \
-       x(randwrite,    3)
-
-enum bch_iops_measurement {
-#define x(t, n) BCH_IOPS_##t = n,
-       BCH_IOPS_MEASUREMENTS()
-#undef x
-       BCH_IOPS_NR
-};
-
-#define BCH_MEMBER_ERROR_TYPES()               \
-       x(read,         0)                      \
-       x(write,        1)                      \
-       x(checksum,     2)
-
-enum bch_member_error_type {
-#define x(t, n) BCH_MEMBER_ERROR_##t = n,
-       BCH_MEMBER_ERROR_TYPES()
-#undef x
-       BCH_MEMBER_ERROR_NR
-};
-
-struct bch_member {
-       __uuid_t                uuid;
-       __le64                  nbuckets;       /* device size */
-       __le16                  first_bucket;   /* index of first bucket used */
-       __le16                  bucket_size;    /* sectors */
-       __u8                    btree_bitmap_shift;
-       __u8                    pad[3];
-       __le64                  last_mount;     /* time_t */
-
-       __le64                  flags;
-       __le32                  iops[4];
-       __le64                  errors[BCH_MEMBER_ERROR_NR];
-       __le64                  errors_at_reset[BCH_MEMBER_ERROR_NR];
-       __le64                  errors_reset_time;
-       __le64                  seq;
-       __le64                  btree_allocated_bitmap;
-       /*
-        * On recovery from a clean shutdown we don't normally read the journal,
-        * but we still want to resume writing from where we left off so we
-        * don't overwrite more than is necessary, for list journal debugging:
-        */
-       __le32                  last_journal_bucket;
-       __le32                  last_journal_bucket_offset;
-};
-
-/*
- * This limit comes from the bucket_gens array - it's a single allocation, and
- * kernel allocation are limited to INT_MAX
- */
-#define BCH_MEMBER_NBUCKETS_MAX        (INT_MAX - 64)
-
-#define BCH_MEMBER_V1_BYTES    56
-
-LE64_BITMASK(BCH_MEMBER_STATE,         struct bch_member, flags,  0,  4)
-/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
-LE64_BITMASK(BCH_MEMBER_DISCARD,       struct bch_member, flags, 14, 15)
-LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,  struct bch_member, flags, 15, 20)
-LE64_BITMASK(BCH_MEMBER_GROUP,         struct bch_member, flags, 20, 28)
-LE64_BITMASK(BCH_MEMBER_DURABILITY,    struct bch_member, flags, 28, 30)
-LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
-                                       struct bch_member, flags, 30, 31)
-
-#if 0
-LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,        struct bch_member, flags[1], 0,  20);
-LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
-#endif
-
-#define BCH_MEMBER_STATES()                    \
-       x(rw,           0)                      \
-       x(ro,           1)                      \
-       x(failed,       2)                      \
-       x(spare,        3)
-
-enum bch_member_state {
-#define x(t, n) BCH_MEMBER_STATE_##t = n,
-       BCH_MEMBER_STATES()
-#undef x
-       BCH_MEMBER_STATE_NR
-};
-
-struct bch_sb_field_members_v1 {
-       struct bch_sb_field     field;
-       struct bch_member       _members[]; //Members are now variable size
-};
-
-struct bch_sb_field_members_v2 {
-       struct bch_sb_field     field;
-       __le16                  member_bytes; //size of single member entry
-       u8                      pad[6];
-       struct bch_member       _members[];
-};
-
 /* BCH_SB_FIELD_crypt: */
 
 struct nonce {
@@ -694,8 +599,6 @@ LE64_BITMASK(BCH_KDF_SCRYPT_N,      struct bch_sb_field_crypt, kdf_flags,  0, 16);
 LE64_BITMASK(BCH_KDF_SCRYPT_R, struct bch_sb_field_crypt, kdf_flags, 16, 32);
 LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
 
-/* BCH_SB_FIELD_replicas: */
-
 #define BCH_DATA_TYPES()               \
        x(free,         0)              \
        x(sb,           1)              \
@@ -738,50 +641,6 @@ static inline bool data_type_is_hidden(enum bch_data_type type)
        }
 }
 
-struct bch_replicas_entry_v0 {
-       __u8                    data_type;
-       __u8                    nr_devs;
-       __u8                    devs[];
-} __packed;
-
-struct bch_sb_field_replicas_v0 {
-       struct bch_sb_field     field;
-       struct bch_replicas_entry_v0 entries[];
-} __packed __aligned(8);
-
-struct bch_replicas_entry_v1 {
-       __u8                    data_type;
-       __u8                    nr_devs;
-       __u8                    nr_required;
-       __u8                    devs[];
-} __packed;
-
-#define replicas_entry_bytes(_i)                                       \
-       (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
-
-struct bch_sb_field_replicas {
-       struct bch_sb_field     field;
-       struct bch_replicas_entry_v1 entries[];
-} __packed __aligned(8);
-
-/* BCH_SB_FIELD_disk_groups: */
-
-#define BCH_SB_LABEL_SIZE              32
-
-struct bch_disk_group {
-       __u8                    label[BCH_SB_LABEL_SIZE];
-       __le64                  flags[2];
-} __packed __aligned(8);
-
-LE64_BITMASK(BCH_GROUP_DELETED,                struct bch_disk_group, flags[0], 0,  1)
-LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,   struct bch_disk_group, flags[0], 1,  6)
-LE64_BITMASK(BCH_GROUP_PARENT,         struct bch_disk_group, flags[0], 6, 24)
-
-struct bch_sb_field_disk_groups {
-       struct bch_sb_field     field;
-       struct bch_disk_group   entries[];
-} __packed __aligned(8);
-
 /*
  * On clean shutdown, store btree roots and current journal sequence number in
  * the superblock:
@@ -809,27 +668,6 @@ struct bch_sb_field_clean {
        __u64                   _data[];
 };
 
-struct journal_seq_blacklist_entry {
-       __le64                  start;
-       __le64                  end;
-};
-
-struct bch_sb_field_journal_seq_blacklist {
-       struct bch_sb_field     field;
-       struct journal_seq_blacklist_entry start[];
-};
-
-struct bch_sb_field_errors {
-       struct bch_sb_field     field;
-       struct bch_sb_field_error_entry {
-               __le64          v;
-               __le64          last_error_time;
-       }                       entries[];
-};
-
-LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID,    struct bch_sb_field_error_entry, v,  0, 16);
-LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR,    struct bch_sb_field_error_entry, v, 16, 64);
-
 struct bch_sb_field_ext {
        struct bch_sb_field     field;
        __le64                  recovery_passes_required[2];
@@ -837,18 +675,6 @@ struct bch_sb_field_ext {
        __le64                  btrees_lost_data;
 };
 
-struct bch_sb_field_downgrade_entry {
-       __le16                  version;
-       __le64                  recovery_passes[2];
-       __le16                  nr_errors;
-       __le16                  errors[] __counted_by(nr_errors);
-} __packed __aligned(2);
-
-struct bch_sb_field_downgrade {
-       struct bch_sb_field     field;
-       struct bch_sb_field_downgrade_entry entries[];
-};
-
 /* Superblock: */
 
 /*
@@ -909,7 +735,6 @@ unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_re
 #define bcachefs_metadata_version_current      (bcachefs_metadata_version_max - 1)
 
 #define BCH_SB_SECTOR                  8
-#define BCH_SB_MEMBERS_MAX             64 /* XXX kill */
 
 #define BCH_SB_LAYOUT_SIZE_BITS_MAX    16 /* 32 MB */
 
index 8035c8b797ab37658ebd10b55fd1b28ee5304271..dc97991bcd6adad823121fd27f6f89889ce9a0d6 100644 (file)
@@ -585,16 +585,17 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
 
                if (fsck_err_on(k.k->version.lo > atomic64_read(&c->key_version), c,
                                bkey_version_in_future,
-                               "key version number higher than recorded: %llu > %llu",
-                               k.k->version.lo,
-                               atomic64_read(&c->key_version)))
+                               "key version number higher than recorded %llu\n  %s",
+                               atomic64_read(&c->key_version),
+                               (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
                        atomic64_set(&c->key_version, k.k->version.lo);
        }
 
        if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
                                c, btree_bitmap_not_marked,
                                "btree ptr not marked in member info btree allocated bitmap\n  %s",
-                               (bch2_bkey_val_to_text(&buf, c, k),
+                               (printbuf_reset(&buf),
+                                bch2_bkey_val_to_text(&buf, c, k),
                                 buf.buf))) {
                mutex_lock(&c->sb_lock);
                bch2_dev_btree_bitmap_mark(c, k);
@@ -673,8 +674,7 @@ static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool in
 
 static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
 {
-       return  (int) btree_id_to_gc_phase(l) -
-               (int) btree_id_to_gc_phase(r);
+       return cmp_int(gc_btree_order(l), gc_btree_order(r));
 }
 
 static int bch2_gc_btrees(struct bch_fs *c)
@@ -711,7 +711,7 @@ fsck_err:
 static int bch2_mark_superblocks(struct bch_fs *c)
 {
        mutex_lock(&c->sb_lock);
-       gc_pos_set(c, gc_phase(GC_PHASE_SB));
+       gc_pos_set(c, gc_phase(GC_PHASE_sb));
 
        int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
        mutex_unlock(&c->sb_lock);
@@ -1209,7 +1209,7 @@ int bch2_check_allocations(struct bch_fs *c)
        if (ret)
                goto out;
 
-       gc_pos_set(c, gc_phase(GC_PHASE_START));
+       gc_pos_set(c, gc_phase(GC_PHASE_start));
 
        ret = bch2_mark_superblocks(c);
        BUG_ON(ret);
@@ -1231,7 +1231,7 @@ out:
 
        percpu_down_write(&c->mark_lock);
        /* Indicates that gc is no longer in progress: */
-       __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
+       __gc_pos_set(c, gc_phase(GC_PHASE_not_running));
 
        bch2_gc_free(c);
        percpu_up_write(&c->mark_lock);
index 1b6489d8e0f4fa4a953ffb02cbc424abe9b60415..876d81e2017d73e3240aa239c20a2defb5132981 100644 (file)
@@ -3,6 +3,7 @@
 #define _BCACHEFS_BTREE_GC_H
 
 #include "bkey.h"
+#include "btree_gc_types.h"
 #include "btree_types.h"
 
 int bch2_check_topology(struct bch_fs *);
@@ -32,36 +33,15 @@ int bch2_check_allocations(struct bch_fs *);
 /* Position of (the start of) a gc phase: */
 static inline struct gc_pos gc_phase(enum gc_phase phase)
 {
-       return (struct gc_pos) {
-               .phase  = phase,
-               .level  = 0,
-               .pos    = POS_MIN,
-       };
-}
-
-static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
-{
-       return   cmp_int(l.phase, r.phase) ?:
-               -cmp_int(l.level, r.level) ?:
-                bpos_cmp(l.pos, r.pos);
-}
-
-static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
-{
-       switch (id) {
-#define x(name, v, ...) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
-       BCH_BTREE_IDS()
-#undef x
-       default:
-               BUG();
-       }
+       return (struct gc_pos) { .phase = phase, };
 }
 
 static inline struct gc_pos gc_pos_btree(enum btree_id btree, unsigned level,
                                         struct bpos pos)
 {
        return (struct gc_pos) {
-               .phase  = btree_id_to_gc_phase(btree),
+               .phase  = GC_PHASE_btree,
+               .btree  = btree,
                .level  = level,
                .pos    = pos,
        };
@@ -76,6 +56,22 @@ static inline struct gc_pos gc_pos_btree_node(struct btree *b)
        return gc_pos_btree(b->c.btree_id, b->c.level, b->key.k.p);
 }
 
+static inline int gc_btree_order(enum btree_id btree)
+{
+       if (btree == BTREE_ID_stripes)
+               return -1;
+       return btree;
+}
+
+static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
+{
+       return   cmp_int(l.phase, r.phase) ?:
+                cmp_int(gc_btree_order(l.btree),
+                        gc_btree_order(r.btree)) ?:
+               -cmp_int(l.level, r.level) ?:
+                bpos_cmp(l.pos, r.pos);
+}
+
 static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
 {
        unsigned seq;
diff --git a/fs/bcachefs/btree_gc_types.h b/fs/bcachefs/btree_gc_types.h
new file mode 100644 (file)
index 0000000..b82c24b
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_GC_TYPES_H
+#define _BCACHEFS_BTREE_GC_TYPES_H
+
+#include <linux/generic-radix-tree.h>
+
+enum gc_phase {
+       GC_PHASE_not_running,
+       GC_PHASE_start,
+       GC_PHASE_sb,
+       GC_PHASE_btree,
+};
+
+struct gc_pos {
+       enum gc_phase           phase:8;
+       enum btree_id           btree:8;
+       u16                     level;
+       struct bpos             pos;
+};
+
+struct reflink_gc {
+       u64             offset;
+       u32             size;
+       u32             refcount;
+};
+
+typedef GENRADIX(struct reflink_gc) reflink_gc_table;
+
+#endif /* _BCACHEFS_BTREE_GC_TYPES_H */
index cbf8f5d90602115576a4bdb975450cdb9199926d..829c1b91477d796bf1e3176807271ff8760e6cf7 100644 (file)
@@ -519,7 +519,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
 
 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
                          struct bch_dev *ca,
-                         struct btree *b, struct bset *i,
+                         struct btree *b, struct bset *i, struct bkey_packed *k,
                          unsigned offset, int write)
 {
        prt_printf(out, bch2_log_msg(c, "%s"),
@@ -537,15 +537,20 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
                   b->written, btree_ptr_sectors_written(&b->key));
        if (i)
                prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
+       if (k)
+               prt_printf(out, " bset byte offset %lu",
+                          (unsigned long)(void *)k -
+                          ((unsigned long)(void *)i & ~511UL));
        prt_str(out, ": ");
 }
 
-__printf(9, 10)
+__printf(10, 11)
 static int __btree_err(int ret,
                       struct bch_fs *c,
                       struct bch_dev *ca,
                       struct btree *b,
                       struct bset *i,
+                      struct bkey_packed *k,
                       int write,
                       bool have_retry,
                       enum bch_sb_error_id err_type,
@@ -555,7 +560,7 @@ static int __btree_err(int ret,
        bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
        va_list args;
 
-       btree_err_msg(&out, c, ca, b, i, b->written, write);
+       btree_err_msg(&out, c, ca, b, i, k, b->written, write);
 
        va_start(args, fmt);
        prt_vprintf(&out, fmt, args);
@@ -611,9 +616,9 @@ fsck_err:
        return ret;
 }
 
-#define btree_err(type, c, ca, b, i, _err_type, msg, ...)              \
+#define btree_err(type, c, ca, b, i, k, _err_type, msg, ...)           \
 ({                                                                     \
-       int _ret = __btree_err(type, c, ca, b, i, write, have_retry,    \
+       int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry, \
                               BCH_FSCK_ERR_##_err_type,                \
                               msg, ##__VA_ARGS__);                     \
                                                                        \
@@ -690,7 +695,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 
        btree_err_on(!bch2_version_compatible(version),
                     -BCH_ERR_btree_node_read_err_incompatible,
-                    c, ca, b, i,
+                    c, ca, b, i, NULL,
                     btree_node_unsupported_version,
                     "unsupported bset version %u.%u",
                     BCH_VERSION_MAJOR(version),
@@ -698,7 +703,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 
        if (btree_err_on(version < c->sb.version_min,
                         -BCH_ERR_btree_node_read_err_fixable,
-                        c, NULL, b, i,
+                        c, NULL, b, i, NULL,
                         btree_node_bset_older_than_sb_min,
                         "bset version %u older than superblock version_min %u",
                         version, c->sb.version_min)) {
@@ -711,7 +716,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
        if (btree_err_on(BCH_VERSION_MAJOR(version) >
                         BCH_VERSION_MAJOR(c->sb.version),
                         -BCH_ERR_btree_node_read_err_fixable,
-                        c, NULL, b, i,
+                        c, NULL, b, i, NULL,
                         btree_node_bset_newer_than_sb,
                         "bset version %u newer than superblock version %u",
                         version, c->sb.version)) {
@@ -723,13 +728,13 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 
        btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
                     -BCH_ERR_btree_node_read_err_incompatible,
-                    c, ca, b, i,
+                    c, ca, b, i, NULL,
                     btree_node_unsupported_version,
                     "BSET_SEPARATE_WHITEOUTS no longer supported");
 
        if (btree_err_on(offset + sectors > btree_sectors(c),
                         -BCH_ERR_btree_node_read_err_fixable,
-                        c, ca, b, i,
+                        c, ca, b, i, NULL,
                         bset_past_end_of_btree_node,
                         "bset past end of btree node")) {
                i->u64s = 0;
@@ -739,13 +744,13 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 
        btree_err_on(offset && !i->u64s,
                     -BCH_ERR_btree_node_read_err_fixable,
-                    c, ca, b, i,
+                    c, ca, b, i, NULL,
                     bset_empty,
                     "empty bset");
 
        btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
                     -BCH_ERR_btree_node_read_err_want_retry,
-                    c, ca, b, i,
+                    c, ca, b, i, NULL,
                     bset_wrong_sector_offset,
                     "bset at wrong sector offset");
 
@@ -761,20 +766,20 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
                        /* XXX endianness */
                        btree_err_on(bp->seq != bn->keys.seq,
                                     -BCH_ERR_btree_node_read_err_must_retry,
-                                    c, ca, b, NULL,
+                                    c, ca, b, NULL, NULL,
                                     bset_bad_seq,
                                     "incorrect sequence number (wrong btree node)");
                }
 
                btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
                             -BCH_ERR_btree_node_read_err_must_retry,
-                            c, ca, b, i,
+                            c, ca, b, i, NULL,
                             btree_node_bad_btree,
                             "incorrect btree id");
 
                btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
                             -BCH_ERR_btree_node_read_err_must_retry,
-                            c, ca, b, i,
+                            c, ca, b, i, NULL,
                             btree_node_bad_level,
                             "incorrect level");
 
@@ -793,7 +798,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 
                        btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
                                     -BCH_ERR_btree_node_read_err_must_retry,
-                                    c, ca, b, NULL,
+                                    c, ca, b, NULL, NULL,
                                     btree_node_bad_min_key,
                                     "incorrect min_key: got %s should be %s",
                                     (printbuf_reset(&buf1),
@@ -804,7 +809,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 
                btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
                             -BCH_ERR_btree_node_read_err_must_retry,
-                            c, ca, b, i,
+                            c, ca, b, i, NULL,
                             btree_node_bad_max_key,
                             "incorrect max key %s",
                             (printbuf_reset(&buf1),
@@ -816,7 +821,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 
                btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
                             -BCH_ERR_btree_node_read_err_bad_node,
-                            c, ca, b, i,
+                            c, ca, b, i, NULL,
                             btree_node_bad_format,
                             "invalid bkey format: %s\n  %s", buf1.buf,
                             (printbuf_reset(&buf2),
@@ -883,7 +888,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 
                if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
                                 -BCH_ERR_btree_node_read_err_fixable,
-                                c, NULL, b, i,
+                                c, NULL, b, i, k,
                                 btree_node_bkey_past_bset_end,
                                 "key extends past end of bset")) {
                        i->u64s = cpu_to_le16((u64 *) k - i->_data);
@@ -892,14 +897,14 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 
                if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
                                 -BCH_ERR_btree_node_read_err_fixable,
-                                c, NULL, b, i,
+                                c, NULL, b, i, k,
                                 btree_node_bkey_bad_format,
                                 "invalid bkey format %u", k->format))
                        goto drop_this_key;
 
                if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
                                 -BCH_ERR_btree_node_read_err_fixable,
-                                c, NULL, b, i,
+                                c, NULL, b, i, k,
                                 btree_node_bkey_bad_u64s,
                                 "bad k->u64s %u (min %u max %zu)", k->u64s,
                                 bkeyp_key_u64s(&b->format, k),
@@ -921,7 +926,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                        bch2_bkey_val_to_text(&buf, c, u.s_c);
 
                        btree_err(-BCH_ERR_btree_node_read_err_fixable,
-                                 c, NULL, b, i,
+                                 c, NULL, b, i, k,
                                  btree_node_bad_bkey,
                                  "invalid bkey: %s", buf.buf);
                        goto drop_this_key;
@@ -942,7 +947,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                        bch2_bkey_to_text(&buf, u.k);
 
                        if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
-                                     c, NULL, b, i,
+                                     c, NULL, b, i, k,
                                      btree_node_bkey_out_of_order,
                                      "%s", buf.buf))
                                goto drop_this_key;
@@ -1011,13 +1016,13 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
        if (bch2_meta_read_fault("btree"))
                btree_err(-BCH_ERR_btree_node_read_err_must_retry,
-                         c, ca, b, NULL,
+                         c, ca, b, NULL, NULL,
                          btree_node_fault_injected,
                          "dynamic fault");
 
        btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
                     -BCH_ERR_btree_node_read_err_must_retry,
-                    c, ca, b, NULL,
+                    c, ca, b, NULL, NULL,
                     btree_node_bad_magic,
                     "bad magic: want %llx, got %llx",
                     bset_magic(c), le64_to_cpu(b->data->magic));
@@ -1032,7 +1037,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
                btree_err_on(b->data->keys.seq != bp->seq,
                             -BCH_ERR_btree_node_read_err_must_retry,
-                            c, ca, b, NULL,
+                            c, ca, b, NULL, NULL,
                             btree_node_bad_seq,
                             "got wrong btree node: got\n%s",
                             (printbuf_reset(&buf),
@@ -1041,7 +1046,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
        } else {
                btree_err_on(!b->data->keys.seq,
                             -BCH_ERR_btree_node_read_err_must_retry,
-                            c, ca, b, NULL,
+                            c, ca, b, NULL, NULL,
                             btree_node_bad_seq,
                             "bad btree header: seq 0\n%s",
                             (printbuf_reset(&buf),
@@ -1060,7 +1065,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
                        btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
                                     -BCH_ERR_btree_node_read_err_want_retry,
-                                    c, ca, b, i,
+                                    c, ca, b, i, NULL,
                                     bset_unknown_csum,
                                     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
 
@@ -1073,7 +1078,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
                        btree_err_on(csum_bad,
                                     -BCH_ERR_btree_node_read_err_want_retry,
-                                    c, ca, b, i,
+                                    c, ca, b, i, NULL,
                                     bset_bad_csum,
                                     "%s",
                                     (printbuf_reset(&buf),
@@ -1088,7 +1093,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                        btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
                                     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
                                     -BCH_ERR_btree_node_read_err_incompatible,
-                                    c, NULL, b, NULL,
+                                    c, NULL, b, NULL, NULL,
                                     btree_node_unsupported_version,
                                     "btree node does not have NEW_EXTENT_OVERWRITE set");
 
@@ -1102,7 +1107,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
                        btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
                                     -BCH_ERR_btree_node_read_err_want_retry,
-                                    c, ca, b, i,
+                                    c, ca, b, i, NULL,
                                     bset_unknown_csum,
                                     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
 
@@ -1114,7 +1119,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
                        btree_err_on(csum_bad,
                                     -BCH_ERR_btree_node_read_err_want_retry,
-                                    c, ca, b, i,
+                                    c, ca, b, i, NULL,
                                     bset_bad_csum,
                                     "%s",
                                     (printbuf_reset(&buf),
@@ -1152,14 +1157,14 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
                btree_err_on(blacklisted && first,
                             -BCH_ERR_btree_node_read_err_fixable,
-                            c, ca, b, i,
+                            c, ca, b, i, NULL,
                             bset_blacklisted_journal_seq,
                             "first btree node bset has blacklisted journal seq (%llu)",
                             le64_to_cpu(i->journal_seq));
 
                btree_err_on(blacklisted && ptr_written,
                             -BCH_ERR_btree_node_read_err_fixable,
-                            c, ca, b, i,
+                            c, ca, b, i, NULL,
                             first_bset_blacklisted_journal_seq,
                             "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
                             le64_to_cpu(i->journal_seq),
@@ -1178,7 +1183,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
        if (ptr_written) {
                btree_err_on(b->written < ptr_written,
                             -BCH_ERR_btree_node_read_err_want_retry,
-                            c, ca, b, NULL,
+                            c, ca, b, NULL, NULL,
                             btree_node_data_missing,
                             "btree node data missing: expected %u sectors, found %u",
                             ptr_written, b->written);
@@ -1191,7 +1196,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                                                                      le64_to_cpu(bne->keys.journal_seq),
                                                                      true),
                                     -BCH_ERR_btree_node_read_err_want_retry,
-                                    c, ca, b, NULL,
+                                    c, ca, b, NULL, NULL,
                                     btree_node_bset_after_end,
                                     "found bset signature after last bset");
        }
@@ -1235,7 +1240,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                        bch2_bkey_val_to_text(&buf, c, u.s_c);
 
                        btree_err(-BCH_ERR_btree_node_read_err_fixable,
-                                 c, NULL, b, i,
+                                 c, NULL, b, i, k,
                                  btree_node_bad_bkey,
                                  "%s", buf.buf);
 
@@ -1471,18 +1476,18 @@ static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
 
                written2 = btree_node_sectors_written(c, ra->buf[i]);
                if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
-                                c, NULL, b, NULL,
+                                c, NULL, b, NULL, NULL,
                                 btree_node_replicas_sectors_written_mismatch,
                                 "btree node sectors written mismatch: %u != %u",
                                 written, written2) ||
                    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
                                 -BCH_ERR_btree_node_read_err_fixable,
-                                c, NULL, b, NULL,
+                                c, NULL, b, NULL, NULL,
                                 btree_node_bset_after_end,
                                 "found bset signature after last bset") ||
                    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
                                 -BCH_ERR_btree_node_read_err_fixable,
-                                c, NULL, b, NULL,
+                                c, NULL, b, NULL, NULL,
                                 btree_node_replicas_data_mismatch,
                                 "btree node replicas content mismatch"))
                        dump_bset_maps = true;
index 75f5e6fe46349f0ed0a2d3b3b4611883d69b6700..34056aaece009e80c1ef2bd39d304f8b185ce68d 100644 (file)
@@ -424,16 +424,16 @@ static int btree_key_cache_fill(struct btree_trans *trans,
                                goto err;
                        }
 
-                       if (!bch2_btree_node_relock(trans, ck_path, 0)) {
+                       ret = bch2_trans_relock(trans);
+                       if (ret) {
                                kfree(new_k);
-                               trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
-                               ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
                                goto err;
                        }
 
-                       ret = bch2_trans_relock(trans);
-                       if (ret) {
+                       if (!bch2_btree_node_relock(trans, ck_path, 0)) {
                                kfree(new_k);
+                               trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
+                               ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
                                goto err;
                        }
                }
index c3e9b0cc7bbdc9ece6ee8da4966a43838f64df9e..d66fff22109ae791b3a803209bc263c8d899a864 100644 (file)
@@ -215,6 +215,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
 
        if (unlikely(!best)) {
                struct printbuf buf = PRINTBUF;
+               buf.atomic++;
 
                prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));
 
index b469586517a86dbbf60110838208d3633f55cbe6..ed97712d0db1edc1b9564700342f2f8ffca06361 100644 (file)
@@ -1134,7 +1134,7 @@ static int __trigger_extent(struct btree_trans *trans,
        r.e.nr_required = 1;
 
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-               s64 disk_sectors;
+               s64 disk_sectors = 0;
                ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
                if (ret < 0)
                        return ret;
diff --git a/fs/bcachefs/disk_groups_format.h b/fs/bcachefs/disk_groups_format.h
new file mode 100644 (file)
index 0000000..698990b
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_DISK_GROUPS_FORMAT_H
+#define _BCACHEFS_DISK_GROUPS_FORMAT_H
+
+#define BCH_SB_LABEL_SIZE              32
+
+struct bch_disk_group {
+       __u8                    label[BCH_SB_LABEL_SIZE];
+       __le64                  flags[2];
+} __packed __aligned(8);
+
+LE64_BITMASK(BCH_GROUP_DELETED,                struct bch_disk_group, flags[0], 0,  1)
+LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,   struct bch_disk_group, flags[0], 1,  6)
+LE64_BITMASK(BCH_GROUP_PARENT,         struct bch_disk_group, flags[0], 6, 24)
+
+struct bch_sb_field_disk_groups {
+       struct bch_sb_field     field;
+       struct bch_disk_group   entries[];
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_DISK_GROUPS_FORMAT_H */
index b26dc7424662390b07c4d205d245403fbd1cde69..d8b9beca377627200d1bda8ffd75136ff5fdaa25 100644 (file)
@@ -908,7 +908,7 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
        if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
                return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
 
-       if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
+       if (c->gc_pos.phase != GC_PHASE_not_running &&
            !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
                return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
 
index 6b69e5cd68dd514bc51c3d7ae65b7977fad7fb3d..54873ecc635cb03176feb921badda3229ed525fd 100644 (file)
@@ -437,8 +437,8 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
         */
 
        /*
-        * PageWriteback is effectively our ref on the inode - fixup i_blocks
-        * before calling end_page_writeback:
+        * The writeback flag is effectively our ref on the inode -
+        * fixup i_blocks before calling folio_end_writeback:
         */
        bch2_i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
 
@@ -898,7 +898,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
        darray_for_each(fs, fi) {
                f = *fi;
                f_len = min(end, folio_end_pos(f)) - f_pos;
-               f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
+               f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
                if (!f_copied) {
                        folios_trunc(&fs, fi);
                        break;
index 09d21aef879af89bbdadbfabcc71bc630604e069..049b61bc9a5b38e491c367d9a6b5a0f9c6567426 100644 (file)
@@ -609,8 +609,10 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
        if (unlikely(ret))
                goto err_put_write_ref;
 
-       if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
+       if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) {
+               ret = -EINVAL;
                goto err_put_write_ref;
+       }
 
        inode_dio_begin(&inode->v);
        bch2_pagecache_block_get(inode);
index 96040a95cf4667069ad8dac4cc02a56f45382dc8..cd388f1702dc85a98f2d5b05db518ea66e77243a 100644 (file)
@@ -1939,8 +1939,7 @@ got_sb:
 
        if (IS_ERR(sb)) {
                ret = PTR_ERR(sb);
-               ret = bch2_err_class(ret);
-               return ERR_PTR(ret);
+               goto err;
        }
 
        c = sb->s_fs_info;
@@ -2016,6 +2015,15 @@ out:
 err_put_super:
        __bch2_fs_stop(c);
        deactivate_locked_super(sb);
+err:
+       /*
+        * On an inconsistency error in recovery we might see an -EROFS derived
+        * errorcode (from the journal), but we don't want to return that to
+        * userspace as that causes util-linux to retry the mount RO - which is
+        * confusing:
+        */
+       if (bch2_err_matches(ret, EROFS) && ret != -EROFS)
+               ret = -EIO;
        return ERR_PTR(bch2_err_class(ret));
 }
 
index c8f57465131c54becd0cb745d7ce2b54b1e87cd1..fd277bd58ed34afe3ad5a86f93210ea621756b62 100644 (file)
@@ -77,21 +77,17 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
        struct bkey_s_c k;
        int ret;
 
-       bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
-                            POS(0, inode_nr),
-                            BTREE_ITER_all_snapshots);
-       k = bch2_btree_iter_peek(&iter);
-       ret = bkey_err(k);
-       if (ret)
-               goto err;
-
-       if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
-               ret = -BCH_ERR_ENOENT_inode;
-               goto err;
+       for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inode_nr),
+                                    BTREE_ITER_all_snapshots, k, ret) {
+               if (k.k->p.offset != inode_nr)
+                       break;
+               if (!bkey_is_inode(k.k))
+                       continue;
+               ret = bch2_inode_unpack(k, inode);
+               goto found;
        }
-
-       ret = bch2_inode_unpack(k, inode);
-err:
+       ret = -BCH_ERR_ENOENT_inode;
+found:
        bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
@@ -770,25 +766,6 @@ static int get_visible_inodes(struct btree_trans *trans,
        return ret;
 }
 
-static int check_key_has_snapshot(struct btree_trans *trans,
-                                 struct btree_iter *iter,
-                                 struct bkey_s_c k)
-{
-       struct bch_fs *c = trans->c;
-       struct printbuf buf = PRINTBUF;
-       int ret = 0;
-
-       if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
-                               bkey_in_missing_snapshot,
-                               "key in missing snapshot: %s",
-                               (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
-               ret = bch2_btree_delete_at(trans, iter,
-                                           BTREE_UPDATE_internal_snapshot_node) ?: 1;
-fsck_err:
-       printbuf_exit(&buf);
-       return ret;
-}
-
 static int hash_redo_key(struct btree_trans *trans,
                         const struct bch_hash_desc desc,
                         struct bch_hash_info *hash_info,
@@ -983,7 +960,7 @@ static int check_inode(struct btree_trans *trans,
        bool do_update = false;
        int ret;
 
-       ret = check_key_has_snapshot(trans, iter, k);
+       ret = bch2_check_key_has_snapshot(trans, iter, k);
        if (ret < 0)
                goto err;
        if (ret)
@@ -1487,7 +1464,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
        struct printbuf buf = PRINTBUF;
        int ret = 0;
 
-       ret = check_key_has_snapshot(trans, iter, k);
+       ret = bch2_check_key_has_snapshot(trans, iter, k);
        if (ret) {
                ret = ret < 0 ? ret : 0;
                goto out;
@@ -2010,7 +1987,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
        struct printbuf buf = PRINTBUF;
        int ret = 0;
 
-       ret = check_key_has_snapshot(trans, iter, k);
+       ret = bch2_check_key_has_snapshot(trans, iter, k);
        if (ret) {
                ret = ret < 0 ? ret : 0;
                goto out;
@@ -2165,7 +2142,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
        struct inode_walker_entry *i;
        int ret;
 
-       ret = check_key_has_snapshot(trans, iter, k);
+       ret = bch2_check_key_has_snapshot(trans, iter, k);
        if (ret < 0)
                return ret;
        if (ret)
diff --git a/fs/bcachefs/journal_seq_blacklist_format.h b/fs/bcachefs/journal_seq_blacklist_format.h
new file mode 100644 (file)
index 0000000..2566b12
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H
+#define _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H
+
+struct journal_seq_blacklist_entry {
+       __le64                  start;
+       __le64                  end;
+};
+
+struct bch_sb_field_journal_seq_blacklist {
+       struct bch_sb_field     field;
+       struct journal_seq_blacklist_entry start[];
+};
+
+#endif /* _BCACHEFS_JOURNAL_SEQ_BLACKLIST_FORMAT_H */
index 4c298e74723db3023b9120cf1f823e46bfbaec4c..e9d9c0212e44b155322df9d2d6c9996042f8a452 100644 (file)
@@ -217,4 +217,5 @@ static struct kunit_suite mean_and_variance_test_suite = {
 kunit_test_suite(mean_and_variance_test_suite);
 
 MODULE_AUTHOR("Daniel B. Hill");
+MODULE_DESCRIPTION("bcachefs filesystem mean and variance unit tests");
 MODULE_LICENSE("GPL");
index 8171f947fac8f8d75f376d2df4a4ac341a0645d1..6e477fadaa2a5b546e191f4da614e8d74722bd47 100644 (file)
@@ -547,6 +547,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
                ctxt->stats->pos        = BBPOS(btree_id, start);
        }
 
+       bch2_trans_begin(trans);
        bch2_trans_iter_init(trans, &iter, btree_id, start,
                             BTREE_ITER_prefetch|
                             BTREE_ITER_all_snapshots);
@@ -920,7 +921,20 @@ static bool rereplicate_pred(struct bch_fs *c, void *arg,
                ? c->opts.metadata_replicas
                : io_opts->data_replicas;
 
-       if (!nr_good || nr_good >= replicas)
+       rcu_read_lock();
+       struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+       unsigned i = 0;
+       bkey_for_each_ptr(ptrs, ptr) {
+               struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+               if (!ptr->cached &&
+                   (!ca || !ca->mi.durability))
+                       data_opts->kill_ptrs |= BIT(i);
+               i++;
+       }
+       rcu_read_unlock();
+
+       if (!data_opts->kill_ptrs &&
+           (!nr_good || nr_good >= replicas))
                return false;
 
        data_opts->target               = 0;
diff --git a/fs/bcachefs/replicas_format.h b/fs/bcachefs/replicas_format.h
new file mode 100644 (file)
index 0000000..b972081
--- /dev/null
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_REPLICAS_FORMAT_H
+#define _BCACHEFS_REPLICAS_FORMAT_H
+
+struct bch_replicas_entry_v0 {
+       __u8                    data_type;
+       __u8                    nr_devs;
+       __u8                    devs[];
+} __packed;
+
+struct bch_sb_field_replicas_v0 {
+       struct bch_sb_field     field;
+       struct bch_replicas_entry_v0 entries[];
+} __packed __aligned(8);
+
+struct bch_replicas_entry_v1 {
+       __u8                    data_type;
+       __u8                    nr_devs;
+       __u8                    nr_required;
+       __u8                    devs[];
+} __packed;
+
+struct bch_sb_field_replicas {
+       struct bch_sb_field     field;
+       struct bch_replicas_entry_v1 entries[];
+} __packed __aligned(8);
+
+#define replicas_entry_bytes(_i)                                       \
+       (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
+
+#endif /* _BCACHEFS_REPLICAS_FORMAT_H */
index 390a1bbd2567ac271f3c69b505753f969d0508eb..3fb23e399ffb3e6c021b8825fe3403e5a048ba4c 100644 (file)
@@ -146,10 +146,17 @@ static int bch2_sb_downgrade_validate(struct bch_sb *sb, struct bch_sb_field *f,
        for (const struct bch_sb_field_downgrade_entry *i = e->entries;
             (void *) i < vstruct_end(&e->field);
             i = downgrade_entry_next_c(i)) {
+               /*
+                * Careful: sb_field_downgrade_entry is only 2 byte aligned, but
+                * section sizes are 8 byte aligned - an empty entry spanning
+                * the end of the section is allowed (and ignored):
+                */
+               if ((void *) &i->errors[0] > vstruct_end(&e->field))
+                       break;
+
                if (flags & BCH_VALIDATE_write &&
-                   ((void *) &i->errors[0] > vstruct_end(&e->field) ||
-                    (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field))) {
-                       prt_printf(err, "downgrade entry overruns end of superblock section)");
+                   (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field)) {
+                       prt_printf(err, "downgrade entry overruns end of superblock section");
                        return -BCH_ERR_invalid_sb_downgrade;
                }
 
diff --git a/fs/bcachefs/sb-downgrade_format.h b/fs/bcachefs/sb-downgrade_format.h
new file mode 100644 (file)
index 0000000..cffd932
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_DOWNGRADE_FORMAT_H
+#define _BCACHEFS_SB_DOWNGRADE_FORMAT_H
+
+struct bch_sb_field_downgrade_entry {
+       __le16                  version;
+       __le64                  recovery_passes[2];
+       __le16                  nr_errors;
+       __le16                  errors[] __counted_by(nr_errors);
+} __packed __aligned(2);
+
+struct bch_sb_field_downgrade {
+       struct bch_sb_field     field;
+       struct bch_sb_field_downgrade_entry entries[];
+};
+
+#endif /* _BCACHEFS_SB_DOWNGRADE_FORMAT_H */
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
new file mode 100644 (file)
index 0000000..84d2763
--- /dev/null
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_ERRORS_FORMAT_H
+#define _BCACHEFS_SB_ERRORS_FORMAT_H
+
+#define BCH_SB_ERRS()                                                  \
+       x(clean_but_journal_not_empty,                          0)      \
+       x(dirty_but_no_journal_entries,                         1)      \
+       x(dirty_but_no_journal_entries_post_drop_nonflushes,    2)      \
+       x(sb_clean_journal_seq_mismatch,                        3)      \
+       x(sb_clean_btree_root_mismatch,                         4)      \
+       x(sb_clean_missing,                                     5)      \
+       x(jset_unsupported_version,                             6)      \
+       x(jset_unknown_csum,                                    7)      \
+       x(jset_last_seq_newer_than_seq,                         8)      \
+       x(jset_past_bucket_end,                                 9)      \
+       x(jset_seq_blacklisted,                                 10)     \
+       x(journal_entries_missing,                              11)     \
+       x(journal_entry_replicas_not_marked,                    12)     \
+       x(journal_entry_past_jset_end,                          13)     \
+       x(journal_entry_replicas_data_mismatch,                 14)     \
+       x(journal_entry_bkey_u64s_0,                            15)     \
+       x(journal_entry_bkey_past_end,                          16)     \
+       x(journal_entry_bkey_bad_format,                        17)     \
+       x(journal_entry_bkey_invalid,                           18)     \
+       x(journal_entry_btree_root_bad_size,                    19)     \
+       x(journal_entry_blacklist_bad_size,                     20)     \
+       x(journal_entry_blacklist_v2_bad_size,                  21)     \
+       x(journal_entry_blacklist_v2_start_past_end,            22)     \
+       x(journal_entry_usage_bad_size,                         23)     \
+       x(journal_entry_data_usage_bad_size,                    24)     \
+       x(journal_entry_clock_bad_size,                         25)     \
+       x(journal_entry_clock_bad_rw,                           26)     \
+       x(journal_entry_dev_usage_bad_size,                     27)     \
+       x(journal_entry_dev_usage_bad_dev,                      28)     \
+       x(journal_entry_dev_usage_bad_pad,                      29)     \
+       x(btree_node_unreadable,                                30)     \
+       x(btree_node_fault_injected,                            31)     \
+       x(btree_node_bad_magic,                                 32)     \
+       x(btree_node_bad_seq,                                   33)     \
+       x(btree_node_unsupported_version,                       34)     \
+       x(btree_node_bset_older_than_sb_min,                    35)     \
+       x(btree_node_bset_newer_than_sb,                        36)     \
+       x(btree_node_data_missing,                              37)     \
+       x(btree_node_bset_after_end,                            38)     \
+       x(btree_node_replicas_sectors_written_mismatch,         39)     \
+       x(btree_node_replicas_data_mismatch,                    40)     \
+       x(bset_unknown_csum,                                    41)     \
+       x(bset_bad_csum,                                        42)     \
+       x(bset_past_end_of_btree_node,                          43)     \
+       x(bset_wrong_sector_offset,                             44)     \
+       x(bset_empty,                                           45)     \
+       x(bset_bad_seq,                                         46)     \
+       x(bset_blacklisted_journal_seq,                         47)     \
+       x(first_bset_blacklisted_journal_seq,                   48)     \
+       x(btree_node_bad_btree,                                 49)     \
+       x(btree_node_bad_level,                                 50)     \
+       x(btree_node_bad_min_key,                               51)     \
+       x(btree_node_bad_max_key,                               52)     \
+       x(btree_node_bad_format,                                53)     \
+       x(btree_node_bkey_past_bset_end,                        54)     \
+       x(btree_node_bkey_bad_format,                           55)     \
+       x(btree_node_bad_bkey,                                  56)     \
+       x(btree_node_bkey_out_of_order,                         57)     \
+       x(btree_root_bkey_invalid,                              58)     \
+       x(btree_root_read_error,                                59)     \
+       x(btree_root_bad_min_key,                               60)     \
+       x(btree_root_bad_max_key,                               61)     \
+       x(btree_node_read_error,                                62)     \
+       x(btree_node_topology_bad_min_key,                      63)     \
+       x(btree_node_topology_bad_max_key,                      64)     \
+       x(btree_node_topology_overwritten_by_prev_node,         65)     \
+       x(btree_node_topology_overwritten_by_next_node,         66)     \
+       x(btree_node_topology_interior_node_empty,              67)     \
+       x(fs_usage_hidden_wrong,                                68)     \
+       x(fs_usage_btree_wrong,                                 69)     \
+       x(fs_usage_data_wrong,                                  70)     \
+       x(fs_usage_cached_wrong,                                71)     \
+       x(fs_usage_reserved_wrong,                              72)     \
+       x(fs_usage_persistent_reserved_wrong,                   73)     \
+       x(fs_usage_nr_inodes_wrong,                             74)     \
+       x(fs_usage_replicas_wrong,                              75)     \
+       x(dev_usage_buckets_wrong,                              76)     \
+       x(dev_usage_sectors_wrong,                              77)     \
+       x(dev_usage_fragmented_wrong,                           78)     \
+       x(dev_usage_buckets_ec_wrong,                           79)     \
+       x(bkey_version_in_future,                               80)     \
+       x(bkey_u64s_too_small,                                  81)     \
+       x(bkey_invalid_type_for_btree,                          82)     \
+       x(bkey_extent_size_zero,                                83)     \
+       x(bkey_extent_size_greater_than_offset,                 84)     \
+       x(bkey_size_nonzero,                                    85)     \
+       x(bkey_snapshot_nonzero,                                86)     \
+       x(bkey_snapshot_zero,                                   87)     \
+       x(bkey_at_pos_max,                                      88)     \
+       x(bkey_before_start_of_btree_node,                      89)     \
+       x(bkey_after_end_of_btree_node,                         90)     \
+       x(bkey_val_size_nonzero,                                91)     \
+       x(bkey_val_size_too_small,                              92)     \
+       x(alloc_v1_val_size_bad,                                93)     \
+       x(alloc_v2_unpack_error,                                94)     \
+       x(alloc_v3_unpack_error,                                95)     \
+       x(alloc_v4_val_size_bad,                                96)     \
+       x(alloc_v4_backpointers_start_bad,                      97)     \
+       x(alloc_key_data_type_bad,                              98)     \
+       x(alloc_key_empty_but_have_data,                        99)     \
+       x(alloc_key_dirty_sectors_0,                            100)    \
+       x(alloc_key_data_type_inconsistency,                    101)    \
+       x(alloc_key_to_missing_dev_bucket,                      102)    \
+       x(alloc_key_cached_inconsistency,                       103)    \
+       x(alloc_key_cached_but_read_time_zero,                  104)    \
+       x(alloc_key_to_missing_lru_entry,                       105)    \
+       x(alloc_key_data_type_wrong,                            106)    \
+       x(alloc_key_gen_wrong,                                  107)    \
+       x(alloc_key_dirty_sectors_wrong,                        108)    \
+       x(alloc_key_cached_sectors_wrong,                       109)    \
+       x(alloc_key_stripe_wrong,                               110)    \
+       x(alloc_key_stripe_redundancy_wrong,                    111)    \
+       x(bucket_sector_count_overflow,                         112)    \
+       x(bucket_metadata_type_mismatch,                        113)    \
+       x(need_discard_key_wrong,                               114)    \
+       x(freespace_key_wrong,                                  115)    \
+       x(freespace_hole_missing,                               116)    \
+       x(bucket_gens_val_size_bad,                             117)    \
+       x(bucket_gens_key_wrong,                                118)    \
+       x(bucket_gens_hole_wrong,                               119)    \
+       x(bucket_gens_to_invalid_dev,                           120)    \
+       x(bucket_gens_to_invalid_buckets,                       121)    \
+       x(bucket_gens_nonzero_for_invalid_buckets,              122)    \
+       x(need_discard_freespace_key_to_invalid_dev_bucket,     123)    \
+       x(need_discard_freespace_key_bad,                       124)    \
+       x(backpointer_bucket_offset_wrong,                      125)    \
+       x(backpointer_to_missing_device,                        126)    \
+       x(backpointer_to_missing_alloc,                         127)    \
+       x(backpointer_to_missing_ptr,                           128)    \
+       x(lru_entry_at_time_0,                                  129)    \
+       x(lru_entry_to_invalid_bucket,                          130)    \
+       x(lru_entry_bad,                                        131)    \
+       x(btree_ptr_val_too_big,                                132)    \
+       x(btree_ptr_v2_val_too_big,                             133)    \
+       x(btree_ptr_has_non_ptr,                                134)    \
+       x(extent_ptrs_invalid_entry,                            135)    \
+       x(extent_ptrs_no_ptrs,                                  136)    \
+       x(extent_ptrs_too_many_ptrs,                            137)    \
+       x(extent_ptrs_redundant_crc,                            138)    \
+       x(extent_ptrs_redundant_stripe,                         139)    \
+       x(extent_ptrs_unwritten,                                140)    \
+       x(extent_ptrs_written_and_unwritten,                    141)    \
+       x(ptr_to_invalid_device,                                142)    \
+       x(ptr_to_duplicate_device,                              143)    \
+       x(ptr_after_last_bucket,                                144)    \
+       x(ptr_before_first_bucket,                              145)    \
+       x(ptr_spans_multiple_buckets,                           146)    \
+       x(ptr_to_missing_backpointer,                           147)    \
+       x(ptr_to_missing_alloc_key,                             148)    \
+       x(ptr_to_missing_replicas_entry,                        149)    \
+       x(ptr_to_missing_stripe,                                150)    \
+       x(ptr_to_incorrect_stripe,                              151)    \
+       x(ptr_gen_newer_than_bucket_gen,                        152)    \
+       x(ptr_too_stale,                                        153)    \
+       x(stale_dirty_ptr,                                      154)    \
+       x(ptr_bucket_data_type_mismatch,                        155)    \
+       x(ptr_cached_and_erasure_coded,                         156)    \
+       x(ptr_crc_uncompressed_size_too_small,                  157)    \
+       x(ptr_crc_csum_type_unknown,                            158)    \
+       x(ptr_crc_compression_type_unknown,                     159)    \
+       x(ptr_crc_redundant,                                    160)    \
+       x(ptr_crc_uncompressed_size_too_big,                    161)    \
+       x(ptr_crc_nonce_mismatch,                               162)    \
+       x(ptr_stripe_redundant,                                 163)    \
+       x(reservation_key_nr_replicas_invalid,                  164)    \
+       x(reflink_v_refcount_wrong,                             165)    \
+       x(reflink_p_to_missing_reflink_v,                       166)    \
+       x(stripe_pos_bad,                                       167)    \
+       x(stripe_val_size_bad,                                  168)    \
+       x(stripe_sector_count_wrong,                            169)    \
+       x(snapshot_tree_pos_bad,                                170)    \
+       x(snapshot_tree_to_missing_snapshot,                    171)    \
+       x(snapshot_tree_to_missing_subvol,                      172)    \
+       x(snapshot_tree_to_wrong_subvol,                        173)    \
+       x(snapshot_tree_to_snapshot_subvol,                     174)    \
+       x(snapshot_pos_bad,                                     175)    \
+       x(snapshot_parent_bad,                                  176)    \
+       x(snapshot_children_not_normalized,                     177)    \
+       x(snapshot_child_duplicate,                             178)    \
+       x(snapshot_child_bad,                                   179)    \
+       x(snapshot_skiplist_not_normalized,                     180)    \
+       x(snapshot_skiplist_bad,                                181)    \
+       x(snapshot_should_not_have_subvol,                      182)    \
+       x(snapshot_to_bad_snapshot_tree,                        183)    \
+       x(snapshot_bad_depth,                                   184)    \
+       x(snapshot_bad_skiplist,                                185)    \
+       x(subvol_pos_bad,                                       186)    \
+       x(subvol_not_master_and_not_snapshot,                   187)    \
+       x(subvol_to_missing_root,                               188)    \
+       x(subvol_root_wrong_bi_subvol,                          189)    \
+       x(bkey_in_missing_snapshot,                             190)    \
+       x(inode_pos_inode_nonzero,                              191)    \
+       x(inode_pos_blockdev_range,                             192)    \
+       x(inode_unpack_error,                                   193)    \
+       x(inode_str_hash_invalid,                               194)    \
+       x(inode_v3_fields_start_bad,                            195)    \
+       x(inode_snapshot_mismatch,                              196)    \
+       x(inode_unlinked_but_clean,                             197)    \
+       x(inode_unlinked_but_nlink_nonzero,                     198)    \
+       x(inode_checksum_type_invalid,                          199)    \
+       x(inode_compression_type_invalid,                       200)    \
+       x(inode_subvol_root_but_not_dir,                        201)    \
+       x(inode_i_size_dirty_but_clean,                         202)    \
+       x(inode_i_sectors_dirty_but_clean,                      203)    \
+       x(inode_i_sectors_wrong,                                204)    \
+       x(inode_dir_wrong_nlink,                                205)    \
+       x(inode_dir_multiple_links,                             206)    \
+       x(inode_multiple_links_but_nlink_0,                     207)    \
+       x(inode_wrong_backpointer,                              208)    \
+       x(inode_wrong_nlink,                                    209)    \
+       x(inode_unreachable,                                    210)    \
+       x(deleted_inode_but_clean,                              211)    \
+       x(deleted_inode_missing,                                212)    \
+       x(deleted_inode_is_dir,                                 213)    \
+       x(deleted_inode_not_unlinked,                           214)    \
+       x(extent_overlapping,                                   215)    \
+       x(extent_in_missing_inode,                              216)    \
+       x(extent_in_non_reg_inode,                              217)    \
+       x(extent_past_end_of_inode,                             218)    \
+       x(dirent_empty_name,                                    219)    \
+       x(dirent_val_too_big,                                   220)    \
+       x(dirent_name_too_long,                                 221)    \
+       x(dirent_name_embedded_nul,                             222)    \
+       x(dirent_name_dot_or_dotdot,                            223)    \
+       x(dirent_name_has_slash,                                224)    \
+       x(dirent_d_type_wrong,                                  225)    \
+       x(inode_bi_parent_wrong,                                226)    \
+       x(dirent_in_missing_dir_inode,                          227)    \
+       x(dirent_in_non_dir_inode,                              228)    \
+       x(dirent_to_missing_inode,                              229)    \
+       x(dirent_to_missing_subvol,                             230)    \
+       x(dirent_to_itself,                                     231)    \
+       x(quota_type_invalid,                                   232)    \
+       x(xattr_val_size_too_small,                             233)    \
+       x(xattr_val_size_too_big,                               234)    \
+       x(xattr_invalid_type,                                   235)    \
+       x(xattr_name_invalid_chars,                             236)    \
+       x(xattr_in_missing_inode,                               237)    \
+       x(root_subvol_missing,                                  238)    \
+       x(root_dir_missing,                                     239)    \
+       x(root_inode_not_dir,                                   240)    \
+       x(dir_loop,                                             241)    \
+       x(hash_table_key_duplicate,                             242)    \
+       x(hash_table_key_wrong_offset,                          243)    \
+       x(unlinked_inode_not_on_deleted_list,                   244)    \
+       x(reflink_p_front_pad_bad,                              245)    \
+       x(journal_entry_dup_same_device,                        246)    \
+       x(inode_bi_subvol_missing,                              247)    \
+       x(inode_bi_subvol_wrong,                                248)    \
+       x(inode_points_to_missing_dirent,                       249)    \
+       x(inode_points_to_wrong_dirent,                         250)    \
+       x(inode_bi_parent_nonzero,                              251)    \
+       x(dirent_to_missing_parent_subvol,                      252)    \
+       x(dirent_not_visible_in_parent_subvol,                  253)    \
+       x(subvol_fs_path_parent_wrong,                          254)    \
+       x(subvol_root_fs_path_parent_nonzero,                   255)    \
+       x(subvol_children_not_set,                              256)    \
+       x(subvol_children_bad,                                  257)    \
+       x(subvol_loop,                                          258)    \
+       x(subvol_unreachable,                                   259)    \
+       x(btree_node_bkey_bad_u64s,                             260)    \
+       x(btree_node_topology_empty_interior_node,              261)    \
+       x(btree_ptr_v2_min_key_bad,                             262)    \
+       x(btree_root_unreadable_and_scan_found_nothing,         263)    \
+       x(snapshot_node_missing,                                264)    \
+       x(dup_backpointer_to_bad_csum_extent,                   265)    \
+       x(btree_bitmap_not_marked,                              266)    \
+       x(sb_clean_entry_overrun,                               267)    \
+       x(btree_ptr_v2_written_0,                               268)    \
+       x(subvol_snapshot_bad,                                  269)    \
+       x(subvol_inode_bad,                                     270)
+
+enum bch_sb_error_id {
+#define x(t, n) BCH_FSCK_ERR_##t = n,
+       BCH_SB_ERRS()
+#undef x
+       BCH_SB_ERR_MAX
+};
+
+struct bch_sb_field_errors {
+       struct bch_sb_field     field;
+       struct bch_sb_field_error_entry {
+               __le64          v;
+               __le64          last_error_time;
+       }                       entries[];
+};
+
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID,    struct bch_sb_field_error_entry, v,  0, 16);
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR,    struct bch_sb_field_error_entry, v, 16, 64);
+
+#endif /* _BCACHEFS_SB_ERRORS_FORMAT_H */
index 666599d3fb9d7d1a684a1efa94f77eab2d9dc0bb..40325239c3b0f23572e67c722b13e631d9a6d78f 100644 (file)
@@ -4,286 +4,6 @@
 
 #include "darray.h"
 
-#define BCH_SB_ERRS()                                                  \
-       x(clean_but_journal_not_empty,                          0)      \
-       x(dirty_but_no_journal_entries,                         1)      \
-       x(dirty_but_no_journal_entries_post_drop_nonflushes,    2)      \
-       x(sb_clean_journal_seq_mismatch,                        3)      \
-       x(sb_clean_btree_root_mismatch,                         4)      \
-       x(sb_clean_missing,                                     5)      \
-       x(jset_unsupported_version,                             6)      \
-       x(jset_unknown_csum,                                    7)      \
-       x(jset_last_seq_newer_than_seq,                         8)      \
-       x(jset_past_bucket_end,                                 9)      \
-       x(jset_seq_blacklisted,                                 10)     \
-       x(journal_entries_missing,                              11)     \
-       x(journal_entry_replicas_not_marked,                    12)     \
-       x(journal_entry_past_jset_end,                          13)     \
-       x(journal_entry_replicas_data_mismatch,                 14)     \
-       x(journal_entry_bkey_u64s_0,                            15)     \
-       x(journal_entry_bkey_past_end,                          16)     \
-       x(journal_entry_bkey_bad_format,                        17)     \
-       x(journal_entry_bkey_invalid,                           18)     \
-       x(journal_entry_btree_root_bad_size,                    19)     \
-       x(journal_entry_blacklist_bad_size,                     20)     \
-       x(journal_entry_blacklist_v2_bad_size,                  21)     \
-       x(journal_entry_blacklist_v2_start_past_end,            22)     \
-       x(journal_entry_usage_bad_size,                         23)     \
-       x(journal_entry_data_usage_bad_size,                    24)     \
-       x(journal_entry_clock_bad_size,                         25)     \
-       x(journal_entry_clock_bad_rw,                           26)     \
-       x(journal_entry_dev_usage_bad_size,                     27)     \
-       x(journal_entry_dev_usage_bad_dev,                      28)     \
-       x(journal_entry_dev_usage_bad_pad,                      29)     \
-       x(btree_node_unreadable,                                30)     \
-       x(btree_node_fault_injected,                            31)     \
-       x(btree_node_bad_magic,                                 32)     \
-       x(btree_node_bad_seq,                                   33)     \
-       x(btree_node_unsupported_version,                       34)     \
-       x(btree_node_bset_older_than_sb_min,                    35)     \
-       x(btree_node_bset_newer_than_sb,                        36)     \
-       x(btree_node_data_missing,                              37)     \
-       x(btree_node_bset_after_end,                            38)     \
-       x(btree_node_replicas_sectors_written_mismatch,         39)     \
-       x(btree_node_replicas_data_mismatch,                    40)     \
-       x(bset_unknown_csum,                                    41)     \
-       x(bset_bad_csum,                                        42)     \
-       x(bset_past_end_of_btree_node,                          43)     \
-       x(bset_wrong_sector_offset,                             44)     \
-       x(bset_empty,                                           45)     \
-       x(bset_bad_seq,                                         46)     \
-       x(bset_blacklisted_journal_seq,                         47)     \
-       x(first_bset_blacklisted_journal_seq,                   48)     \
-       x(btree_node_bad_btree,                                 49)     \
-       x(btree_node_bad_level,                                 50)     \
-       x(btree_node_bad_min_key,                               51)     \
-       x(btree_node_bad_max_key,                               52)     \
-       x(btree_node_bad_format,                                53)     \
-       x(btree_node_bkey_past_bset_end,                        54)     \
-       x(btree_node_bkey_bad_format,                           55)     \
-       x(btree_node_bad_bkey,                                  56)     \
-       x(btree_node_bkey_out_of_order,                         57)     \
-       x(btree_root_bkey_invalid,                              58)     \
-       x(btree_root_read_error,                                59)     \
-       x(btree_root_bad_min_key,                               60)     \
-       x(btree_root_bad_max_key,                               61)     \
-       x(btree_node_read_error,                                62)     \
-       x(btree_node_topology_bad_min_key,                      63)     \
-       x(btree_node_topology_bad_max_key,                      64)     \
-       x(btree_node_topology_overwritten_by_prev_node,         65)     \
-       x(btree_node_topology_overwritten_by_next_node,         66)     \
-       x(btree_node_topology_interior_node_empty,              67)     \
-       x(fs_usage_hidden_wrong,                                68)     \
-       x(fs_usage_btree_wrong,                                 69)     \
-       x(fs_usage_data_wrong,                                  70)     \
-       x(fs_usage_cached_wrong,                                71)     \
-       x(fs_usage_reserved_wrong,                              72)     \
-       x(fs_usage_persistent_reserved_wrong,                   73)     \
-       x(fs_usage_nr_inodes_wrong,                             74)     \
-       x(fs_usage_replicas_wrong,                              75)     \
-       x(dev_usage_buckets_wrong,                              76)     \
-       x(dev_usage_sectors_wrong,                              77)     \
-       x(dev_usage_fragmented_wrong,                           78)     \
-       x(dev_usage_buckets_ec_wrong,                           79)     \
-       x(bkey_version_in_future,                               80)     \
-       x(bkey_u64s_too_small,                                  81)     \
-       x(bkey_invalid_type_for_btree,                          82)     \
-       x(bkey_extent_size_zero,                                83)     \
-       x(bkey_extent_size_greater_than_offset,                 84)     \
-       x(bkey_size_nonzero,                                    85)     \
-       x(bkey_snapshot_nonzero,                                86)     \
-       x(bkey_snapshot_zero,                                   87)     \
-       x(bkey_at_pos_max,                                      88)     \
-       x(bkey_before_start_of_btree_node,                      89)     \
-       x(bkey_after_end_of_btree_node,                         90)     \
-       x(bkey_val_size_nonzero,                                91)     \
-       x(bkey_val_size_too_small,                              92)     \
-       x(alloc_v1_val_size_bad,                                93)     \
-       x(alloc_v2_unpack_error,                                94)     \
-       x(alloc_v3_unpack_error,                                95)     \
-       x(alloc_v4_val_size_bad,                                96)     \
-       x(alloc_v4_backpointers_start_bad,                      97)     \
-       x(alloc_key_data_type_bad,                              98)     \
-       x(alloc_key_empty_but_have_data,                        99)     \
-       x(alloc_key_dirty_sectors_0,                            100)    \
-       x(alloc_key_data_type_inconsistency,                    101)    \
-       x(alloc_key_to_missing_dev_bucket,                      102)    \
-       x(alloc_key_cached_inconsistency,                       103)    \
-       x(alloc_key_cached_but_read_time_zero,                  104)    \
-       x(alloc_key_to_missing_lru_entry,                       105)    \
-       x(alloc_key_data_type_wrong,                            106)    \
-       x(alloc_key_gen_wrong,                                  107)    \
-       x(alloc_key_dirty_sectors_wrong,                        108)    \
-       x(alloc_key_cached_sectors_wrong,                       109)    \
-       x(alloc_key_stripe_wrong,                               110)    \
-       x(alloc_key_stripe_redundancy_wrong,                    111)    \
-       x(bucket_sector_count_overflow,                         112)    \
-       x(bucket_metadata_type_mismatch,                        113)    \
-       x(need_discard_key_wrong,                               114)    \
-       x(freespace_key_wrong,                                  115)    \
-       x(freespace_hole_missing,                               116)    \
-       x(bucket_gens_val_size_bad,                             117)    \
-       x(bucket_gens_key_wrong,                                118)    \
-       x(bucket_gens_hole_wrong,                               119)    \
-       x(bucket_gens_to_invalid_dev,                           120)    \
-       x(bucket_gens_to_invalid_buckets,                       121)    \
-       x(bucket_gens_nonzero_for_invalid_buckets,              122)    \
-       x(need_discard_freespace_key_to_invalid_dev_bucket,     123)    \
-       x(need_discard_freespace_key_bad,                       124)    \
-       x(backpointer_bucket_offset_wrong,                      125)    \
-       x(backpointer_to_missing_device,                        126)    \
-       x(backpointer_to_missing_alloc,                         127)    \
-       x(backpointer_to_missing_ptr,                           128)    \
-       x(lru_entry_at_time_0,                                  129)    \
-       x(lru_entry_to_invalid_bucket,                          130)    \
-       x(lru_entry_bad,                                        131)    \
-       x(btree_ptr_val_too_big,                                132)    \
-       x(btree_ptr_v2_val_too_big,                             133)    \
-       x(btree_ptr_has_non_ptr,                                134)    \
-       x(extent_ptrs_invalid_entry,                            135)    \
-       x(extent_ptrs_no_ptrs,                                  136)    \
-       x(extent_ptrs_too_many_ptrs,                            137)    \
-       x(extent_ptrs_redundant_crc,                            138)    \
-       x(extent_ptrs_redundant_stripe,                         139)    \
-       x(extent_ptrs_unwritten,                                140)    \
-       x(extent_ptrs_written_and_unwritten,                    141)    \
-       x(ptr_to_invalid_device,                                142)    \
-       x(ptr_to_duplicate_device,                              143)    \
-       x(ptr_after_last_bucket,                                144)    \
-       x(ptr_before_first_bucket,                              145)    \
-       x(ptr_spans_multiple_buckets,                           146)    \
-       x(ptr_to_missing_backpointer,                           147)    \
-       x(ptr_to_missing_alloc_key,                             148)    \
-       x(ptr_to_missing_replicas_entry,                        149)    \
-       x(ptr_to_missing_stripe,                                150)    \
-       x(ptr_to_incorrect_stripe,                              151)    \
-       x(ptr_gen_newer_than_bucket_gen,                        152)    \
-       x(ptr_too_stale,                                        153)    \
-       x(stale_dirty_ptr,                                      154)    \
-       x(ptr_bucket_data_type_mismatch,                        155)    \
-       x(ptr_cached_and_erasure_coded,                         156)    \
-       x(ptr_crc_uncompressed_size_too_small,                  157)    \
-       x(ptr_crc_csum_type_unknown,                            158)    \
-       x(ptr_crc_compression_type_unknown,                     159)    \
-       x(ptr_crc_redundant,                                    160)    \
-       x(ptr_crc_uncompressed_size_too_big,                    161)    \
-       x(ptr_crc_nonce_mismatch,                               162)    \
-       x(ptr_stripe_redundant,                                 163)    \
-       x(reservation_key_nr_replicas_invalid,                  164)    \
-       x(reflink_v_refcount_wrong,                             165)    \
-       x(reflink_p_to_missing_reflink_v,                       166)    \
-       x(stripe_pos_bad,                                       167)    \
-       x(stripe_val_size_bad,                                  168)    \
-       x(stripe_sector_count_wrong,                            169)    \
-       x(snapshot_tree_pos_bad,                                170)    \
-       x(snapshot_tree_to_missing_snapshot,                    171)    \
-       x(snapshot_tree_to_missing_subvol,                      172)    \
-       x(snapshot_tree_to_wrong_subvol,                        173)    \
-       x(snapshot_tree_to_snapshot_subvol,                     174)    \
-       x(snapshot_pos_bad,                                     175)    \
-       x(snapshot_parent_bad,                                  176)    \
-       x(snapshot_children_not_normalized,                     177)    \
-       x(snapshot_child_duplicate,                             178)    \
-       x(snapshot_child_bad,                                   179)    \
-       x(snapshot_skiplist_not_normalized,                     180)    \
-       x(snapshot_skiplist_bad,                                181)    \
-       x(snapshot_should_not_have_subvol,                      182)    \
-       x(snapshot_to_bad_snapshot_tree,                        183)    \
-       x(snapshot_bad_depth,                                   184)    \
-       x(snapshot_bad_skiplist,                                185)    \
-       x(subvol_pos_bad,                                       186)    \
-       x(subvol_not_master_and_not_snapshot,                   187)    \
-       x(subvol_to_missing_root,                               188)    \
-       x(subvol_root_wrong_bi_subvol,                          189)    \
-       x(bkey_in_missing_snapshot,                             190)    \
-       x(inode_pos_inode_nonzero,                              191)    \
-       x(inode_pos_blockdev_range,                             192)    \
-       x(inode_unpack_error,                                   193)    \
-       x(inode_str_hash_invalid,                               194)    \
-       x(inode_v3_fields_start_bad,                            195)    \
-       x(inode_snapshot_mismatch,                              196)    \
-       x(inode_unlinked_but_clean,                             197)    \
-       x(inode_unlinked_but_nlink_nonzero,                     198)    \
-       x(inode_checksum_type_invalid,                          199)    \
-       x(inode_compression_type_invalid,                       200)    \
-       x(inode_subvol_root_but_not_dir,                        201)    \
-       x(inode_i_size_dirty_but_clean,                         202)    \
-       x(inode_i_sectors_dirty_but_clean,                      203)    \
-       x(inode_i_sectors_wrong,                                204)    \
-       x(inode_dir_wrong_nlink,                                205)    \
-       x(inode_dir_multiple_links,                             206)    \
-       x(inode_multiple_links_but_nlink_0,                     207)    \
-       x(inode_wrong_backpointer,                              208)    \
-       x(inode_wrong_nlink,                                    209)    \
-       x(inode_unreachable,                                    210)    \
-       x(deleted_inode_but_clean,                              211)    \
-       x(deleted_inode_missing,                                212)    \
-       x(deleted_inode_is_dir,                                 213)    \
-       x(deleted_inode_not_unlinked,                           214)    \
-       x(extent_overlapping,                                   215)    \
-       x(extent_in_missing_inode,                              216)    \
-       x(extent_in_non_reg_inode,                              217)    \
-       x(extent_past_end_of_inode,                             218)    \
-       x(dirent_empty_name,                                    219)    \
-       x(dirent_val_too_big,                                   220)    \
-       x(dirent_name_too_long,                                 221)    \
-       x(dirent_name_embedded_nul,                             222)    \
-       x(dirent_name_dot_or_dotdot,                            223)    \
-       x(dirent_name_has_slash,                                224)    \
-       x(dirent_d_type_wrong,                                  225)    \
-       x(inode_bi_parent_wrong,                                226)    \
-       x(dirent_in_missing_dir_inode,                          227)    \
-       x(dirent_in_non_dir_inode,                              228)    \
-       x(dirent_to_missing_inode,                              229)    \
-       x(dirent_to_missing_subvol,                             230)    \
-       x(dirent_to_itself,                                     231)    \
-       x(quota_type_invalid,                                   232)    \
-       x(xattr_val_size_too_small,                             233)    \
-       x(xattr_val_size_too_big,                               234)    \
-       x(xattr_invalid_type,                                   235)    \
-       x(xattr_name_invalid_chars,                             236)    \
-       x(xattr_in_missing_inode,                               237)    \
-       x(root_subvol_missing,                                  238)    \
-       x(root_dir_missing,                                     239)    \
-       x(root_inode_not_dir,                                   240)    \
-       x(dir_loop,                                             241)    \
-       x(hash_table_key_duplicate,                             242)    \
-       x(hash_table_key_wrong_offset,                          243)    \
-       x(unlinked_inode_not_on_deleted_list,                   244)    \
-       x(reflink_p_front_pad_bad,                              245)    \
-       x(journal_entry_dup_same_device,                        246)    \
-       x(inode_bi_subvol_missing,                              247)    \
-       x(inode_bi_subvol_wrong,                                248)    \
-       x(inode_points_to_missing_dirent,                       249)    \
-       x(inode_points_to_wrong_dirent,                         250)    \
-       x(inode_bi_parent_nonzero,                              251)    \
-       x(dirent_to_missing_parent_subvol,                      252)    \
-       x(dirent_not_visible_in_parent_subvol,                  253)    \
-       x(subvol_fs_path_parent_wrong,                          254)    \
-       x(subvol_root_fs_path_parent_nonzero,                   255)    \
-       x(subvol_children_not_set,                              256)    \
-       x(subvol_children_bad,                                  257)    \
-       x(subvol_loop,                                          258)    \
-       x(subvol_unreachable,                                   259)    \
-       x(btree_node_bkey_bad_u64s,                             260)    \
-       x(btree_node_topology_empty_interior_node,              261)    \
-       x(btree_ptr_v2_min_key_bad,                             262)    \
-       x(btree_root_unreadable_and_scan_found_nothing,         263)    \
-       x(snapshot_node_missing,                                264)    \
-       x(dup_backpointer_to_bad_csum_extent,                   265)    \
-       x(btree_bitmap_not_marked,                              266)    \
-       x(sb_clean_entry_overrun,                               267)    \
-       x(btree_ptr_v2_written_0,                               268)    \
-       x(subvol_snapshot_bad,                                  269)    \
-       x(subvol_inode_bad,                                     270)
-
-enum bch_sb_error_id {
-#define x(t, n) BCH_FSCK_ERR_##t = n,
-       BCH_SB_ERRS()
-#undef x
-       BCH_SB_ERR_MAX
-};
-
 struct bch_sb_error_entry_cpu {
        u64                     id:16,
                                nr:48;
@@ -293,4 +13,3 @@ struct bch_sb_error_entry_cpu {
 typedef DARRAY(struct bch_sb_error_entry_cpu) bch_sb_errors_cpu;
 
 #endif /* _BCACHEFS_SB_ERRORS_TYPES_H */
-
diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h
new file mode 100644 (file)
index 0000000..e263054
--- /dev/null
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_MEMBERS_FORMAT_H
+#define _BCACHEFS_SB_MEMBERS_FORMAT_H
+
+/*
+ * We refer to members with bitmasks in various places - but we need to get rid
+ * of this limit:
+ */
+#define BCH_SB_MEMBERS_MAX             64
+
+#define BCH_MIN_NR_NBUCKETS    (1 << 6)
+
+#define BCH_IOPS_MEASUREMENTS()                        \
+       x(seqread,      0)                      \
+       x(seqwrite,     1)                      \
+       x(randread,     2)                      \
+       x(randwrite,    3)
+
+enum bch_iops_measurement {
+#define x(t, n) BCH_IOPS_##t = n,
+       BCH_IOPS_MEASUREMENTS()
+#undef x
+       BCH_IOPS_NR
+};
+
+#define BCH_MEMBER_ERROR_TYPES()               \
+       x(read,         0)                      \
+       x(write,        1)                      \
+       x(checksum,     2)
+
+enum bch_member_error_type {
+#define x(t, n) BCH_MEMBER_ERROR_##t = n,
+       BCH_MEMBER_ERROR_TYPES()
+#undef x
+       BCH_MEMBER_ERROR_NR
+};
+
+struct bch_member {
+       __uuid_t                uuid;
+       __le64                  nbuckets;       /* device size */
+       __le16                  first_bucket;   /* index of first bucket used */
+       __le16                  bucket_size;    /* sectors */
+       __u8                    btree_bitmap_shift;
+       __u8                    pad[3];
+       __le64                  last_mount;     /* time_t */
+
+       __le64                  flags;
+       __le32                  iops[4];
+       __le64                  errors[BCH_MEMBER_ERROR_NR];
+       __le64                  errors_at_reset[BCH_MEMBER_ERROR_NR];
+       __le64                  errors_reset_time;
+       __le64                  seq;
+       __le64                  btree_allocated_bitmap;
+       /*
+        * On recovery from a clean shutdown we don't normally read the journal,
+        * but we still want to resume writing from where we left off so we
+        * don't overwrite more than is necessary, for list journal debugging:
+        */
+       __le32                  last_journal_bucket;
+       __le32                  last_journal_bucket_offset;
+};
+
+/*
+ * This limit comes from the bucket_gens array - it's a single allocation, and
+ * kernel allocation are limited to INT_MAX
+ */
+#define BCH_MEMBER_NBUCKETS_MAX        (INT_MAX - 64)
+
+#define BCH_MEMBER_V1_BYTES    56
+
+LE64_BITMASK(BCH_MEMBER_STATE,         struct bch_member, flags,  0,  4)
+/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
+LE64_BITMASK(BCH_MEMBER_DISCARD,       struct bch_member, flags, 14, 15)
+LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,  struct bch_member, flags, 15, 20)
+LE64_BITMASK(BCH_MEMBER_GROUP,         struct bch_member, flags, 20, 28)
+LE64_BITMASK(BCH_MEMBER_DURABILITY,    struct bch_member, flags, 28, 30)
+LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
+                                       struct bch_member, flags, 30, 31)
+
+#if 0
+LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,        struct bch_member, flags[1], 0,  20);
+LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
+#endif
+
+#define BCH_MEMBER_STATES()                    \
+       x(rw,           0)                      \
+       x(ro,           1)                      \
+       x(failed,       2)                      \
+       x(spare,        3)
+
+enum bch_member_state {
+#define x(t, n) BCH_MEMBER_STATE_##t = n,
+       BCH_MEMBER_STATES()
+#undef x
+       BCH_MEMBER_STATE_NR
+};
+
+struct bch_sb_field_members_v1 {
+       struct bch_sb_field     field;
+       struct bch_member       _members[]; //Members are now variable size
+};
+
+struct bch_sb_field_members_v2 {
+       struct bch_sb_field     field;
+       __le16                  member_bytes; //size of single member entry
+       u8                      pad[6];
+       struct bch_member       _members[];
+};
+
+#endif /* _BCACHEFS_SB_MEMBERS_FORMAT_H */
index 629900a5e6411c2ee78e9be528646c973b500b09..51918acfd72681cc2249a0b123f1e020ecf6e3d5 100644 (file)
@@ -1042,6 +1042,25 @@ err:
        return ret;
 }
 
+int bch2_check_key_has_snapshot(struct btree_trans *trans,
+                               struct btree_iter *iter,
+                               struct bkey_s_c k)
+{
+       struct bch_fs *c = trans->c;
+       struct printbuf buf = PRINTBUF;
+       int ret = 0;
+
+       if (fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
+                       bkey_in_missing_snapshot,
+                       "key in missing snapshot %s, delete?",
+                       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+               ret = bch2_btree_delete_at(trans, iter,
+                                           BTREE_UPDATE_internal_snapshot_node) ?: 1;
+fsck_err:
+       printbuf_exit(&buf);
+       return ret;
+}
+
 /*
  * Mark a snapshot as deleted, for future cleanup:
  */
@@ -1351,35 +1370,39 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
  * that key to snapshot leaf nodes, where we can mutate it
  */
 
-static int snapshot_delete_key(struct btree_trans *trans,
+static int delete_dead_snapshots_process_key(struct btree_trans *trans,
                               struct btree_iter *iter,
                               struct bkey_s_c k,
                               snapshot_id_list *deleted,
                               snapshot_id_list *equiv_seen,
                               struct bpos *last_pos)
 {
+       int ret = bch2_check_key_has_snapshot(trans, iter, k);
+       if (ret)
+               return ret < 0 ? ret : 0;
+
        struct bch_fs *c = trans->c;
        u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+       if (!equiv) /* key for invalid snapshot node, but we chose not to delete */
+               return 0;
 
        if (!bkey_eq(k.k->p, *last_pos))
                equiv_seen->nr = 0;
-       *last_pos = k.k->p;
 
-       if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
-           snapshot_list_has_id(equiv_seen, equiv)) {
+       if (snapshot_list_has_id(deleted, k.k->p.snapshot))
                return bch2_btree_delete_at(trans, iter,
                                            BTREE_UPDATE_internal_snapshot_node);
-       } else {
-               return snapshot_list_add(c, equiv_seen, equiv);
-       }
-}
 
-static int move_key_to_correct_snapshot(struct btree_trans *trans,
-                              struct btree_iter *iter,
-                              struct bkey_s_c k)
-{
-       struct bch_fs *c = trans->c;
-       u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+       if (!bpos_eq(*last_pos, k.k->p) &&
+           snapshot_list_has_id(equiv_seen, equiv))
+               return bch2_btree_delete_at(trans, iter,
+                                           BTREE_UPDATE_internal_snapshot_node);
+
+       *last_pos = k.k->p;
+
+       ret = snapshot_list_add_nodup(c, equiv_seen, equiv);
+       if (ret)
+               return ret;
 
        /*
         * When we have a linear chain of snapshot nodes, we consider
@@ -1389,21 +1412,20 @@ static int move_key_to_correct_snapshot(struct btree_trans *trans,
         *
         * If there are multiple keys in different snapshots at the same
         * position, we're only going to keep the one in the newest
-        * snapshot - the rest have been overwritten and are redundant,
-        * and for the key we're going to keep we need to move it to the
-        * equivalance class ID if it's not there already.
+        * snapshot (we delete the others above) - the rest have been
+        * overwritten and are redundant, and for the key we're going to keep we
+        * need to move it to the equivalance class ID if it's not there
+        * already.
         */
        if (equiv != k.k->p.snapshot) {
                struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
-               struct btree_iter new_iter;
-               int ret;
-
-               ret = PTR_ERR_OR_ZERO(new);
+               int ret = PTR_ERR_OR_ZERO(new);
                if (ret)
                        return ret;
 
                new->k.p.snapshot = equiv;
 
+               struct btree_iter new_iter;
                bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
                                     BTREE_ITER_all_snapshots|
                                     BTREE_ITER_cached|
@@ -1538,7 +1560,6 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
        struct btree_trans *trans;
        snapshot_id_list deleted = { 0 };
        snapshot_id_list deleted_interior = { 0 };
-       u32 id;
        int ret = 0;
 
        if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
@@ -1585,33 +1606,20 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
        if (ret)
                goto err;
 
-       for (id = 0; id < BTREE_ID_NR; id++) {
+       for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
                struct bpos last_pos = POS_MIN;
                snapshot_id_list equiv_seen = { 0 };
                struct disk_reservation res = { 0 };
 
-               if (!btree_type_has_snapshots(id))
-                       continue;
-
-               /*
-                * deleted inodes btree is maintained by a trigger on the inodes
-                * btree - no work for us to do here, and it's not safe to scan
-                * it because we'll see out of date keys due to the btree write
-                * buffer:
-                */
-               if (id == BTREE_ID_deleted_inodes)
+               if (!btree_type_has_snapshots(btree))
                        continue;
 
                ret = for_each_btree_key_commit(trans, iter,
-                               id, POS_MIN,
-                               BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
-                               &res, NULL, BCH_TRANS_COMMIT_no_enospc,
-                       snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
-                     for_each_btree_key_commit(trans, iter,
-                               id, POS_MIN,
+                               btree, POS_MIN,
                                BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
                                &res, NULL, BCH_TRANS_COMMIT_no_enospc,
-                       move_key_to_correct_snapshot(trans, &iter, k));
+                       delete_dead_snapshots_process_key(trans, &iter, k, &deleted,
+                                                         &equiv_seen, &last_pos));
 
                bch2_disk_reservation_put(c, &res);
                darray_exit(&equiv_seen);
index ab13d8f4b41e1e934c1b5f5c21975358df128019..31b0ee03e96288e8dd525fab7f65e9eb56020990 100644 (file)
@@ -242,6 +242,7 @@ int bch2_snapshot_node_create(struct btree_trans *, u32,
 int bch2_check_snapshot_trees(struct bch_fs *);
 int bch2_check_snapshots(struct bch_fs *);
 int bch2_reconstruct_snapshots(struct bch_fs *);
+int bch2_check_key_has_snapshot(struct btree_trans *, struct btree_iter *, struct bkey_s_c);
 
 int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
 void bch2_delete_dead_snapshots_work(struct work_struct *);
index f1bee6c5222d24b0dd32da210c7924b350c05d59..d73a0222f709541e5fadd288360c202d787d04e3 100644 (file)
@@ -1132,18 +1132,12 @@ bool bch2_check_version_downgrade(struct bch_fs *c)
         * c->sb will be checked before we write the superblock, so update it as
         * well:
         */
-       if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current) {
+       if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current)
                SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
-               c->sb.version_upgrade_complete = bcachefs_metadata_version_current;
-       }
-       if (c->sb.version > bcachefs_metadata_version_current) {
+       if (c->sb.version > bcachefs_metadata_version_current)
                c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
-               c->sb.version = bcachefs_metadata_version_current;
-       }
-       if (c->sb.version_min > bcachefs_metadata_version_current) {
+       if (c->sb.version_min > bcachefs_metadata_version_current)
                c->disk_sb.sb->version_min = cpu_to_le16(bcachefs_metadata_version_current);
-               c->sb.version_min = bcachefs_metadata_version_current;
-       }
        c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1);
        return ret;
 }
index 2206a8dee693ab1ce183ec621860c72fcf5bb7e9..df2bea38e83f0ff8996b4df57fec86374dfdf34c 100644 (file)
@@ -564,7 +564,7 @@ static void __bch2_fs_free(struct bch_fs *c)
        BUG_ON(atomic_read(&c->journal_keys.ref));
        bch2_fs_btree_write_buffer_exit(c);
        percpu_free_rwsem(&c->mark_lock);
-       EBUG_ON(percpu_u64_get(c->online_reserved));
+       EBUG_ON(c->online_reserved && percpu_u64_get(c->online_reserved));
        free_percpu(c->online_reserved);
 
        darray_exit(&c->btree_roots_extra);
index 91c994b569f3b805600e5979c3c07aff12d102a0..6ed495ca7a311dd974e3a4359ca4ada94ec22aec 100644 (file)
@@ -89,6 +89,16 @@ enum {
        BTRFS_INODE_FREE_SPACE_INODE,
        /* Set when there are no capabilities in XATTs for the inode. */
        BTRFS_INODE_NO_CAP_XATTR,
+       /*
+        * Set if an error happened when doing a COW write before submitting a
+        * bio or during writeback. Used for both buffered writes and direct IO
+        * writes. This is to signal a fast fsync that it has to wait for
+        * ordered extents to complete and therefore not log extent maps that
+        * point to unwritten extents (when an ordered extent completes and it
+        * has the BTRFS_ORDERED_IOERR flag set, it drops extent maps in its
+        * range).
+        */
+       BTRFS_INODE_COW_WRITE_ERROR,
 };
 
 /* in memory btrfs inode */
index 1b20b3e390dfd15c4c5b5c49463f0912cde86f51..38cdb8875e8e86bd89703abb9ef7748c25108d33 100644 (file)
@@ -4538,18 +4538,10 @@ static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                       struct btrfs_fs_info *fs_info)
 {
        struct rb_node *node;
-       struct btrfs_delayed_ref_root *delayed_refs;
+       struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
        struct btrfs_delayed_ref_node *ref;
 
-       delayed_refs = &trans->delayed_refs;
-
        spin_lock(&delayed_refs->lock);
-       if (atomic_read(&delayed_refs->num_entries) == 0) {
-               spin_unlock(&delayed_refs->lock);
-               btrfs_debug(fs_info, "delayed_refs has NO entry");
-               return;
-       }
-
        while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
                struct btrfs_delayed_ref_head *head;
                struct rb_node *n;
index 597387e9f040075ac46c02c107337b67762cb76c..f688fab55251ea4e54ea576e59087034c675cff8 100644 (file)
@@ -3689,6 +3689,8 @@ static struct extent_buffer *grab_extent_buffer(
        struct folio *folio = page_folio(page);
        struct extent_buffer *exists;
 
+       lockdep_assert_held(&page->mapping->i_private_lock);
+
        /*
         * For subpage case, we completely rely on radix tree to ensure we
         * don't try to insert two ebs for the same bytenr.  So here we always
@@ -3756,13 +3758,14 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
  * The caller needs to free the existing folios and retry using the same order.
  */
 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
+                                     struct btrfs_subpage *prealloc,
                                      struct extent_buffer **found_eb_ret)
 {
 
        struct btrfs_fs_info *fs_info = eb->fs_info;
        struct address_space *mapping = fs_info->btree_inode->i_mapping;
        const unsigned long index = eb->start >> PAGE_SHIFT;
-       struct folio *existing_folio;
+       struct folio *existing_folio = NULL;
        int ret;
 
        ASSERT(found_eb_ret);
@@ -3774,12 +3777,14 @@ retry:
        ret = filemap_add_folio(mapping, eb->folios[i], index + i,
                                GFP_NOFS | __GFP_NOFAIL);
        if (!ret)
-               return 0;
+               goto finish;
 
        existing_folio = filemap_lock_folio(mapping, index + i);
        /* The page cache only exists for a very short time, just retry. */
-       if (IS_ERR(existing_folio))
+       if (IS_ERR(existing_folio)) {
+               existing_folio = NULL;
                goto retry;
+       }
 
        /* For now, we should only have single-page folios for btree inode. */
        ASSERT(folio_nr_pages(existing_folio) == 1);
@@ -3790,14 +3795,13 @@ retry:
                return -EAGAIN;
        }
 
-       if (fs_info->nodesize < PAGE_SIZE) {
-               /*
-                * We're going to reuse the existing page, can drop our page
-                * and subpage structure now.
-                */
+finish:
+       spin_lock(&mapping->i_private_lock);
+       if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
+               /* We're going to reuse the existing page, can drop our folio now. */
                __free_page(folio_page(eb->folios[i], 0));
                eb->folios[i] = existing_folio;
-       } else {
+       } else if (existing_folio) {
                struct extent_buffer *existing_eb;
 
                existing_eb = grab_extent_buffer(fs_info,
@@ -3805,6 +3809,7 @@ retry:
                if (existing_eb) {
                        /* The extent buffer still exists, we can use it directly. */
                        *found_eb_ret = existing_eb;
+                       spin_unlock(&mapping->i_private_lock);
                        folio_unlock(existing_folio);
                        folio_put(existing_folio);
                        return 1;
@@ -3813,6 +3818,22 @@ retry:
                __free_page(folio_page(eb->folios[i], 0));
                eb->folios[i] = existing_folio;
        }
+       eb->folio_size = folio_size(eb->folios[i]);
+       eb->folio_shift = folio_shift(eb->folios[i]);
+       /* Should not fail, as we have preallocated the memory. */
+       ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
+       ASSERT(!ret);
+       /*
+        * To inform we have an extra eb under allocation, so that
+        * detach_extent_buffer_page() won't release the folio private when the
+        * eb hasn't been inserted into radix tree yet.
+        *
+        * The ref will be decreased when the eb releases the page, in
+        * detach_extent_buffer_page().  Thus needs no special handling in the
+        * error path.
+        */
+       btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
+       spin_unlock(&mapping->i_private_lock);
        return 0;
 }
 
@@ -3824,7 +3845,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        int attached = 0;
        struct extent_buffer *eb;
        struct extent_buffer *existing_eb = NULL;
-       struct address_space *mapping = fs_info->btree_inode->i_mapping;
        struct btrfs_subpage *prealloc = NULL;
        u64 lockdep_owner = owner_root;
        bool page_contig = true;
@@ -3890,7 +3910,7 @@ reallocate:
        for (int i = 0; i < num_folios; i++) {
                struct folio *folio;
 
-               ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
+               ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
                if (ret > 0) {
                        ASSERT(existing_eb);
                        goto out;
@@ -3927,24 +3947,6 @@ reallocate:
                 * and free the allocated page.
                 */
                folio = eb->folios[i];
-               eb->folio_size = folio_size(folio);
-               eb->folio_shift = folio_shift(folio);
-               spin_lock(&mapping->i_private_lock);
-               /* Should not fail, as we have preallocated the memory */
-               ret = attach_extent_buffer_folio(eb, folio, prealloc);
-               ASSERT(!ret);
-               /*
-                * To inform we have extra eb under allocation, so that
-                * detach_extent_buffer_page() won't release the folio private
-                * when the eb hasn't yet been inserted into radix tree.
-                *
-                * The ref will be decreased when the eb released the page, in
-                * detach_extent_buffer_page().
-                * Thus needs no special handling in error path.
-                */
-               btrfs_folio_inc_eb_refs(fs_info, folio);
-               spin_unlock(&mapping->i_private_lock);
-
                WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
 
                /*
index e764ac3f22e23674b8969ee2641634d1916e14fb..d90138683a0a3031ed9e04060d6d1fc0c84a03ce 100644 (file)
@@ -1885,6 +1885,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         */
        if (full_sync || btrfs_is_zoned(fs_info)) {
                ret = btrfs_wait_ordered_range(inode, start, len);
+               clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &BTRFS_I(inode)->runtime_flags);
        } else {
                /*
                 * Get our ordered extents as soon as possible to avoid doing
@@ -1894,6 +1895,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
                                                      &ctx.ordered_extents);
                ret = filemap_fdatawait_range(inode->i_mapping, start, end);
+               if (ret)
+                       goto out_release_extents;
+
+               /*
+                * Check and clear the BTRFS_INODE_COW_WRITE_ERROR now after
+                * starting and waiting for writeback, because for buffered IO
+                * it may have been set during the end IO callback
+                * (end_bbio_data_write() -> btrfs_finish_ordered_extent()) in
+                * case an error happened and we need to wait for ordered
+                * extents to complete so that any extent maps that point to
+                * unwritten locations are dropped and we don't log them.
+                */
+               if (test_and_clear_bit(BTRFS_INODE_COW_WRITE_ERROR,
+                                      &BTRFS_I(inode)->runtime_flags))
+                       ret = btrfs_wait_ordered_range(inode, start, len);
        }
 
        if (ret)
index c5bdd674f55c534f03c52c7c6d8c8b3f6cf116ba..35a413ce935d9e6ce67bf103940b879e43ab3ead 100644 (file)
@@ -388,6 +388,37 @@ bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
        ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
        spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
 
+       /*
+        * If this is a COW write it means we created new extent maps for the
+        * range and they point to unwritten locations if we got an error either
+        * before submitting a bio or during IO.
+        *
+        * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we
+        * are queuing its completion below. During completion, at
+        * btrfs_finish_one_ordered(), we will drop the extent maps for the
+        * unwritten extents.
+        *
+        * However because completion runs in a work queue we can end up having
+        * a fast fsync running before that. In the case of direct IO, once we
+        * unlock the inode the fsync might start, and we queue the completion
+        * before unlocking the inode. In the case of buffered IO when writeback
+        * finishes (end_bbio_data_write()) we queue the completion, so if the
+        * writeback was triggered by a fast fsync, the fsync might start
+        * logging before ordered extent completion runs in the work queue.
+        *
+        * The fast fsync will log file extent items based on the extent maps it
+        * finds, so if by the time it collects extent maps the ordered extent
+        * completion didn't happen yet, it will log file extent items that
+        * point to unwritten extents, resulting in a corruption if a crash
+        * happens and the log tree is replayed. Note that a fast fsync does not
+        * wait for completion of ordered extents in order to reduce latency.
+        *
+        * Set a flag in the inode so that the next fast fsync will wait for
+        * ordered extents to complete before starting to log.
+        */
+       if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
+               set_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);
+
        if (ret)
                btrfs_queue_ordered_fn(ordered);
        return ret;
index 5146387b416bfef5de320d8cba38a1e525df7958..26a2e5aa08e9ca05a106849703414a1e0a55d360 100644 (file)
@@ -4860,18 +4860,23 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
                        path->slots[0]++;
                        continue;
                }
-               if (!dropped_extents) {
-                       /*
-                        * Avoid logging extent items logged in past fsync calls
-                        * and leading to duplicate keys in the log tree.
-                        */
+               /*
+                * Avoid overlapping items in the log tree. The first time we
+                * get here, get rid of everything from a past fsync. After
+                * that, if the current extent starts before the end of the last
+                * extent we copied, truncate the last one. This can happen if
+                * an ordered extent completion modifies the subvolume tree
+                * while btrfs_next_leaf() has the tree unlocked.
+                */
+               if (!dropped_extents || key.offset < truncate_offset) {
                        ret = truncate_inode_items(trans, root->log_root, inode,
-                                                  truncate_offset,
+                                                  min(key.offset, truncate_offset),
                                                   BTRFS_EXTENT_DATA_KEY);
                        if (ret)
                                goto out;
                        dropped_extents = true;
                }
+               truncate_offset = btrfs_file_extent_end(path);
                if (ins_nr == 0)
                        start_slot = slot;
                ins_nr++;
index 1ee6404b430baa5bd66972f64315db0ee1944f3a..407095188f83a7faeec0a8c0542fb6598952b7be 100644 (file)
@@ -2360,17 +2360,19 @@ EXPORT_SYMBOL(d_hash_and_lookup);
  * - unhash this dentry and free it.
  *
  * Usually, we want to just turn this into
- * a negative dentry, but certain workloads can
- * generate a large number of negative dentries.
- * Therefore, it would be better to simply
- * unhash it.
+ * a negative dentry, but if anybody else is
+ * currently using the dentry or the inode
+ * we can't do that and we fall back on removing
+ * it from the hash queues and waiting for
+ * it to be deleted later when it has no users
  */
  
 /**
  * d_delete - delete a dentry
  * @dentry: The dentry to delete
  *
- * Remove the dentry from the hash queues so it can be deleted later.
+ * Turn the dentry into a negative dentry if possible, otherwise
+ * remove it from the hash queues so it can be deleted later
  */
  
 void d_delete(struct dentry * dentry)
@@ -2379,8 +2381,6 @@ void d_delete(struct dentry * dentry)
 
        spin_lock(&inode->i_lock);
        spin_lock(&dentry->d_lock);
-       __d_drop(dentry);
-
        /*
         * Are we the only user?
         */
@@ -2388,6 +2388,7 @@ void d_delete(struct dentry * dentry)
                dentry->d_flags &= ~DCACHE_CANT_MOUNT;
                dentry_unlink_inode(dentry);
        } else {
+               __d_drop(dentry);
                spin_unlock(&dentry->d_lock);
                spin_unlock(&inode->i_lock);
        }
index 41c8f0c68ef564960a4341f605d9da135e9f14ef..c5802a4593345a9de8c3b2685b04b6ef0e7fdc41 100644 (file)
@@ -898,11 +898,11 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 {
        loff_t length = iomap_length(iter);
-       size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
        loff_t pos = iter->pos;
        ssize_t total_written = 0;
        long status = 0;
        struct address_space *mapping = iter->inode->i_mapping;
+       size_t chunk = mapping_max_folio_size(mapping);
        unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
 
        do {
index 1121601536d1844eddff472160adb61d1982040b..07bc1fd4353092acb9697f76b487c59f290a6f6c 100644 (file)
@@ -181,7 +181,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
        struct folio *folio, *writethrough = NULL;
        enum netfs_how_to_modify howto;
        enum netfs_folio_trace trace;
-       unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
+       unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
        ssize_t written = 0, ret, ret2;
        loff_t i_size, pos = iocb->ki_pos, from, to;
        size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
index f516460e994e2ad142c0fd26ccb2528c7f1dae64..e14cd53ac9fd7559337036699fc39a8c49030a39 100644 (file)
@@ -12,7 +12,7 @@
 static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
 {
        struct inode *inode = wreq->inode;
-       unsigned long long end = wreq->start + wreq->len;
+       unsigned long long end = wreq->start + wreq->transferred;
 
        if (!wreq->error &&
            i_size_read(inode) < end) {
index c90d482b16505d319dede502bcf23c7bed38e254..f4a642727479252de0212f80fe32ef23468e0ebf 100644 (file)
@@ -72,6 +72,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
                }
        }
 
+       atomic_inc(&ctx->io_count);
        trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
        netfs_proc_add_rreq(rreq);
        netfs_stat(&netfs_n_rh_rreq);
@@ -124,6 +125,7 @@ static void netfs_free_request(struct work_struct *work)
 {
        struct netfs_io_request *rreq =
                container_of(work, struct netfs_io_request, work);
+       struct netfs_inode *ictx = netfs_inode(rreq->inode);
        unsigned int i;
 
        trace_netfs_rreq(rreq, netfs_rreq_trace_free);
@@ -142,6 +144,9 @@ static void netfs_free_request(struct work_struct *work)
                }
                kvfree(rreq->direct_bv);
        }
+
+       if (atomic_dec_and_test(&ictx->io_count))
+               wake_up_var(&ictx->io_count);
        call_rcu(&rreq->rcu, netfs_free_request_rcu);
 }
 
index 60112e4b2c5eb70e350a873feb2cc397517534a7..426cf87aaf2ecd97f82ea43eabb326b2c83b8080 100644 (file)
@@ -510,7 +510,7 @@ reassess_streams:
         * stream has a gap that can be jumped.
         */
        if (notes & SOME_EMPTY) {
-               unsigned long long jump_to = wreq->start + wreq->len;
+               unsigned long long jump_to = wreq->start + READ_ONCE(wreq->submitted);
 
                for (s = 0; s < NR_IO_STREAMS; s++) {
                        stream = &wreq->io_streams[s];
@@ -690,10 +690,11 @@ void netfs_write_collection_worker(struct work_struct *work)
        wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
 
        if (wreq->iocb) {
-               wreq->iocb->ki_pos += wreq->transferred;
+               size_t written = min(wreq->transferred, wreq->len);
+               wreq->iocb->ki_pos += written;
                if (wreq->iocb->ki_complete)
                        wreq->iocb->ki_complete(
-                               wreq->iocb, wreq->error ? wreq->error : wreq->transferred);
+                               wreq->iocb, wreq->error ? wreq->error : written);
                wreq->iocb = VFS_PTR_POISON;
        }
 
index e190043bc0daa724f749003a2a80e151e02806d3..3aa86e268f40d4a7b01a85715d41b7fd4a270da6 100644 (file)
@@ -254,7 +254,7 @@ static void netfs_issue_write(struct netfs_io_request *wreq,
        stream->construct = NULL;
 
        if (subreq->start + subreq->len > wreq->start + wreq->submitted)
-               wreq->len = wreq->submitted = subreq->start + subreq->len - wreq->start;
+               WRITE_ONCE(wreq->submitted, subreq->start + subreq->len - wreq->start);
        netfs_do_issue_write(stream, subreq);
 }
 
@@ -636,7 +636,12 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
 
        mutex_unlock(&ictx->wb_lock);
 
-       ret = wreq->error;
+       if (wreq->iocb) {
+               ret = -EIOCBQUEUED;
+       } else {
+               wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
+               ret = wreq->error;
+       }
        netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
        return ret;
 }
index 4a5614442dbfa733ff2dc9fa704532e8b5cc147c..ec7b2da2477a41c3b7ac04bbf85835febafd08f1 100644 (file)
@@ -282,14 +282,10 @@ static int do_signalfd4(int ufd, sigset_t *mask, int flags)
                if (IS_ERR(file)) {
                        put_unused_fd(ufd);
                        kfree(ctx);
-                       return ufd;
+                       return PTR_ERR(file);
                }
                file->f_mode |= FMODE_NOWAIT;
 
-               /*
-                * When we call this, the initialization must be complete, since
-                * anon_inode_getfd() will install the fd.
-                */
                fd_install(ufd, file);
        } else {
                struct fd f = fdget(ufd);
index a665aac9be9f4bcef7c017a01577c45809dcefed..bb86fc0641d83ab3757fd50fb074a082894ae088 100644 (file)
@@ -431,6 +431,7 @@ cifs_free_inode(struct inode *inode)
 static void
 cifs_evict_inode(struct inode *inode)
 {
+       netfs_wait_for_outstanding_io(inode);
        truncate_inode_pages_final(&inode->i_data);
        if (inode->i_state & I_PINNING_NETFS_WB)
                cifs_fscache_unuse_inode_cookie(inode, true);
index c46d418c1c0c3ea065eaa5d4f4f751750df90080..a2072ab9e586d0d3ba34afe5bcab985b38537202 100644 (file)
@@ -2574,7 +2574,7 @@ typedef struct {
 
 
 struct win_dev {
-       unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO*/
+       unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO or LnxSOCK */
        __le64 major;
        __le64 minor;
 } __attribute__((packed));
index 262576573eb5182b8a1729677e1b98e7bdee66a3..4a8aa1de95223dfc487e577e476be18d677b6477 100644 (file)
@@ -606,6 +606,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
                                mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
                                fattr->cf_rdev = MKDEV(mjr, mnr);
                        }
+               } else if (memcmp("LnxSOCK", pbuf, 8) == 0) {
+                       cifs_dbg(FYI, "Socket\n");
+                       fattr->cf_mode |= S_IFSOCK;
+                       fattr->cf_dtype = DT_SOCK;
                } else if (memcmp("IntxLNK", pbuf, 7) == 0) {
                        cifs_dbg(FYI, "Symlink\n");
                        fattr->cf_mode |= S_IFLNK;
index 4ce6c3121a7ef17866be79561fa4589360967a17..c8e536540895a2e6378ec16b640752134742d134 100644 (file)
@@ -4997,6 +4997,9 @@ static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
                pdev.major = cpu_to_le64(MAJOR(dev));
                pdev.minor = cpu_to_le64(MINOR(dev));
                break;
+       case S_IFSOCK:
+               strscpy(pdev.type, "LnxSOCK");
+               break;
        case S_IFIFO:
                strscpy(pdev.type, "LnxFIFO");
                break;
index 043e4cb839fa235f7c2e05fac3fe558e82e9d4f2..df360ca47826ab7fa62f284ec6c71017732c6455 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include "arc4.h"
 
+MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
 MODULE_LICENSE("GPL");
 
 int cifs_arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
index 50f78cfc6ce921efd8e8ea38e5237ac1fe7fe19a..7ee7f4dad90c00f5c15fe00a6874fbeeb631665d 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/byteorder.h>
 #include "md4.h"
 
+MODULE_DESCRIPTION("MD4 Message Digest Algorithm (RFC1320)");
 MODULE_LICENSE("GPL");
 
 static inline u32 lshift(u32 x, unsigned int s)
index 6cb8b2ddc541b4b28d553ffee414b903725e97f8..6c55a6e88eba3ffb8ac76803b2b14ea03b45b467 100644 (file)
@@ -1008,13 +1008,12 @@ xfs_alloc_cur_finish(
        struct xfs_alloc_arg    *args,
        struct xfs_alloc_cur    *acur)
 {
-       struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
        int                     error;
 
        ASSERT(acur->cnt && acur->bnolt);
        ASSERT(acur->bno >= acur->rec_bno);
        ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
-       ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));
+       ASSERT(xfs_verify_agbext(args->pag, acur->rec_bno, acur->rec_len));
 
        error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
                                      acur->rec_len, acur->bno, acur->len, 0);
@@ -1217,7 +1216,6 @@ STATIC int                        /* error */
 xfs_alloc_ag_vextent_exact(
        xfs_alloc_arg_t *args)  /* allocation argument structure */
 {
-       struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
        struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */
        struct xfs_btree_cur *cnt_cur;/* by count btree cursor */
        int             error;
@@ -1297,7 +1295,7 @@ xfs_alloc_ag_vextent_exact(
         */
        cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp,
                                        args->pag);
-       ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
+       ASSERT(xfs_verify_agbext(args->pag, args->agbno, args->len));
        error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
                                      args->len, XFSA_FIXUP_BNO_OK);
        if (error) {
index 430cd3244c143dc065f0490b1ec3c3feef47e2e3..f30bcc64100d56b7199fc6b73ee777f6a6e06aa9 100644 (file)
@@ -329,26 +329,20 @@ xfs_attr_calc_size(
        return nblks;
 }
 
-/* Initialize transaction reservation for attr operations */
-void
-xfs_init_attr_trans(
-       struct xfs_da_args      *args,
-       struct xfs_trans_res    *tres,
-       unsigned int            *total)
+/* Initialize transaction reservation for an xattr set/replace/upsert */
+inline struct xfs_trans_res
+xfs_attr_set_resv(
+       const struct xfs_da_args        *args)
 {
-       struct xfs_mount        *mp = args->dp->i_mount;
-
-       if (args->value) {
-               tres->tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
-                                M_RES(mp)->tr_attrsetrt.tr_logres *
-                                args->total;
-               tres->tr_logcount = XFS_ATTRSET_LOG_COUNT;
-               tres->tr_logflags = XFS_TRANS_PERM_LOG_RES;
-               *total = args->total;
-       } else {
-               *tres = M_RES(mp)->tr_attrrm;
-               *total = XFS_ATTRRM_SPACE_RES(mp);
-       }
+       struct xfs_mount                *mp = args->dp->i_mount;
+       struct xfs_trans_res            ret = {
+               .tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
+                           M_RES(mp)->tr_attrsetrt.tr_logres * args->total,
+               .tr_logcount            = XFS_ATTRSET_LOG_COUNT,
+               .tr_logflags            = XFS_TRANS_PERM_LOG_RES,
+       };
+
+       return ret;
 }
 
 /*
@@ -1006,7 +1000,7 @@ xfs_attr_set(
        struct xfs_trans_res    tres;
        int                     error, local;
        int                     rmt_blks = 0;
-       unsigned int            total;
+       unsigned int            total = 0;
 
        ASSERT(!args->trans);
 
@@ -1033,10 +1027,15 @@ xfs_attr_set(
 
                if (!local)
                        rmt_blks = xfs_attr3_rmt_blocks(mp, args->valuelen);
+
+               tres = xfs_attr_set_resv(args);
+               total = args->total;
                break;
        case XFS_ATTRUPDATE_REMOVE:
                XFS_STATS_INC(mp, xs_attr_remove);
                rmt_blks = xfs_attr3_max_rmt_blocks(mp);
+               tres = M_RES(mp)->tr_attrrm;
+               total = XFS_ATTRRM_SPACE_RES(mp);
                break;
        }
 
@@ -1044,7 +1043,6 @@ xfs_attr_set(
         * Root fork attributes can use reserved data blocks for this
         * operation if necessary
         */
-       xfs_init_attr_trans(args, &tres, &total);
        error = xfs_trans_alloc_inode(dp, &tres, total, 0, rsvd, &args->trans);
        if (error)
                return error;
index 088cb7b301680ca024d58cf2ca8d7bbdc5a4ef8d..0e51d0723f9aa36c1519f6ad2c1881489a26dd8a 100644 (file)
@@ -565,8 +565,7 @@ bool xfs_attr_check_namespace(unsigned int attr_flags);
 bool xfs_attr_namecheck(unsigned int attr_flags, const void *name,
                size_t length);
 int xfs_attr_calc_size(struct xfs_da_args *args, int *local);
-void xfs_init_attr_trans(struct xfs_da_args *args, struct xfs_trans_res *tres,
-                        unsigned int *total);
+struct xfs_trans_res xfs_attr_set_resv(const struct xfs_da_args *args);
 
 /*
  * Check to see if the attr should be upgraded from non-existent or shortform to
index 3b3206d312d6fe157ff90e14f28e96ab39f2e583..c101cf266bc4dbf605e5ebe40cfe124dfc1ab8e2 100644 (file)
@@ -6383,6 +6383,7 @@ xfs_bunmapi_range(
                error = xfs_defer_finish(tpp);
                if (error)
                        goto out;
+               cond_resched();
        }
 out:
        return error;
index d79002343d0b631290d7dfdaf69e715075111c52..e7a7bfbe75b46a2145f7ed7b47d00bdab0afdfca 100644 (file)
@@ -374,17 +374,37 @@ xfs_dinode_verify_fork(
        /*
         * For fork types that can contain local data, check that the fork
         * format matches the size of local data contained within the fork.
-        *
-        * For all types, check that when the size says the should be in extent
-        * or btree format, the inode isn't claiming it is in local format.
         */
        if (whichfork == XFS_DATA_FORK) {
-               if (S_ISDIR(mode) || S_ISLNK(mode)) {
+               /*
+                * A directory small enough to fit in the inode must be stored
+                * in local format.  The directory sf <-> extents conversion
+                * code updates the directory size accordingly.
+                */
+               if (S_ISDIR(mode)) {
+                       if (be64_to_cpu(dip->di_size) <= fork_size &&
+                           fork_format != XFS_DINODE_FMT_LOCAL)
+                               return __this_address;
+               }
+
+               /*
+                * A symlink with a target small enough to fit in the inode can
+                * be stored in extents format if xattrs were added (thus
+                * converting the data fork from shortform to remote format)
+                * and then removed.
+                */
+               if (S_ISLNK(mode)) {
                        if (be64_to_cpu(dip->di_size) <= fork_size &&
+                           fork_format != XFS_DINODE_FMT_EXTENTS &&
                            fork_format != XFS_DINODE_FMT_LOCAL)
                                return __this_address;
                }
 
+               /*
+                * For all types, check that when the size says the fork should
+                * be in extent or btree format, the inode isn't claiming to be
+                * in local format.
+                */
                if (be64_to_cpu(dip->di_size) > fork_size &&
                    fork_format == XFS_DINODE_FMT_LOCAL)
                        return __this_address;
index c013f0ba4f36b2cfc9a3fe7c8131b14954fcc10a..4cbcf7a86dbec5920af6b2070d9e5e5e287225e8 100644 (file)
@@ -856,7 +856,7 @@ xfs_ioc_scrubv_metadata(
        if (vec_bytes > PAGE_SIZE)
                return -ENOMEM;
 
-       uvectors = (void __user *)(uintptr_t)head.svh_vectors;
+       uvectors = u64_to_user_ptr(head.svh_vectors);
        vectors = memdup_user(uvectors, vec_bytes);
        if (IS_ERR(vectors))
                return PTR_ERR(vectors);
index 9185ae7088d49a9dd5b1f8c133e92205f45fe1a3..cdd13ed9c569a7b46e385f234b32faa2ac7e3cd6 100644 (file)
@@ -822,12 +822,14 @@ xfarray_sort_scan(
 
        /* Grab the first folio that backs this array element. */
        if (!si->folio) {
+               struct folio    *folio;
                loff_t          next_pos;
 
-               si->folio = xfile_get_folio(si->array->xfile, idx_pos,
+               folio = xfile_get_folio(si->array->xfile, idx_pos,
                                si->array->obj_size, XFILE_ALLOC);
-               if (IS_ERR(si->folio))
-                       return PTR_ERR(si->folio);
+               if (IS_ERR(folio))
+                       return PTR_ERR(folio);
+               si->folio = folio;
 
                si->first_folio_idx = xfarray_idx(si->array,
                                folio_pos(si->folio) + si->array->obj_size - 1);
@@ -1048,6 +1050,7 @@ xfarray_sort(
 
 out_free:
        trace_xfarray_sort_stats(si, error);
+       xfarray_sort_scan_done(si);
        kvfree(si);
        return error;
 }
index 2b10ac4c5fce24d0df38b1fb2445e491c1aa389b..f683b7a9323f16146b1c8cad6e54a40ca056997d 100644 (file)
@@ -746,7 +746,7 @@ xfs_attr_recover_work(
        struct xfs_attri_log_format     *attrp;
        struct xfs_attri_log_nameval    *nv = attrip->attri_nameval;
        int                             error;
-       int                             total;
+       unsigned int                    total = 0;
 
        /*
         * First check the validity of the attr described by the ATTRI.  If any
@@ -763,7 +763,20 @@ xfs_attr_recover_work(
                return PTR_ERR(attr);
        args = attr->xattri_da_args;
 
-       xfs_init_attr_trans(args, &resv, &total);
+       switch (xfs_attr_intent_op(attr)) {
+       case XFS_ATTRI_OP_FLAGS_PPTR_SET:
+       case XFS_ATTRI_OP_FLAGS_PPTR_REPLACE:
+       case XFS_ATTRI_OP_FLAGS_SET:
+       case XFS_ATTRI_OP_FLAGS_REPLACE:
+               resv = xfs_attr_set_resv(args);
+               total = args->total;
+               break;
+       case XFS_ATTRI_OP_FLAGS_PPTR_REMOVE:
+       case XFS_ATTRI_OP_FLAGS_REMOVE:
+               resv = M_RES(mp)->tr_attrrm;
+               total = XFS_ATTRRM_SPACE_RES(mp);
+               break;
+       }
        resv = xlog_recover_resv(&resv);
        error = xfs_trans_alloc(mp, &resv, total, 0, XFS_TRANS_RESERVE, &tp);
        if (error)
index c8785ed595434223086affd3b0a71d18e25662bb..a3f16e9b6fe5b53baf7e75e6391435772d51044b 100644 (file)
@@ -773,11 +773,6 @@ xfs_getparents_expand_lastrec(
        trace_xfs_getparents_expand_lastrec(gpx->ip, gp, &gpx->context, gpr);
 }
 
-static inline void __user *u64_to_uptr(u64 val)
-{
-       return (void __user *)(uintptr_t)val;
-}
-
 /* Retrieve the parent pointers for a given inode. */
 STATIC int
 xfs_getparents(
@@ -862,7 +857,7 @@ xfs_getparents(
        ASSERT(gpx->context.firstu <= gpx->gph.gph_request.gp_bufsize);
 
        /* Copy the records to userspace. */
-       if (copy_to_user(u64_to_uptr(gpx->gph.gph_request.gp_buffer),
+       if (copy_to_user(u64_to_user_ptr(gpx->gph.gph_request.gp_buffer),
                                gpx->krecords, gpx->context.firstu))
                error = -EFAULT;
 
index 730c8d48da2827714127324cc5f8954465aef4c4..86f14ec7c31fede6fc22d9b9d7a5eec26b34a79f 100644 (file)
@@ -351,7 +351,6 @@ xfs_iwalk_run_callbacks(
        int                             *has_more)
 {
        struct xfs_mount                *mp = iwag->mp;
-       struct xfs_inobt_rec_incore     *irec;
        xfs_agino_t                     next_agino;
        int                             error;
 
@@ -361,8 +360,8 @@ xfs_iwalk_run_callbacks(
 
        /* Delete cursor but remember the last record we cached... */
        xfs_iwalk_del_inobt(iwag->tp, curpp, agi_bpp, 0);
-       irec = &iwag->recs[iwag->nr_recs - 1];
-       ASSERT(next_agino >= irec->ir_startino + XFS_INODES_PER_CHUNK);
+       ASSERT(next_agino >= iwag->recs[iwag->nr_recs - 1].ir_startino +
+                       XFS_INODES_PER_CHUNK);
 
        if (iwag->drop_trans) {
                xfs_trans_cancel(iwag->tp);
index 063a2e00d1691564c4bd5bce9bef31955299aa11..265a2a418bc7b098a595237dbbf3a7931a66e94d 100644 (file)
@@ -1387,6 +1387,7 @@ xfs_reflink_remap_blocks(
                destoff += imap.br_blockcount;
                len -= imap.br_blockcount;
                remapped_len += imap.br_blockcount;
+               cond_resched();
        }
 
        if (error)
index 82570f77e8172a5d88b63170ba7b44fee1dc6486..2a74fa9d0ce554831bb3fa43b79a8a17aec91562 100644 (file)
@@ -56,8 +56,8 @@ struct drm_buddy_block {
        struct list_head tmp_link;
 };
 
-/* Order-zero must be at least PAGE_SIZE */
-#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
+/* Order-zero must be at least SZ_4K */
+#define DRM_BUDDY_MAX_ORDER (63 - 12)
 
 /*
  * Binary Buddy System.
@@ -85,7 +85,7 @@ struct drm_buddy {
        unsigned int n_roots;
        unsigned int max_order;
 
-       /* Must be at least PAGE_SIZE */
+       /* Must be at least SZ_4K */
        u64 chunk_size;
        u64 size;
        u64 avail;
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
deleted file mode 100644 (file)
index d58fc02..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/include/linux/amd-pstate.h
- *
- * Copyright (C) 2022 Advanced Micro Devices, Inc.
- *
- * Author: Meng Li <[email protected]>
- */
-
-#ifndef _LINUX_AMD_PSTATE_H
-#define _LINUX_AMD_PSTATE_H
-
-#include <linux/pm_qos.h>
-
-#define AMD_CPPC_EPP_PERFORMANCE               0x00
-#define AMD_CPPC_EPP_BALANCE_PERFORMANCE       0x80
-#define AMD_CPPC_EPP_BALANCE_POWERSAVE         0xBF
-#define AMD_CPPC_EPP_POWERSAVE                 0xFF
-
-/*********************************************************************
- *                        AMD P-state INTERFACE                       *
- *********************************************************************/
-/**
- * struct  amd_aperf_mperf
- * @aperf: actual performance frequency clock count
- * @mperf: maximum performance frequency clock count
- * @tsc:   time stamp counter
- */
-struct amd_aperf_mperf {
-       u64 aperf;
-       u64 mperf;
-       u64 tsc;
-};
-
-/**
- * struct amd_cpudata - private CPU data for AMD P-State
- * @cpu: CPU number
- * @req: constraint request to apply
- * @cppc_req_cached: cached performance request hints
- * @highest_perf: the maximum performance an individual processor may reach,
- *               assuming ideal conditions
- *               For platforms that do not support the preferred core feature, the
- *               highest_pef may be configured with 166 or 255, to avoid max frequency
- *               calculated wrongly. we take the fixed value as the highest_perf.
- * @nominal_perf: the maximum sustained performance level of the processor,
- *               assuming ideal operating conditions
- * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
- *                        savings are achieved
- * @lowest_perf: the absolute lowest performance level of the processor
- * @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
- *               priority.
- * @min_limit_perf: Cached value of the performance corresponding to policy->min
- * @max_limit_perf: Cached value of the performance corresponding to policy->max
- * @min_limit_freq: Cached value of policy->min (in khz)
- * @max_limit_freq: Cached value of policy->max (in khz)
- * @max_freq: the frequency (in khz) that mapped to highest_perf
- * @min_freq: the frequency (in khz) that mapped to lowest_perf
- * @nominal_freq: the frequency (in khz) that mapped to nominal_perf
- * @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf
- * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
- * @prev: Last Aperf/Mperf/tsc count value read from register
- * @freq: current cpu frequency value (in khz)
- * @boost_supported: check whether the Processor or SBIOS supports boost mode
- * @hw_prefcore: check whether HW supports preferred core featue.
- *               Only when hw_prefcore and early prefcore param are true,
- *               AMD P-State driver supports preferred core featue.
- * @epp_policy: Last saved policy used to set energy-performance preference
- * @epp_cached: Cached CPPC energy-performance preference value
- * @policy: Cpufreq policy value
- * @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
- *
- * The amd_cpudata is key private data for each CPU thread in AMD P-State, and
- * represents all the attributes and goals that AMD P-State requests at runtime.
- */
-struct amd_cpudata {
-       int     cpu;
-
-       struct  freq_qos_request req[2];
-       u64     cppc_req_cached;
-
-       u32     highest_perf;
-       u32     nominal_perf;
-       u32     lowest_nonlinear_perf;
-       u32     lowest_perf;
-       u32     prefcore_ranking;
-       u32     min_limit_perf;
-       u32     max_limit_perf;
-       u32     min_limit_freq;
-       u32     max_limit_freq;
-
-       u32     max_freq;
-       u32     min_freq;
-       u32     nominal_freq;
-       u32     lowest_nonlinear_freq;
-
-       struct amd_aperf_mperf cur;
-       struct amd_aperf_mperf prev;
-
-       u64     freq;
-       bool    boost_supported;
-       bool    hw_prefcore;
-
-       /* EPP feature related attributes*/
-       s16     epp_policy;
-       s16     epp_cached;
-       u32     policy;
-       u64     cppc_cap1_cached;
-       bool    suspended;
-};
-
-/*
- * enum amd_pstate_mode - driver working mode of amd pstate
- */
-enum amd_pstate_mode {
-       AMD_PSTATE_UNDEFINED = 0,
-       AMD_PSTATE_DISABLE,
-       AMD_PSTATE_PASSIVE,
-       AMD_PSTATE_ACTIVE,
-       AMD_PSTATE_GUIDED,
-       AMD_PSTATE_MAX,
-};
-
-static const char * const amd_pstate_mode_string[] = {
-       [AMD_PSTATE_UNDEFINED]   = "undefined",
-       [AMD_PSTATE_DISABLE]     = "disable",
-       [AMD_PSTATE_PASSIVE]     = "passive",
-       [AMD_PSTATE_ACTIVE]      = "active",
-       [AMD_PSTATE_GUIDED]      = "guided",
-       NULL,
-};
-
-struct quirk_entry {
-       u32 nominal_freq;
-       u32 lowest_freq;
-};
-
-#endif /* _LINUX_AMD_PSTATE_H */
index 98c6fd0b39b634b893bb463804a7e0af68408cb6..fdfb61ccf55aef23721aa8dbcd52161a1b9e1c90 100644 (file)
@@ -77,7 +77,7 @@ struct cdrom_device_ops {
                                      unsigned int clearing, int slot);
        int (*tray_move) (struct cdrom_device_info *, int);
        int (*lock_door) (struct cdrom_device_info *, int);
-       int (*select_speed) (struct cdrom_device_info *, int);
+       int (*select_speed) (struct cdrom_device_info *, unsigned long);
        int (*get_last_session) (struct cdrom_device_info *,
                                 struct cdrom_multisession *);
        int (*get_mcn) (struct cdrom_device_info *,
index 2ad1ffa4ccb9fec92cfb8d37897a4c5696d9dff6..0ed47d00549ba72d4c39451ce8d0c3fcf0fee108 100644 (file)
@@ -636,6 +636,14 @@ static inline void eth_skb_pkt_type(struct sk_buff *skb,
        }
 }
 
+static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb)
+{
+       struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+       skb_pull_inline(skb, ETH_HLEN);
+       return eth;
+}
+
 /**
  * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
  * @skb: Buffer to pad
index 5e6cd43a6dbdd9fcd29e30d59492c736a149e25a..9709537370ee9b993e7baf1118588755e1f98017 100644 (file)
@@ -852,7 +852,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
 
 /* i2c adapter classes (bitmask) */
 #define I2C_CLASS_HWMON                (1<<0)  /* lm_sensors, ... */
-#define I2C_CLASS_SPD          (1<<7)  /* Memory modules */
 /* Warn users that the adapter doesn't support classes anymore */
 #define I2C_CLASS_DEPRECATED   (1<<8)
 
index 7bc8dff7cf6d760b2c23b037810ba66b234b9fa6..17b3f36ad843ee98922bfad936262b67524d0886 100644 (file)
@@ -1533,7 +1533,7 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
 static inline struct iommu_sva *
 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
index 5e51b0de4c4b578a1a85a9818404d2d3470f914b..08b0d1d9d78b76355e76c5751a094b197a01b096 100644 (file)
@@ -297,9 +297,6 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
                .wait_type_inner = _wait_type,          \
                .lock_type = LD_LOCK_WAIT_OVERRIDE, }
 
-#define lock_map_assert_held(l)                \
-       lockdep_assert(lock_is_held(l) != LOCK_STATE_NOT_HELD)
-
 #else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_init_task(struct task_struct *task)
@@ -391,8 +388,6 @@ extern int lockdep_is_held(const void *);
 #define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)    \
        struct lockdep_map __maybe_unused _name = {}
 
-#define lock_map_assert_held(l)                        do { (void)(l); } while (0)
-
 #endif /* !LOCKDEP */
 
 #ifdef CONFIG_PROVE_LOCKING
index f468763478ae61229f88ba1aeafd3f14191a9d81..5df52e15f7d6ce777ab39b8c7bb25cf8257b70f4 100644 (file)
@@ -10308,9 +10308,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
        u8         mfrl[0x1];
        u8         regs_39_to_32[0x8];
 
-       u8         regs_31_to_10[0x16];
+       u8         regs_31_to_11[0x15];
        u8         mtmp[0x1];
-       u8         regs_8_to_0[0x9];
+       u8         regs_9_to_0[0xa];
 };
 
 struct mlx5_ifc_mcam_access_reg_bits1 {
index d2d291a9cdadba94743daf38cb7ad56bad99ebf3..3ca3906bb8da661c9514d17356004dcf349b2dd1 100644 (file)
@@ -68,6 +68,7 @@ struct netfs_inode {
        loff_t                  remote_i_size;  /* Size of the remote file */
        loff_t                  zero_point;     /* Size after which we assume there's no data
                                                 * on the server */
+       atomic_t                io_count;       /* Number of outstanding reqs */
        unsigned long           flags;
 #define NETFS_ICTX_ODIRECT     0               /* The file has DIO in progress */
 #define NETFS_ICTX_UNBUFFERED  1               /* I/O should not use the pagecache */
@@ -474,6 +475,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx,
        ctx->remote_i_size = i_size_read(&ctx->inode);
        ctx->zero_point = LLONG_MAX;
        ctx->flags = 0;
+       atomic_set(&ctx->io_count, 0);
 #if IS_ENABLED(CONFIG_FSCACHE)
        ctx->cache = NULL;
 #endif
@@ -517,4 +519,20 @@ static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
 #endif
 }
 
+/**
+ * netfs_wait_for_outstanding_io - Wait for outstanding I/O to complete
+ * @ctx: The netfs inode to wait on
+ *
+ * Wait for outstanding I/O requests of any type to complete.  This is intended
+ * to be called from inode eviction routines.  This makes sure that any
+ * resources held by those requests are cleaned up before we let the inode get
+ * cleaned up.
+ */
+static inline void netfs_wait_for_outstanding_io(struct inode *inode)
+{
+       struct netfs_inode *ictx = netfs_inode(inode);
+
+       wait_var_event(&ictx->io_count, atomic_read(&ictx->io_count) == 0);
+}
+
 #endif /* _LINUX_NETFS_H */
index 3d69589c00a4bb4d9ee7a69172e7fb9a65a1ca90..ee633712bba0b9b89a8715a3ee92b59d3499bdf3 100644 (file)
@@ -346,6 +346,19 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
        m->gfp_mask = mask;
 }
 
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size.  I'm not aware of any
+ * assumptions about maximum order if THP are disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages)
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define MAX_PAGECACHE_ORDER    HPAGE_PMD_ORDER
+#else
+#define MAX_PAGECACHE_ORDER    8
+#endif
+
 /**
  * mapping_set_large_folios() - Indicate the file supports large folios.
  * @mapping: The file.
@@ -372,6 +385,14 @@ static inline bool mapping_large_folio_support(struct address_space *mapping)
                test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
 }
 
+/* Return the maximum folio size for this pagecache mapping, in bytes. */
+static inline size_t mapping_max_folio_size(struct address_space *mapping)
+{
+       if (mapping_large_folio_support(mapping))
+               return PAGE_SIZE << MAX_PAGECACHE_ORDER;
+       return PAGE_SIZE;
+}
+
 static inline int filemap_nr_thps(struct address_space *mapping)
 {
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -530,19 +551,6 @@ static inline void *detach_page_private(struct page *page)
        return folio_detach_private(page_folio(page));
 }
 
-/*
- * There are some parts of the kernel which assume that PMD entries
- * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
- * limit the maximum allocation order to PMD size.  I'm not aware of any
- * assumptions about maximum order if THP are disabled, but 8 seems like
- * a good order (that's 1MB if you're using 4kB pages)
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define MAX_PAGECACHE_ORDER    HPAGE_PMD_ORDER
-#else
-#define MAX_PAGECACHE_ORDER    8
-#endif
-
 #ifdef CONFIG_NUMA
 struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
 #else
index fb004fd4e8890537e002e0181427487c9e021732..cafc5ab1cbcb4211808815cec69dceb94370963d 100644 (file)
@@ -413,8 +413,6 @@ struct pci_dev {
        struct resource driver_exclusive_resource;       /* driver exclusive resource ranges */
 
        bool            match_driver;           /* Skip attaching driver */
-       struct lock_class_key cfg_access_key;
-       struct lockdep_map cfg_access_lock;
 
        unsigned int    transparent:1;          /* Subtractive decode bridge */
        unsigned int    io_window:1;            /* Bridge has I/O window */
index 82561242cda4281a109bcef3bcba3a3e9739f5b7..7f2ff95d2deb5e4a79dad6cf96ae9626e9ef1bd0 100644 (file)
@@ -435,8 +435,6 @@ struct pnp_protocol {
 #define protocol_for_each_dev(protocol, dev)   \
        list_for_each_entry(dev, &(protocol)->devices, protocol_list)
 
-extern const struct bus_type pnp_bus_type;
-
 #if defined(CONFIG_PNP)
 
 /* device management */
@@ -469,7 +467,7 @@ int compare_pnp_id(struct pnp_id *pos, const char *id);
 int pnp_register_driver(struct pnp_driver *drv);
 void pnp_unregister_driver(struct pnp_driver *drv);
 
-#define dev_is_pnp(d) ((d)->bus == &pnp_bus_type)
+bool dev_is_pnp(const struct device *dev);
 
 #else
 
@@ -502,7 +500,7 @@ static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -E
 static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; }
 static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
 
-#define dev_is_pnp(d) false
+static inline bool dev_is_pnp(const struct device *dev) { return false; }
 
 #endif /* CONFIG_PNP */
 
index c17e4efbb2e5cd367f880fec4c7e263517e631ed..21a67dc9efe80b2d0dc6bc2851b2b4ab67933818 100644 (file)
@@ -394,21 +394,6 @@ enum tpm2_object_attributes {
        TPM2_OA_SIGN                    = BIT(18),
 };
 
-/*
- * definitions for the canonical template.  These are mandated
- * by the TCG key template documents
- */
-
-#define AES_KEY_BYTES  AES_KEYSIZE_128
-#define AES_KEY_BITS   (AES_KEY_BYTES*8)
-#define TPM2_OA_TMPL   (TPM2_OA_NO_DA |                        \
-                        TPM2_OA_FIXED_TPM |                    \
-                        TPM2_OA_FIXED_PARENT |                 \
-                        TPM2_OA_SENSITIVE_DATA_ORIGIN |        \
-                        TPM2_OA_USER_WITH_AUTH |               \
-                        TPM2_OA_DECRYPT |                      \
-                        TPM2_OA_RESTRICTED)
-
 enum tpm2_session_attributes {
        TPM2_SA_CONTINUE_SESSION        = BIT(0),
        TPM2_SA_AUDIT_EXCLUSIVE         = BIT(1),
@@ -437,8 +422,6 @@ u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset);
 u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset);
 u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset);
 
-u8 *tpm_buf_parameters(struct tpm_buf *buf);
-
 /*
  * Check if TPM device is in the firmware upgrade mode.
  */
index 6d1c8541183dbe7bd6d3e5bd6c57174de9524a50..3a9001a042a5c392a79cfc59af528ef410a28668 100644 (file)
@@ -24,7 +24,7 @@ struct dst_ops {
        void                    (*destroy)(struct dst_entry *);
        void                    (*ifdown)(struct dst_entry *,
                                          struct net_device *dev);
-       struct dst_entry *      (*negative_advice)(struct dst_entry *);
+       void                    (*negative_advice)(struct sock *sk, struct dst_entry *);
        void                    (*link_failure)(struct sk_buff *);
        void                    (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
                                               struct sk_buff *skb, u32 mtu,
index b088d131aeb0d039ce2c34fc6674c616f2f2559d..7e8477057f3d14103f9132c9366a2ae9ed5f0724 100644 (file)
@@ -45,16 +45,17 @@ struct pp_alloc_cache {
 
 /**
  * struct page_pool_params - page pool parameters
+ * @fast:      params accessed frequently on hotpath
  * @order:     2^order pages on allocation
  * @pool_size: size of the ptr_ring
  * @nid:       NUMA node id to allocate from pages from
  * @dev:       device, for DMA pre-mapping purposes
- * @netdev:    netdev this pool will serve (leave as NULL if none or multiple)
  * @napi:      NAPI which is the sole consumer of pages, otherwise NULL
  * @dma_dir:   DMA mapping direction
  * @max_len:   max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
  * @offset:    DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
- * @netdev:    corresponding &net_device for Netlink introspection
+ * @slow:      params with slowpath access only (initialization and Netlink)
+ * @netdev:    netdev this pool will serve (leave as NULL if none or multiple)
  * @flags:     PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
  */
 struct page_pool_params {
index d88c0dfc2d46831e63492becfa15603624fc457d..ebcb8896bffc1f0266c3928f40fea493169c0136 100644 (file)
@@ -285,4 +285,16 @@ static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
        return atomic_read(&queue->young);
 }
 
+/* RFC 7323 2.3 Using the Window Scale Option
+ *  The window field (SEG.WND) of every outgoing segment, with the
+ *  exception of <SYN> segments, MUST be right-shifted by
+ *  Rcv.Wind.Shift bits.
+ *
+ * This means the SEG.WND carried in SYNACK can not exceed 65535.
+ * We use this property to harden TCP stack while in NEW_SYN_RECV state.
+ */
+static inline u32 tcp_synack_window(const struct request_sock *req)
+{
+       return min(req->rsk_rcv_wnd, 65535U);
+}
 #endif /* _REQUEST_SOCK_H */
index 3bfb80bad1739d244a3906fa7f0e1a606dfaf868..b45d57b5968af4d17a1fc002a75a0923aefabf12 100644 (file)
@@ -13,6 +13,7 @@ enum rtnl_link_flags {
        RTNL_FLAG_DOIT_UNLOCKED         = BIT(0),
        RTNL_FLAG_BULK_DEL_SUPPORTED    = BIT(1),
        RTNL_FLAG_DUMP_UNLOCKED         = BIT(2),
+       RTNL_FLAG_DUMP_SPLIT_NLM_DONE   = BIT(3),       /* legacy behavior */
 };
 
 enum rtnl_kinds {
index 5f4d0629348f3fcb7b8d5e5e0796a35a9b913101..953c8dc4e259e84b927cc77edc0e55cdde654e94 100644 (file)
@@ -2063,17 +2063,10 @@ sk_dst_get(const struct sock *sk)
 
 static inline void __dst_negative_advice(struct sock *sk)
 {
-       struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+       struct dst_entry *dst = __sk_dst_get(sk);
 
-       if (dst && dst->ops->negative_advice) {
-               ndst = dst->ops->negative_advice(dst);
-
-               if (ndst != dst) {
-                       rcu_assign_pointer(sk->sk_dst_cache, ndst);
-                       sk_tx_queue_clear(sk);
-                       WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
-               }
-       }
+       if (dst && dst->ops->negative_advice)
+               dst->ops->negative_advice(sk, dst);
 }
 
 static inline void dst_negative_advice(struct sock *sk)
index 471e177362b4c02ed37a85d4b5b1174aed67f18c..5d8e9ed2c0056f8437baf6b7d1853d30f079a64a 100644 (file)
@@ -86,7 +86,8 @@ static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
 struct tcp_ao_info {
        /* List of tcp_ao_key's */
        struct hlist_head       head;
-       /* current_key and rnext_key aren't maintained on listen sockets.
+       /* current_key and rnext_key are maintained on sockets
+        * in TCP_AO_ESTABLISHED states.
         * Their purpose is to cache keys on established connections,
         * saving needless lookups. Never dereference any of them from
         * listen sockets.
@@ -201,9 +202,9 @@ struct tcp6_ao_context {
 };
 
 struct tcp_sigpool;
+/* Established states are fast-path and there always is current_key/rnext_key */
 #define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
-                           TCPF_CLOSE | TCPF_CLOSE_WAIT | \
-                           TCPF_LAST_ACK | TCPF_CLOSING)
+                           TCPF_CLOSE_WAIT | TCPF_LAST_ACK | TCPF_CLOSING)
 
 int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
                        struct tcp_ao_key *key, struct tcphdr *th,
index 61c6054618c8ead951b15390ec91bb40b4a63ca2..3edd7a7346daaaf30ddb44e1151d8caae8371a64 100644 (file)
@@ -124,7 +124,7 @@ struct snd_pcm_ops {
 #define SNDRV_PCM_RATE_768000          (1U<<16)        /* 768000Hz */
 
 #define SNDRV_PCM_RATE_CONTINUOUS      (1U<<30)        /* continuous range */
-#define SNDRV_PCM_RATE_KNOT            (1U<<31)        /* supports more non-continuos rates */
+#define SNDRV_PCM_RATE_KNOT            (1U<<31)        /* supports more non-continuous rates */
 
 #define SNDRV_PCM_RATE_8000_44100      (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\
                                         SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\
index f2afb7cc4926cdc048541a2ca2e18b8e474ea69d..18e3745b86cd481f97fe7330a297373a25c29209 100644 (file)
@@ -69,8 +69,7 @@ struct proc_input {
 
 static inline enum proc_cn_event valid_event(enum proc_cn_event ev_type)
 {
-       ev_type &= PROC_EVENT_ALL;
-       return ev_type;
+       return (enum proc_cn_event)(ev_type & PROC_EVENT_ALL);
 }
 
 /*
index 8ddb2219a84bdfa6412097e0375e7c3f92a19560..6b384065c01356efbe8db490fd3f85c9cebbe6d3 100644 (file)
@@ -5,61 +5,60 @@
 #include <linux/compiler.h>
 
 /* 0x4B is 'K', to avoid collision with termios and vt */
-#define KD_IOCTL_BASE  'K'
 
-#define GIO_FONT       _IO(KD_IOCTL_BASE, 0x60)        /* gets font in expanded form */
-#define PIO_FONT       _IO(KD_IOCTL_BASE, 0x61)        /* use font in expanded form */
+#define GIO_FONT       0x4B60  /* gets font in expanded form */
+#define PIO_FONT       0x4B61  /* use font in expanded form */
 
-#define GIO_FONTX      _IO(KD_IOCTL_BASE, 0x6B)        /* get font using struct consolefontdesc */
-#define PIO_FONTX      _IO(KD_IOCTL_BASE, 0x6C)        /* set font using struct consolefontdesc */
+#define GIO_FONTX      0x4B6B  /* get font using struct consolefontdesc */
+#define PIO_FONTX      0x4B6C  /* set font using struct consolefontdesc */
 struct consolefontdesc {
        unsigned short charcount;       /* characters in font (256 or 512) */
        unsigned short charheight;      /* scan lines per character (1-32) */
        char __user *chardata;          /* font data in expanded form */
 };
 
-#define PIO_FONTRESET  _IO(KD_IOCTL_BASE, 0x6D)        /* reset to default font */
+#define PIO_FONTRESET   0x4B6D /* reset to default font */
 
-#define GIO_CMAP       _IO(KD_IOCTL_BASE, 0x70)        /* gets colour palette on VGA+ */
-#define PIO_CMAP       _IO(KD_IOCTL_BASE, 0x71)        /* sets colour palette on VGA+ */
+#define GIO_CMAP       0x4B70  /* gets colour palette on VGA+ */
+#define PIO_CMAP       0x4B71  /* sets colour palette on VGA+ */
 
-#define KIOCSOUND      _IO(KD_IOCTL_BASE, 0x2F)        /* start sound generation (0 for off) */
-#define KDMKTONE       _IO(KD_IOCTL_BASE, 0x30)        /* generate tone */
+#define KIOCSOUND      0x4B2F  /* start sound generation (0 for off) */
+#define KDMKTONE       0x4B30  /* generate tone */
 
-#define KDGETLED       _IO(KD_IOCTL_BASE, 0x31)        /* return current led state */
-#define KDSETLED       _IO(KD_IOCTL_BASE, 0x32)        /* set led state [lights, not flags] */
+#define KDGETLED       0x4B31  /* return current led state */
+#define KDSETLED       0x4B32  /* set led state [lights, not flags] */
 #define        LED_SCR         0x01    /* scroll lock led */
 #define        LED_NUM         0x02    /* num lock led */
 #define        LED_CAP         0x04    /* caps lock led */
 
-#define KDGKBTYPE      _IO(KD_IOCTL_BASE, 0x33)        /* get keyboard type */
+#define KDGKBTYPE      0x4B33  /* get keyboard type */
 #define        KB_84           0x01
 #define        KB_101          0x02    /* this is what we always answer */
 #define        KB_OTHER        0x03
 
-#define KDADDIO                _IO(KD_IOCTL_BASE, 0x34)        /* add i/o port as valid */
-#define KDDELIO                _IO(KD_IOCTL_BASE, 0x35)        /* del i/o port as valid */
-#define KDENABIO       _IO(KD_IOCTL_BASE, 0x36)        /* enable i/o to video board */
-#define KDDISABIO      _IO(KD_IOCTL_BASE, 0x37)        /* disable i/o to video board */
+#define KDADDIO                0x4B34  /* add i/o port as valid */
+#define KDDELIO                0x4B35  /* del i/o port as valid */
+#define KDENABIO       0x4B36  /* enable i/o to video board */
+#define KDDISABIO      0x4B37  /* disable i/o to video board */
 
-#define KDSETMODE      _IO(KD_IOCTL_BASE, 0x3A)        /* set text/graphics mode */
+#define KDSETMODE      0x4B3A  /* set text/graphics mode */
 #define                KD_TEXT         0x00
 #define                KD_GRAPHICS     0x01
 #define                KD_TEXT0        0x02    /* obsolete */
 #define                KD_TEXT1        0x03    /* obsolete */
-#define KDGETMODE      _IO(KD_IOCTL_BASE, 0x3B)        /* get current mode */
+#define KDGETMODE      0x4B3B  /* get current mode */
 
-#define KDMAPDISP      _IO(KD_IOCTL_BASE, 0x3C)        /* map display into address space */
-#define KDUNMAPDISP    _IO(KD_IOCTL_BASE, 0x3D)        /* unmap display from address space */
+#define KDMAPDISP      0x4B3C  /* map display into address space */
+#define KDUNMAPDISP    0x4B3D  /* unmap display from address space */
 
 typedef char scrnmap_t;
 #define                E_TABSZ         256
-#define GIO_SCRNMAP    _IO(KD_IOCTL_BASE, 0x40)        /* get screen mapping from kernel */
-#define PIO_SCRNMAP    _IO(KD_IOCTL_BASE, 0x41)        /* put screen mapping table in kernel */
-#define GIO_UNISCRNMAP _IO(KD_IOCTL_BASE, 0x69)        /* get full Unicode screen mapping */
-#define PIO_UNISCRNMAP _IO(KD_IOCTL_BASE, 0x6A)        /* set full Unicode screen mapping */
+#define GIO_SCRNMAP    0x4B40  /* get screen mapping from kernel */
+#define PIO_SCRNMAP    0x4B41  /* put screen mapping table in kernel */
+#define GIO_UNISCRNMAP  0x4B69 /* get full Unicode screen mapping */
+#define PIO_UNISCRNMAP  0x4B6A  /* set full Unicode screen mapping */
 
-#define GIO_UNIMAP     _IO(KD_IOCTL_BASE, 0x66)        /* get unicode-to-font mapping from kernel */
+#define GIO_UNIMAP     0x4B66  /* get unicode-to-font mapping from kernel */
 struct unipair {
        unsigned short unicode;
        unsigned short fontpos;
@@ -68,8 +67,8 @@ struct unimapdesc {
        unsigned short entry_ct;
        struct unipair __user *entries;
 };
-#define PIO_UNIMAP     _IO(KD_IOCTL_BASE, 0x67)        /* put unicode-to-font mapping in kernel */
-#define PIO_UNIMAPCLR  _IO(KD_IOCTL_BASE, 0x68)        /* clear table, possibly advise hash algorithm */
+#define PIO_UNIMAP     0x4B67  /* put unicode-to-font mapping in kernel */
+#define PIO_UNIMAPCLR  0x4B68  /* clear table, possibly advise hash algorithm */
 struct unimapinit {
        unsigned short advised_hashsize;  /* 0 if no opinion */
        unsigned short advised_hashstep;  /* 0 if no opinion */
@@ -84,19 +83,19 @@ struct unimapinit {
 #define                K_MEDIUMRAW     0x02
 #define                K_UNICODE       0x03
 #define                K_OFF           0x04
-#define KDGKBMODE      _IO(KD_IOCTL_BASE, 0x44)        /* gets current keyboard mode */
-#define KDSKBMODE      _IO(KD_IOCTL_BASE, 0x45)        /* sets current keyboard mode */
+#define KDGKBMODE      0x4B44  /* gets current keyboard mode */
+#define KDSKBMODE      0x4B45  /* sets current keyboard mode */
 
 #define                K_METABIT       0x03
 #define                K_ESCPREFIX     0x04
-#define KDGKBMETA      _IO(KD_IOCTL_BASE, 0x62)        /* gets meta key handling mode */
-#define KDSKBMETA      _IO(KD_IOCTL_BASE, 0x63)        /* sets meta key handling mode */
+#define KDGKBMETA      0x4B62  /* gets meta key handling mode */
+#define KDSKBMETA      0x4B63  /* sets meta key handling mode */
 
 #define                K_SCROLLLOCK    0x01
 #define                K_NUMLOCK       0x02
 #define                K_CAPSLOCK      0x04
-#define        KDGKBLED        _IO(KD_IOCTL_BASE, 0x64)        /* get led flags (not lights) */
-#define        KDSKBLED        _IO(KD_IOCTL_BASE, 0x65)        /* set led flags (not lights) */
+#define        KDGKBLED        0x4B64  /* get led flags (not lights) */
+#define        KDSKBLED        0x4B65  /* set led flags (not lights) */
 
 struct kbentry {
        unsigned char kb_table;
@@ -108,15 +107,15 @@ struct kbentry {
 #define                K_ALTTAB        0x02
 #define                K_ALTSHIFTTAB   0x03
 
-#define KDGKBENT       _IO(KD_IOCTL_BASE, 0x46)        /* gets one entry in translation table */
-#define KDSKBENT       _IO(KD_IOCTL_BASE, 0x47)        /* sets one entry in translation table */
+#define KDGKBENT       0x4B46  /* gets one entry in translation table */
+#define KDSKBENT       0x4B47  /* sets one entry in translation table */
 
 struct kbsentry {
        unsigned char kb_func;
        unsigned char kb_string[512];
 };
-#define KDGKBSENT      _IO(KD_IOCTL_BASE, 0x48)        /* gets one function key string entry */
-#define KDSKBSENT      _IO(KD_IOCTL_BASE, 0x49)        /* sets one function key string entry */
+#define KDGKBSENT      0x4B48  /* gets one function key string entry */
+#define KDSKBSENT      0x4B49  /* sets one function key string entry */
 
 struct kbdiacr {
         unsigned char diacr, base, result;
@@ -125,8 +124,8 @@ struct kbdiacrs {
         unsigned int kb_cnt;    /* number of entries in following array */
        struct kbdiacr kbdiacr[256];    /* MAX_DIACR from keyboard.h */
 };
-#define KDGKBDIACR     _IO(KD_IOCTL_BASE, 0x4A)  /* read kernel accent table */
-#define KDSKBDIACR     _IO(KD_IOCTL_BASE, 0x4B)  /* write kernel accent table */
+#define KDGKBDIACR      0x4B4A  /* read kernel accent table */
+#define KDSKBDIACR      0x4B4B  /* write kernel accent table */
 
 struct kbdiacruc {
        unsigned int diacr, base, result;
@@ -135,16 +134,16 @@ struct kbdiacrsuc {
         unsigned int kb_cnt;    /* number of entries in following array */
        struct kbdiacruc kbdiacruc[256];    /* MAX_DIACR from keyboard.h */
 };
-#define KDGKBDIACRUC   _IO(KD_IOCTL_BASE, 0xFA)  /* read kernel accent table - UCS */
-#define KDSKBDIACRUC   _IO(KD_IOCTL_BASE, 0xFB)  /* write kernel accent table - UCS */
+#define KDGKBDIACRUC    0x4BFA  /* read kernel accent table - UCS */
+#define KDSKBDIACRUC    0x4BFB  /* write kernel accent table - UCS */
 
 struct kbkeycode {
        unsigned int scancode, keycode;
 };
-#define KDGETKEYCODE   _IO(KD_IOCTL_BASE, 0x4C)        /* read kernel keycode table entry */
-#define KDSETKEYCODE   _IO(KD_IOCTL_BASE, 0x4D)        /* write kernel keycode table entry */
+#define KDGETKEYCODE   0x4B4C  /* read kernel keycode table entry */
+#define KDSETKEYCODE   0x4B4D  /* write kernel keycode table entry */
 
-#define KDSIGACCEPT    _IO(KD_IOCTL_BASE, 0x4E)        /* accept kbd generated signals */
+#define KDSIGACCEPT    0x4B4E  /* accept kbd generated signals */
 
 struct kbd_repeat {
        int delay;      /* in msec; <= 0: don't change */
@@ -152,11 +151,10 @@ struct kbd_repeat {
                        /* earlier this field was misnamed "rate" */
 };
 
-#define KDKBDREP       _IO(KD_IOCTL_BASE, 0x52)        /* set keyboard delay/repeat rate;
-                                                        * actually used values are returned
-                                                        */
+#define KDKBDREP        0x4B52  /* set keyboard delay/repeat rate;
+                                * actually used values are returned */
 
-#define KDFONTOP       _IO(KD_IOCTL_BASE, 0x72)        /* font operations */
+#define KDFONTOP       0x4B72  /* font operations */
 
 struct console_font_op {
        unsigned int op;        /* operation code KD_FONT_OP_* */
index a8188202413ec4cab8ea0c1df92637a64750cb4e..43742ac5b00da0aa66660d86cb627561d8eaf298 100644 (file)
@@ -148,6 +148,7 @@ enum {
        NETDEV_A_QSTATS_RX_ALLOC_FAIL,
        NETDEV_A_QSTATS_RX_HW_DROPS,
        NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS,
+       NETDEV_A_QSTATS_RX_CSUM_COMPLETE,
        NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY,
        NETDEV_A_QSTATS_RX_CSUM_NONE,
        NETDEV_A_QSTATS_RX_CSUM_BAD,
index d1c47a9d921582d3ebd646c0cc46de70333d92fc..7d3316fe9bfc469a9fe3cf316b61807004861252 100644 (file)
@@ -927,7 +927,11 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
 {
        struct io_wq_acct *acct = io_work_get_acct(wq, work);
        unsigned long work_flags = work->flags;
-       struct io_cb_cancel_data match;
+       struct io_cb_cancel_data match = {
+               .fn             = io_wq_work_match_item,
+               .data           = work,
+               .cancel_all     = false,
+       };
        bool do_create;
 
        /*
@@ -965,10 +969,6 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
                raw_spin_unlock(&wq->lock);
 
                /* fatal condition, failed to create the first worker */
-               match.fn                = io_wq_work_match_item,
-               match.data              = work,
-               match.cancel_all        = false,
-
                io_acct_cancel_pending_work(wq, acct, &match);
        }
 }
index 624ca9076a50beba7c60c73d9beb394a9326d709..726e6367af4d3742641f756e3cd1d01d26ef214f 100644 (file)
@@ -433,7 +433,7 @@ static inline bool io_file_can_poll(struct io_kiocb *req)
 {
        if (req->flags & REQ_F_CAN_POLL)
                return true;
-       if (file_can_poll(req->file)) {
+       if (req->file && file_can_poll(req->file)) {
                req->flags |= REQ_F_CAN_POLL;
                return true;
        }
index 4785d6af5fee9298940fa6c9a6cfb68c06df9d24..a0f32a255fd1e1a0e3bd0c2f20421d66fc0b2a25 100644 (file)
@@ -244,6 +244,7 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
        struct io_ring_ctx *ctx = file->private_data;
        size_t sz = vma->vm_end - vma->vm_start;
        long offset = vma->vm_pgoff << PAGE_SHIFT;
+       unsigned int npages;
        void *ptr;
 
        ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
@@ -253,8 +254,8 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
        switch (offset & IORING_OFF_MMAP_MASK) {
        case IORING_OFF_SQ_RING:
        case IORING_OFF_CQ_RING:
-               return io_uring_mmap_pages(ctx, vma, ctx->ring_pages,
-                                               ctx->n_ring_pages);
+               npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT);
+               return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages);
        case IORING_OFF_SQES:
                return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages,
                                                ctx->n_sqe_pages);
index 883a1a6659075604259f15265dd57096ebfe7536..8c18ede595c4152c64b0cd7e8c4476c8ea9f5a24 100644 (file)
@@ -261,12 +261,14 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
 }
 
 /*
- * __io_napi_adjust_timeout() - Add napi id to the busy poll list
+ * __io_napi_adjust_timeout() - adjust busy loop timeout
  * @ctx: pointer to io-uring context structure
  * @iowq: pointer to io wait queue
  * @ts: pointer to timespec or NULL
  *
  * Adjust the busy loop timeout according to timespec and busy poll timeout.
+ * If the specified NAPI timeout is bigger than the wait timeout, then adjust
+ * the NAPI timeout accordingly.
  */
 void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
                              struct timespec64 *ts)
@@ -274,16 +276,16 @@ void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iow
        unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);
 
        if (ts) {
-               struct timespec64 poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
-
-               if (timespec64_compare(ts, &poll_to_ts) > 0) {
-                       *ts = timespec64_sub(*ts, poll_to_ts);
-               } else {
-                       u64 to = timespec64_to_ns(ts);
-
-                       do_div(to, 1000);
-                       ts->tv_sec = 0;
-                       ts->tv_nsec = 0;
+               struct timespec64 poll_to_ts;
+
+               poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
+               if (timespec64_compare(ts, &poll_to_ts) < 0) {
+                       s64 poll_to_ns = timespec64_to_ns(ts);
+                       if (poll_to_ns > 0) {
+                               u64 val = poll_to_ns + 999;
+                               do_div(val, (s64) 1000);
+                               poll_to = val;
+                       }
                }
        }
 
index 0a48596429d9f53edd252a58435619654f7ee7a0..7c98c4d5094633cb948bb761d2345e65cfb380f2 100644 (file)
@@ -1127,6 +1127,9 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
                flags |= MSG_DONTWAIT;
 
 retry_multishot:
+       kmsg->msg.msg_inq = -1;
+       kmsg->msg.msg_flags = 0;
+
        if (io_do_buffer_select(req)) {
                ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
                if (unlikely(ret))
@@ -1134,9 +1137,6 @@ retry_multishot:
                sr->buf = NULL;
        }
 
-       kmsg->msg.msg_inq = -1;
-       kmsg->msg.msg_flags = 0;
-
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 
index 2de5cca9504eb4bc9be4002b0f9a7f27e9c605fc..2e3b7b16effb34154b2be88dc07af5e6e0a4699a 100644 (file)
@@ -516,10 +516,12 @@ const struct io_cold_def io_cold_defs[] = {
        },
        [IORING_OP_READ_FIXED] = {
                .name                   = "READ_FIXED",
+               .cleanup                = io_readv_writev_cleanup,
                .fail                   = io_rw_fail,
        },
        [IORING_OP_WRITE_FIXED] = {
                .name                   = "WRITE_FIXED",
+               .cleanup                = io_readv_writev_cleanup,
                .fail                   = io_rw_fail,
        },
        [IORING_OP_POLL_ADD] = {
@@ -582,10 +584,12 @@ const struct io_cold_def io_cold_defs[] = {
        },
        [IORING_OP_READ] = {
                .name                   = "READ",
+               .cleanup                = io_readv_writev_cleanup,
                .fail                   = io_rw_fail,
        },
        [IORING_OP_WRITE] = {
                .name                   = "WRITE",
+               .cleanup                = io_readv_writev_cleanup,
                .fail                   = io_rw_fail,
        },
        [IORING_OP_FADVISE] = {
@@ -692,6 +696,7 @@ const struct io_cold_def io_cold_defs[] = {
        },
        [IORING_OP_READ_MULTISHOT] = {
                .name                   = "READ_MULTISHOT",
+               .cleanup                = io_readv_writev_cleanup,
        },
        [IORING_OP_WAITID] = {
                .name                   = "WAITID",
index ef8c908346a4ef9ed5d4a3a8015e38612d0bd75d..c0010a66a6f2c2e72c795b10e6ad3773716200ac 100644 (file)
@@ -355,8 +355,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
        }
 
        if (sqd) {
+               mutex_unlock(&ctx->uring_lock);
                mutex_unlock(&sqd->lock);
                io_put_sq_data(sqd);
+               mutex_lock(&ctx->uring_lock);
        }
 
        if (copy_to_user(arg, new_count, sizeof(new_count)))
@@ -380,8 +382,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
        return 0;
 err:
        if (sqd) {
+               mutex_unlock(&ctx->uring_lock);
                mutex_unlock(&sqd->lock);
                io_put_sq_data(sqd);
+               mutex_lock(&ctx->uring_lock);
        }
        return ret;
 }
index 4e2cdbb5629f22fc1464c5cba296688c45fc8d26..7f3b34452243c83b6e113b5a5d831fc088854f54 100644 (file)
@@ -760,9 +760,6 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
                for (i = 0; i < dtab->n_buckets; i++) {
                        head = dev_map_index_hash(dtab, i);
                        hlist_for_each_entry_safe(dst, next, head, index_hlist) {
-                               if (!dst)
-                                       continue;
-
                                if (is_ifindex_excluded(excluded_devices, num_excluded,
                                                        dst->dev->ifindex))
                                        continue;
index 2222c3ff88e7fd639390112de66581b934584457..f45ed6adc092af680ae01b01027b1d572e5c418f 100644 (file)
@@ -2998,6 +2998,7 @@ static int bpf_obj_get(const union bpf_attr *attr)
 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
                   const struct bpf_link_ops *ops, struct bpf_prog *prog)
 {
+       WARN_ON(ops->dealloc && ops->dealloc_deferred);
        atomic64_set(&link->refcnt, 1);
        link->type = type;
        link->id = 0;
@@ -3056,16 +3057,17 @@ static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
 /* bpf_link_free is guaranteed to be called from process context */
 static void bpf_link_free(struct bpf_link *link)
 {
+       const struct bpf_link_ops *ops = link->ops;
        bool sleepable = false;
 
        bpf_link_free_id(link->id);
        if (link->prog) {
                sleepable = link->prog->sleepable;
                /* detach BPF program, clean up used resources */
-               link->ops->release(link);
+               ops->release(link);
                bpf_prog_put(link->prog);
        }
-       if (link->ops->dealloc_deferred) {
+       if (ops->dealloc_deferred) {
                /* schedule BPF link deallocation; if underlying BPF program
                 * is sleepable, we need to first wait for RCU tasks trace
                 * sync, then go through "classic" RCU grace period
@@ -3074,9 +3076,8 @@ static void bpf_link_free(struct bpf_link *link)
                        call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
                else
                        call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
-       }
-       if (link->ops->dealloc)
-               link->ops->dealloc(link);
+       } else if (ops->dealloc)
+               ops->dealloc(link);
 }
 
 static void bpf_link_put_deferred(struct work_struct *work)
index 77da1f438beccec71f59a4b129323a1064389e12..36ef8e96787ed571e8a266687f916d3b450bd3bf 100644 (file)
@@ -8882,7 +8882,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
        enum bpf_attach_type eatype = env->prog->expected_attach_type;
        enum bpf_prog_type type = resolve_prog_type(env->prog);
 
-       if (func_id != BPF_FUNC_map_update_elem)
+       if (func_id != BPF_FUNC_map_update_elem &&
+           func_id != BPF_FUNC_map_delete_elem)
                return false;
 
        /* It's not possible to get access to a locked struct sock in these
@@ -8893,6 +8894,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
                if (eatype == BPF_TRACE_ITER)
                        return true;
                break;
+       case BPF_PROG_TYPE_SOCK_OPS:
+               /* map_update allowed only via dedicated helpers with event type checks */
+               if (func_id == BPF_FUNC_map_delete_elem)
+                       return true;
+               break;
        case BPF_PROG_TYPE_SOCKET_FILTER:
        case BPF_PROG_TYPE_SCHED_CLS:
        case BPF_PROG_TYPE_SCHED_ACT:
@@ -8988,7 +8994,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
        case BPF_MAP_TYPE_SOCKMAP:
                if (func_id != BPF_FUNC_sk_redirect_map &&
                    func_id != BPF_FUNC_sock_map_update &&
-                   func_id != BPF_FUNC_map_delete_elem &&
                    func_id != BPF_FUNC_msg_redirect_map &&
                    func_id != BPF_FUNC_sk_select_reuseport &&
                    func_id != BPF_FUNC_map_lookup_elem &&
@@ -8998,7 +9003,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
        case BPF_MAP_TYPE_SOCKHASH:
                if (func_id != BPF_FUNC_sk_redirect_hash &&
                    func_id != BPF_FUNC_sock_hash_update &&
-                   func_id != BPF_FUNC_map_delete_elem &&
                    func_id != BPF_FUNC_msg_redirect_hash &&
                    func_id != BPF_FUNC_sk_select_reuseport &&
                    func_id != BPF_FUNC_map_lookup_elem &&
@@ -11124,7 +11128,11 @@ BTF_ID(func, bpf_iter_css_task_new)
 #else
 BTF_ID_UNUSED
 #endif
+#ifdef CONFIG_BPF_EVENTS
 BTF_ID(func, bpf_session_cookie)
+#else
+BTF_ID_UNUSED
+#endif
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
index 02205ab53b7e93b2122fcfd3a4ab811fa2d97166..4950e0b622b1f3af9e985bb67624401193b8b1db 100644 (file)
@@ -101,7 +101,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
        struct task_struct **tsk;
        int threads = map->bparam.threads;
        int node = map->bparam.node;
-       const cpumask_t *cpu_mask = cpumask_of_node(node);
        u64 loops;
        int ret = 0;
        int i;
@@ -118,11 +117,13 @@ static int do_map_benchmark(struct map_benchmark_data *map)
                if (IS_ERR(tsk[i])) {
                        pr_err("create dma_map thread failed\n");
                        ret = PTR_ERR(tsk[i]);
+                       while (--i >= 0)
+                               kthread_stop(tsk[i]);
                        goto out;
                }
 
                if (node != NUMA_NO_NODE)
-                       kthread_bind_mask(tsk[i], cpu_mask);
+                       kthread_bind_mask(tsk[i], cpumask_of_node(node));
        }
 
        /* clear the old value in the previous benchmark */
@@ -139,13 +140,17 @@ static int do_map_benchmark(struct map_benchmark_data *map)
 
        msleep_interruptible(map->bparam.seconds * 1000);
 
-       /* wait for the completion of benchmark threads */
+       /* wait for the completion of all started benchmark threads */
        for (i = 0; i < threads; i++) {
-               ret = kthread_stop(tsk[i]);
-               if (ret)
-                       goto out;
+               int kthread_ret = kthread_stop_put(tsk[i]);
+
+               if (kthread_ret)
+                       ret = kthread_ret;
        }
 
+       if (ret)
+               goto out;
+
        loops = atomic64_read(&map->loops);
        if (likely(loops > 0)) {
                u64 map_variance, unmap_variance;
@@ -170,8 +175,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
        }
 
 out:
-       for (i = 0; i < threads; i++)
-               put_task_struct(tsk[i]);
        put_device(map->dev);
        kfree(tsk);
        return ret;
@@ -208,7 +211,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
                }
 
                if (map->bparam.node != NUMA_NO_NODE &&
-                   !node_possible(map->bparam.node)) {
+                   (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
+                    !node_possible(map->bparam.node))) {
                        pr_err("invalid numa node\n");
                        return -EINVAL;
                }
@@ -252,6 +256,9 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
                 * dma_mask changed by benchmark
                 */
                dma_set_mask(map->dev, old_dma_mask);
+
+               if (ret)
+                       return ret;
                break;
        default:
                return -EINVAL;
index 6d443ea22bb7325beb1f7b6e2807497cbdd35cac..383fd43ac6122247310720afc391ab2610bd9847 100755 (executable)
@@ -14,7 +14,12 @@ include/
 arch/$SRCARCH/include/
 "
 
-type cpio > /dev/null
+if ! command -v cpio >/dev/null; then
+       echo >&2 "***"
+       echo >&2 "*** 'cpio' could not be found."
+       echo >&2 "***"
+       exit 1
+fi
 
 # Support incremental builds by skipping archive generation
 # if timestamps of files being archived are not changed.
@@ -84,7 +89,7 @@ find $cpio_dir -type f -print0 |
 
 # Create archive and try to normalize metadata for reproducibility.
 tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
-    --owner=0 --group=0 --sort=name --numeric-owner \
+    --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
     -I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
 
 echo $headers_md5 > kernel/kheaders.md5
index d9abb7ab031dd5f8dc6950ba26ac0a4d0bb4d7da..753b8dd42a59a5beddf5e13cece6d8ecc1846157 100644 (file)
@@ -1595,7 +1595,7 @@ int swsusp_check(bool exclusive)
 
 put:
                if (error)
-                       fput(hib_resume_bdev_file);
+                       bdev_fput(hib_resume_bdev_file);
                else
                        pr_debug("Image signature found, resuming\n");
        } else {
index f5154c051d2c105dea6d42735872a34a8d41150c..d1daeab1bbc141df37023a2f1c683ec7268b732e 100644 (file)
@@ -3295,7 +3295,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
        struct bpf_run_ctx *old_run_ctx;
        int err = 0;
 
-       if (link->task && current != link->task)
+       if (link->task && current->mm != link->task->mm)
                return 0;
 
        if (sleepable)
@@ -3396,8 +3396,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
        upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
        uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
        cnt = attr->link_create.uprobe_multi.cnt;
+       pid = attr->link_create.uprobe_multi.pid;
 
-       if (!upath || !uoffsets || !cnt)
+       if (!upath || !uoffsets || !cnt || pid < 0)
                return -EINVAL;
        if (cnt > MAX_UPROBE_MULTI_CNT)
                return -E2BIG;
@@ -3421,11 +3422,8 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
                goto error_path_put;
        }
 
-       pid = attr->link_create.uprobe_multi.pid;
        if (pid) {
-               rcu_read_lock();
-               task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
-               rcu_read_unlock();
+               task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
                if (!task) {
                        err = -ESRCH;
                        goto error_path_put;
@@ -3519,7 +3517,6 @@ static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
 }
 #endif /* CONFIG_UPROBES */
 
-#ifdef CONFIG_FPROBE
 __bpf_kfunc_start_defs();
 
 __bpf_kfunc bool bpf_session_is_return(void)
@@ -3568,4 +3565,3 @@ static int __init bpf_kprobe_multi_kfuncs_init(void)
 }
 
 late_initcall(bpf_kprobe_multi_kfuncs_init);
-#endif
index 5e263c14157403cd9ad65be5bf543fe525fb09ba..39877c80d6cb9ace8163e2bfd16525629562ba82 100644 (file)
@@ -554,6 +554,10 @@ static int parse_btf_field(char *fieldname, const struct btf_type *type,
                        anon_offs = 0;
                        field = btf_find_struct_member(ctx->btf, type, fieldname,
                                                       &anon_offs);
+                       if (IS_ERR(field)) {
+                               trace_probe_log_err(ctx->offset, BAD_BTF_TID);
+                               return PTR_ERR(field);
+                       }
                        if (!field) {
                                trace_probe_log_err(ctx->offset, NO_BTF_FIELD);
                                return -ENOENT;
index 8541fa1494ae3d7a22e18fd37b99434b9042f5f0..c98e3b3386badb60a187e86e557ed4c26f76c966 100644 (file)
@@ -970,19 +970,17 @@ static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
 
 static void __uprobe_trace_func(struct trace_uprobe *tu,
                                unsigned long func, struct pt_regs *regs,
-                               struct uprobe_cpu_buffer **ucbp,
+                               struct uprobe_cpu_buffer *ucb,
                                struct trace_event_file *trace_file)
 {
        struct uprobe_trace_entry_head *entry;
        struct trace_event_buffer fbuffer;
-       struct uprobe_cpu_buffer *ucb;
        void *data;
        int size, esize;
        struct trace_event_call *call = trace_probe_event_call(&tu->tp);
 
        WARN_ON(call != trace_file->event_call);
 
-       ucb = prepare_uprobe_buffer(tu, regs, ucbp);
        if (WARN_ON_ONCE(ucb->dsize > PAGE_SIZE))
                return;
 
@@ -1014,13 +1012,16 @@ static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
                             struct uprobe_cpu_buffer **ucbp)
 {
        struct event_file_link *link;
+       struct uprobe_cpu_buffer *ucb;
 
        if (is_ret_probe(tu))
                return 0;
 
+       ucb = prepare_uprobe_buffer(tu, regs, ucbp);
+
        rcu_read_lock();
        trace_probe_for_each_link_rcu(link, &tu->tp)
-               __uprobe_trace_func(tu, 0, regs, ucbp, link->file);
+               __uprobe_trace_func(tu, 0, regs, ucb, link->file);
        rcu_read_unlock();
 
        return 0;
@@ -1031,10 +1032,13 @@ static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
                                 struct uprobe_cpu_buffer **ucbp)
 {
        struct event_file_link *link;
+       struct uprobe_cpu_buffer *ucb;
+
+       ucb = prepare_uprobe_buffer(tu, regs, ucbp);
 
        rcu_read_lock();
        trace_probe_for_each_link_rcu(link, &tu->tp)
-               __uprobe_trace_func(tu, func, regs, ucbp, link->file);
+               __uprobe_trace_func(tu, func, regs, ucb, link->file);
        rcu_read_unlock();
 }
 
index 39da5b3bc649c28896e60d9dac974b98973da8d0..f9cc467334ce3de06fbf101ace3dcab87566cbf0 100644 (file)
@@ -235,9 +235,6 @@ static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
                kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE),   \
                kfree(p));                                              \
        checker(expected_size, __kmalloc(alloc_size, gfp),              \
-               kfree(p));                                              \
-       checker(expected_size,                                          \
-               __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE),          \
                kfree(p));                                              \
                                                                        \
        orig = kmalloc(alloc_size, gfp);                                \
index 42b585208249c6d8731cb02e81ff62c5939c121d..c63db03ebb9dcfd5730c7363bf4585e2424af4bc 100644 (file)
@@ -811,4 +811,5 @@ static void __exit test_rht_exit(void)
 module_init(test_rht_init);
 module_exit(test_rht_exit);
 
+MODULE_DESCRIPTION("Resizable, Scalable, Concurrent Hash Table test module");
 MODULE_LICENSE("GPL v2");
index 00774656eeac8368f538e59cb8b456af405a3469..5cd94721d974f4428d9a642c13bf325ee15fd2be 100644 (file)
@@ -236,6 +236,8 @@ static int p9_fcall_init(struct p9_client *c, struct p9_fcall *fc,
        if (!fc->sdata)
                return -ENOMEM;
        fc->capacity = alloc_msize;
+       fc->id = 0;
+       fc->tag = P9_NOTAG;
        return 0;
 }
 
index 8077cf2ee448038fab65e842785f6d18bf0e4dc4..d6f9fae06a9d8139ec0505358327e3876af228ae 100644 (file)
@@ -1378,8 +1378,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock,
 {
        struct sk_buff *skb;
        struct sock *newsk;
+       ax25_dev *ax25_dev;
        DEFINE_WAIT(wait);
        struct sock *sk;
+       ax25_cb *ax25;
        int err = 0;
 
        if (sock->state != SS_UNCONNECTED)
@@ -1434,6 +1436,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock,
        kfree_skb(skb);
        sk_acceptq_removed(sk);
        newsock->state = SS_CONNECTED;
+       ax25 = sk_to_ax25(newsk);
+       ax25_dev = ax25->ax25_dev;
+       netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
+       ax25_dev_hold(ax25_dev);
 
 out:
        release_sock(sk);
index 742d7c68e7e7e9fb6c1734f75d20402b2e4ad5a9..9efd6690b3443653a2f2ef421080aa48b214a8ba 100644 (file)
@@ -196,7 +196,7 @@ void __exit ax25_dev_free(void)
        list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
                netdev_put(s->dev, &s->dev_tracker);
                list_del(&s->list);
-               kfree(s);
+               ax25_dev_put(s);
        }
        spin_unlock_bh(&ax25_dev_lock);
 }
index f6aad4ed2ab2f6b09ad2f6442cde26b39949871a..36ae54f57bf5743b37c5c785baa7c4aa8e871024 100644 (file)
@@ -727,10 +727,16 @@ static void
 __bpf_prog_test_run_raw_tp(void *data)
 {
        struct bpf_raw_tp_test_run_info *info = data;
+       struct bpf_trace_run_ctx run_ctx = {};
+       struct bpf_run_ctx *old_run_ctx;
+
+       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 
        rcu_read_lock();
        info->retval = bpf_prog_run(info->prog, info->ctx);
        rcu_read_unlock();
+
+       bpf_reset_run_ctx(old_run_ctx);
 }
 
 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
index e1bb6d7856d922dfca1499df91bdade86bf391c1..4d4de9008f6f3bd4e96b15fda5a2350d9d5efe10 100644 (file)
@@ -4516,12 +4516,13 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
            struct rps_dev_flow *rflow, u16 next_cpu)
 {
        if (next_cpu < nr_cpu_ids) {
+               u32 head;
 #ifdef CONFIG_RFS_ACCEL
                struct netdev_rx_queue *rxqueue;
                struct rps_dev_flow_table *flow_table;
                struct rps_dev_flow *old_rflow;
-               u32 flow_id, head;
                u16 rxq_index;
+               u32 flow_id;
                int rc;
 
                /* Should we steer this flow to a different hardware queue? */
index 6a0482e676d379f1f9bffdda51c7535243b3ec38..70c634b9e7b02300188582a1634d5977838db132 100644 (file)
@@ -27,6 +27,7 @@ struct dst_cache_pcpu {
 static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache,
                                      struct dst_entry *dst, u32 cookie)
 {
+       DEBUG_NET_WARN_ON_ONCE(!in_softirq());
        dst_release(dst_cache->dst);
        if (dst)
                dst_hold(dst);
@@ -40,6 +41,7 @@ static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache,
 {
        struct dst_entry *dst;
 
+       DEBUG_NET_WARN_ON_ONCE(!in_softirq());
        dst = idst->dst;
        if (!dst)
                goto fail;
index b86b0a87367dd53b5bdef80cfddad558a186bb7c..4668d671804070d978d4e1f0f46200dbb496145a 100644 (file)
@@ -6484,6 +6484,46 @@ static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 /* Process one rtnetlink message. */
 
+static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       rtnl_dumpit_func dumpit = cb->data;
+       int err;
+
+       /* Previous iteration have already finished, avoid calling->dumpit()
+        * again, it may not expect to be called after it reached the end.
+        */
+       if (!dumpit)
+               return 0;
+
+       err = dumpit(skb, cb);
+
+       /* Old dump handlers used to send NLM_DONE as in a separate recvmsg().
+        * Some applications which parse netlink manually depend on this.
+        */
+       if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
+               if (err < 0 && err != -EMSGSIZE)
+                       return err;
+               if (!err)
+                       cb->data = NULL;
+
+               return skb->len;
+       }
+       return err;
+}
+
+static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+                               const struct nlmsghdr *nlh,
+                               struct netlink_dump_control *control)
+{
+       if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
+               WARN_ON(control->data);
+               control->data = control->dump;
+               control->dump = rtnl_dumpit;
+       }
+
+       return netlink_dump_start(ssk, skb, nlh, control);
+}
+
 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack)
 {
@@ -6548,7 +6588,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                                .module         = owner,
                                .flags          = flags,
                        };
-                       err = netlink_dump_start(rtnl, skb, nlh, &c);
+                       err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
                        /* netlink_dump_start() will keep a reference on
                         * module if dump is still in progress.
                         */
@@ -6694,7 +6734,7 @@ void __init rtnetlink_init(void)
        register_netdevice_notifier(&rtnetlink_dev_notifier);
 
        rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
-                     rtnl_dump_ifinfo, 0);
+                     rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
        rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
index 9402889840bf7e4fe2adb743d387b9dcdbe17024..d3dbb92153f2fe7f1ddc8e35b495533fbf60a8cb 100644 (file)
@@ -423,9 +423,6 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
        struct sock *sk;
        int err = 0;
 
-       if (irqs_disabled())
-               return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
-
        spin_lock_bh(&stab->lock);
        sk = *psk;
        if (!sk_test || sk_test == sk)
@@ -948,9 +945,6 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
        struct bpf_shtab_elem *elem;
        int ret = -ENOENT;
 
-       if (irqs_disabled())
-               return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
-
        hash = sock_hash_bucket_hash(key, key_size);
        bucket = sock_hash_select_bucket(htab, hash);
 
@@ -1680,19 +1674,23 @@ void sock_map_close(struct sock *sk, long timeout)
 
        lock_sock(sk);
        rcu_read_lock();
-       psock = sk_psock_get(sk);
-       if (unlikely(!psock)) {
-               rcu_read_unlock();
-               release_sock(sk);
-               saved_close = READ_ONCE(sk->sk_prot)->close;
-       } else {
+       psock = sk_psock(sk);
+       if (likely(psock)) {
                saved_close = psock->saved_close;
                sock_map_remove_links(sk, psock);
+               psock = sk_psock_get(sk);
+               if (unlikely(!psock))
+                       goto no_psock;
                rcu_read_unlock();
                sk_psock_stop(psock);
                release_sock(sk);
                cancel_delayed_work_sync(&psock->work);
                sk_psock_put(sk, psock);
+       } else {
+               saved_close = READ_ONCE(sk->sk_prot)->close;
+no_psock:
+               rcu_read_unlock();
+               release_sock(sk);
        }
 
        /* Make sure we do not recurse. This is a bug.
index 049c3adeb85044ac78e5adf7dcfb389d21e75652..4e3651101b8660064a29a24b74263c1d522c9d01 100644 (file)
@@ -161,9 +161,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
        skb->dev = dev;
        skb_reset_mac_header(skb);
 
-       eth = (struct ethhdr *)skb->data;
-       skb_pull_inline(skb, ETH_HLEN);
-
+       eth = eth_skb_pull_mac(skb);
        eth_skb_pkt_type(skb, dev);
 
        /*
index 5a55270aa86e8804f467bd094f8bca9171bc2617..e645d751a5e8998064ef5fa239d465f66c044e6f 100644 (file)
@@ -2220,7 +2220,7 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
        const struct ethtool_ops *ops = dev->ethtool_ops;
        int n_stats, ret;
 
-       if (!ops || !ops->get_sset_count || ops->get_ethtool_phy_stats)
+       if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats)
                return -EOPNOTSUPP;
 
        n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
index be2755c8d8fde7a2e74225aed4a276a39d72d0f1..57d496287e523797b05f85fc26dca111dd701c6a 100644 (file)
@@ -38,11 +38,11 @@ static int tsinfo_prepare_data(const struct ethnl_req_info *req_base,
        ret = ethnl_ops_begin(dev);
        if (ret < 0)
                return ret;
-       if (req_base->flags & ETHTOOL_FLAG_STATS &&
-           dev->ethtool_ops->get_ts_stats) {
+       if (req_base->flags & ETHTOOL_FLAG_STATS) {
                ethtool_stats_init((u64 *)&data->stats,
                                   sizeof(data->stats) / sizeof(u64));
-               dev->ethtool_ops->get_ts_stats(dev, &data->stats);
+               if (dev->ethtool_ops->get_ts_stats)
+                       dev->ethtool_ops->get_ts_stats(dev, &data->stats);
        }
        ret = __ethtool_get_ts_info(dev, &data->ts_info);
        ethnl_ops_complete(dev);
index e03ba4a21c39048b2e78547a0815c50d2a620849..b24d74616637a0e5fc1183cad927fe7c862104c2 100644 (file)
@@ -1532,7 +1532,7 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
        }
 
        NAPI_GRO_CB(skb)->flush |= flush;
-       NAPI_GRO_CB(skb)->inner_network_offset = off;
+       NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
 
        /* Note : No need to call skb_gro_postpull_rcsum() here,
         * as we already checked checksum over ipv4 header was 0
index 96accde527da2484417b14d96ca0222ef5f67c2f..d09f557eaa7790baf83cf567bf7874d32f010de0 100644 (file)
@@ -1887,10 +1887,11 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                        goto done;
 
                if (fillargs.ifindex) {
-                       err = -ENODEV;
                        dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
-                       if (!dev)
+                       if (!dev) {
+                               err = -ENODEV;
                                goto done;
+                       }
                        in_dev = __in_dev_get_rcu(dev);
                        if (!in_dev)
                                goto done;
@@ -1902,7 +1903,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 
        cb->seq = inet_base_seq(tgt_net);
 
-       for_each_netdev_dump(net, dev, ctx->ifindex) {
+       for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
                in_dev = __in_dev_get_rcu(dev);
                if (!in_dev)
                        continue;
@@ -2804,7 +2805,7 @@ void __init devinet_init(void)
        rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
        rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
        rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr,
-                     RTNL_FLAG_DUMP_UNLOCKED);
+                     RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
        rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
                      inet_netconf_dump_devconf,
                      RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED);
index c484b1c0fc00a79a45a1c3e7fde230ce59cb67a3..7ad2cafb927634fd60935c36ad68ded45e52dbab 100644 (file)
@@ -1050,11 +1050,6 @@ next:
                        e++;
                }
        }
-
-       /* Don't let NLM_DONE coalesce into a message, even if it could.
-        * Some user space expects NLM_DONE in a separate recv().
-        */
-       err = skb->len;
 out:
 
        cb->args[1] = e;
@@ -1665,5 +1660,5 @@ void __init ip_fib_init(void)
        rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
        rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0);
        rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib,
-                     RTNL_FLAG_DUMP_UNLOCKED);
+                     RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
 }
index 69e331799604304078367011040b0dea69fcacf0..73e66a088e25eb179569d88a0362452b7d06ccd2 100644 (file)
@@ -58,6 +58,8 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
 
        laddr = 0;
        indev = __in_dev_get_rcu(skb->dev);
+       if (!indev)
+               return daddr;
 
        in_dev_for_each_ifa_rcu(ifa, indev) {
                if (ifa->ifa_flags & IFA_F_SECONDARY)
index 5fd54103174f72d24d8015ad69029cebdd50740f..b3073d1c8f8f71c88dc525eefb2b03be8f1f2945 100644 (file)
@@ -129,7 +129,8 @@ struct dst_entry    *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ipv4_default_advmss(const struct dst_entry *dst);
 INDIRECT_CALLABLE_SCOPE
 unsigned int           ipv4_mtu(const struct dst_entry *dst);
-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
+static void            ipv4_negative_advice(struct sock *sk,
+                                            struct dst_entry *dst);
 static void             ipv4_link_failure(struct sk_buff *skb);
 static void             ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                           struct sk_buff *skb, u32 mtu,
@@ -825,22 +826,15 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
        __ip_do_redirect(rt, skb, &fl4, true);
 }
 
-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
+static void ipv4_negative_advice(struct sock *sk,
+                                struct dst_entry *dst)
 {
        struct rtable *rt = dst_rtable(dst);
-       struct dst_entry *ret = dst;
 
-       if (rt) {
-               if (dst->obsolete > 0) {
-                       ip_rt_put(rt);
-                       ret = NULL;
-               } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
-                          rt->dst.expires) {
-                       ip_rt_put(rt);
-                       ret = NULL;
-               }
-       }
-       return ret;
+       if ((dst->obsolete > 0) ||
+           (rt->rt_flags & RTCF_REDIRECTED) ||
+           rt->dst.expires)
+               sk_dst_reset(sk);
 }
 
 /*
index 681b54e1f3a64387787738ab6495531b8abe1771..e6790ea7487738d8ab825ac0298d15f6744fb3c4 100644 (file)
@@ -1165,6 +1165,9 @@ new_segment:
 
                        process_backlog++;
 
+#ifdef CONFIG_SKB_DECRYPTED
+                       skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
                        tcp_skb_entail(sk, skb);
                        copy = size_goal;
 
@@ -2646,6 +2649,10 @@ void tcp_set_state(struct sock *sk, int state)
                if (oldstate != TCP_ESTABLISHED)
                        TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
                break;
+       case TCP_CLOSE_WAIT:
+               if (oldstate == TCP_SYN_RECV)
+                       TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+               break;
 
        case TCP_CLOSE:
                if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
@@ -2657,7 +2664,7 @@ void tcp_set_state(struct sock *sk, int state)
                        inet_put_port(sk);
                fallthrough;
        default:
-               if (oldstate == TCP_ESTABLISHED)
+               if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
                        TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
        }
 
index 781b67a525719a42f21b713eb424427670d7afb2..37c42b63ff993466b52c5eea7270312149ca913b 100644 (file)
@@ -933,6 +933,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
        struct tcp_ao_key *key;
        __be32 sisn, disn;
        u8 *traffic_key;
+       int state;
        u32 sne = 0;
 
        info = rcu_dereference(tcp_sk(sk)->ao_info);
@@ -948,8 +949,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
                disn = 0;
        }
 
+       state = READ_ONCE(sk->sk_state);
        /* Fast-path */
-       if (likely((1 << sk->sk_state) & TCP_AO_ESTABLISHED)) {
+       if (likely((1 << state) & TCP_AO_ESTABLISHED)) {
                enum skb_drop_reason err;
                struct tcp_ao_key *current_key;
 
@@ -988,6 +990,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
                return SKB_NOT_DROPPED_YET;
        }
 
+       if (unlikely(state == TCP_CLOSE))
+               return SKB_DROP_REASON_TCP_CLOSE;
+
        /* Lookup key based on peer address and keyid.
         * current_key and rnext_key must not be used on tcp listen
         * sockets as otherwise:
@@ -1001,7 +1006,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
        if (th->syn && !th->ack)
                goto verify_hash;
 
-       if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
+       if ((1 << state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
                /* Make the initial syn the likely case here */
                if (unlikely(req)) {
                        sne = tcp_ao_compute_sne(0, tcp_rsk(req)->rcv_isn,
@@ -1018,14 +1023,14 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
                        /* no way to figure out initial sisn/disn - drop */
                        return SKB_DROP_REASON_TCP_FLAGS;
                }
-       } else if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+       } else if ((1 << state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                disn = info->lisn;
                if (th->syn || th->rst)
                        sisn = th->seq;
                else
                        sisn = info->risn;
        } else {
-               WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", sk->sk_state);
+               WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", state);
                return SKB_DROP_REASON_TCP_AOFAILURE;
        }
 verify_hash:
index 30ef0c8f5e92d301c31ea1a05f662c1fc4cf37af..b710958393e64e2278c088018c87ac97a1291a23 100644 (file)
@@ -1144,14 +1144,9 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 #endif
        }
 
-       /* RFC 7323 2.3
-        * The window field (SEG.WND) of every outgoing segment, with the
-        * exception of <SYN> segments, MUST be right-shifted by
-        * Rcv.Wind.Shift bits:
-        */
        tcp_v4_send_ack(sk, skb, seq,
                        tcp_rsk(req)->rcv_nxt,
-                       req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+                       tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
                        tcp_rsk_tsval(tcp_rsk(req)),
                        READ_ONCE(req->ts_recent),
                        0, &key,
index b93619b2384b3735ecb6e40238f8367d9afb7e15..538c06f95918dedf29e0f4790795fcc417f2516f 100644 (file)
@@ -783,8 +783,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 
        /* RFC793: "first check sequence number". */
 
-       if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-                                         tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
+       if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq,
+                                         TCP_SKB_CB(skb)->end_seq,
+                                         tcp_rsk(req)->rcv_nxt,
+                                         tcp_rsk(req)->rcv_nxt +
+                                         tcp_synack_window(req))) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST) &&
                    !tcp_oow_rate_limited(sock_net(sk), skb,
index 0601bad798221389fe83318fbb17f192cec880d4..ff7e734e335b06f03c4c8815163a706136e42a3e 100644 (file)
@@ -58,7 +58,9 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                return orig_dst->lwtstate->orig_output(net, sk, skb);
        }
 
+       local_bh_disable();
        dst = dst_cache_get(&ilwt->dst_cache);
+       local_bh_enable();
        if (unlikely(!dst)) {
                struct ipv6hdr *ip6h = ipv6_hdr(skb);
                struct flowi6 fl6;
@@ -86,8 +88,11 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                        goto drop;
                }
 
-               if (ilwt->connected)
+               if (ilwt->connected) {
+                       local_bh_disable();
                        dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr);
+                       local_bh_enable();
+               }
        }
 
        skb_dst_set(skb, dst);
index 7563f8c6aa87cf9f7841ee78dcea2a16f60ac344..bf7120ecea1ebe834e70073710be0c1692d7ad1d 100644 (file)
@@ -351,9 +351,9 @@ do_encap:
                goto drop;
 
        if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
-               preempt_disable();
+               local_bh_disable();
                dst = dst_cache_get(&ilwt->cache);
-               preempt_enable();
+               local_bh_enable();
 
                if (unlikely(!dst)) {
                        struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -373,9 +373,9 @@ do_encap:
                                goto drop;
                        }
 
-                       preempt_disable();
+                       local_bh_disable();
                        dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
-                       preempt_enable();
+                       local_bh_enable();
                }
 
                skb_dst_drop(skb);
index 31d77885bcae3e3843b6d486cfc21cdbe709bcf0..6e57c03e3255f09ac3927fd4b61232ed07103332 100644 (file)
@@ -966,6 +966,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
        if (!fib6_nh->rt6i_pcpu)
                return;
 
+       rcu_read_lock();
        /* release the reference to this fib entry from
         * all of its cached pcpu routes
         */
@@ -974,7 +975,9 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
                struct rt6_info *pcpu_rt;
 
                ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
-               pcpu_rt = *ppcpu_rt;
+
+               /* Paired with xchg() in rt6_get_pcpu_route() */
+               pcpu_rt = READ_ONCE(*ppcpu_rt);
 
                /* only dropping the 'from' reference if the cached route
                 * is using 'match'. The cached pcpu_rt->from only changes
@@ -988,6 +991,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
                        fib6_info_release(from);
                }
        }
+       rcu_read_unlock();
 }
 
 struct fib6_nh_pcpu_arg {
index bd5aff97d8b1d821329f067d15dd5143844612ce..9822163428b028d6dbf3f48abe4674bd6c581725 100644 (file)
@@ -236,7 +236,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
        if (unlikely(!iph))
                goto out;
 
-       NAPI_GRO_CB(skb)->inner_network_offset = off;
+       NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
 
        flush += ntohs(iph->payload_len) != skb->len - hlen;
 
index bbc2a0dd931429e7f8c68df0df48bce6d604fb56..f083d9faba6b1e544121d711c8cf391aea292f37 100644 (file)
@@ -87,7 +87,8 @@ struct dst_entry      *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ip6_default_advmss(const struct dst_entry *dst);
 INDIRECT_CALLABLE_SCOPE
 unsigned int           ip6_mtu(const struct dst_entry *dst);
-static struct dst_entry *ip6_negative_advice(struct dst_entry *);
+static void            ip6_negative_advice(struct sock *sk,
+                                           struct dst_entry *dst);
 static void            ip6_dst_destroy(struct dst_entry *);
 static void            ip6_dst_ifdown(struct dst_entry *,
                                       struct net_device *dev);
@@ -1408,6 +1409,7 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
                struct rt6_info *prev, **p;
 
                p = this_cpu_ptr(res->nh->rt6i_pcpu);
+               /* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
                prev = xchg(p, NULL);
                if (prev) {
                        dst_dev_put(&prev->dst);
@@ -2770,24 +2772,24 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
 }
 EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
 
-static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
+static void ip6_negative_advice(struct sock *sk,
+                               struct dst_entry *dst)
 {
        struct rt6_info *rt = dst_rt6_info(dst);
 
-       if (rt) {
-               if (rt->rt6i_flags & RTF_CACHE) {
-                       rcu_read_lock();
-                       if (rt6_check_expired(rt)) {
-                               rt6_remove_exception_rt(rt);
-                               dst = NULL;
-                       }
-                       rcu_read_unlock();
-               } else {
-                       dst_release(dst);
-                       dst = NULL;
+       if (rt->rt6i_flags & RTF_CACHE) {
+               rcu_read_lock();
+               if (rt6_check_expired(rt)) {
+                       /* counteract the dst_release() in sk_dst_reset() */
+                       dst_hold(dst);
+                       sk_dst_reset(sk);
+
+                       rt6_remove_exception_rt(rt);
                }
+               rcu_read_unlock();
+               return;
        }
-       return dst;
+       sk_dst_reset(sk);
 }
 
 static void ip6_link_failure(struct sk_buff *skb)
index a013b92cbb860aa36a23f50d3d5c5963857d601c..2c83b7586422ddd2ae877f98e47698410e47b233 100644 (file)
@@ -212,9 +212,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
        if (unlikely(err))
                goto drop;
 
-       preempt_disable();
+       local_bh_disable();
        dst = dst_cache_get(&rlwt->cache);
-       preempt_enable();
+       local_bh_enable();
 
        if (unlikely(!dst)) {
                struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -234,9 +234,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                        goto drop;
                }
 
-               preempt_disable();
+               local_bh_disable();
                dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
-               preempt_enable();
+               local_bh_enable();
        }
 
        skb_dst_drop(skb);
@@ -268,23 +268,21 @@ static int rpl_input(struct sk_buff *skb)
                return err;
        }
 
-       preempt_disable();
+       local_bh_disable();
        dst = dst_cache_get(&rlwt->cache);
-       preempt_enable();
 
        if (!dst) {
                ip6_route_input(skb);
                dst = skb_dst(skb);
                if (!dst->error) {
-                       preempt_disable();
                        dst_cache_set_ip6(&rlwt->cache, dst,
                                          &ipv6_hdr(skb)->saddr);
-                       preempt_enable();
                }
        } else {
                skb_dst_drop(skb);
                skb_dst_set(skb, dst);
        }
+       local_bh_enable();
 
        err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
        if (unlikely(err))
index a75df2ec8db0d369a4e3481576fc09f511a4dd36..098632adc9b5afa69e4b65439ee54c3fc0a8d668 100644 (file)
@@ -464,23 +464,21 @@ static int seg6_input_core(struct net *net, struct sock *sk,
 
        slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
 
-       preempt_disable();
+       local_bh_disable();
        dst = dst_cache_get(&slwt->cache);
-       preempt_enable();
 
        if (!dst) {
                ip6_route_input(skb);
                dst = skb_dst(skb);
                if (!dst->error) {
-                       preempt_disable();
                        dst_cache_set_ip6(&slwt->cache, dst,
                                          &ipv6_hdr(skb)->saddr);
-                       preempt_enable();
                }
        } else {
                skb_dst_drop(skb);
                skb_dst_set(skb, dst);
        }
+       local_bh_enable();
 
        err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
        if (unlikely(err))
@@ -536,9 +534,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
 
        slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
 
-       preempt_disable();
+       local_bh_disable();
        dst = dst_cache_get(&slwt->cache);
-       preempt_enable();
+       local_bh_enable();
 
        if (unlikely(!dst)) {
                struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -558,9 +556,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
                        goto drop;
                }
 
-               preempt_disable();
+               local_bh_disable();
                dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
-               preempt_enable();
+               local_bh_enable();
        }
 
        skb_dst_drop(skb);
index 4c3605485b68e7c333a0144df3d685b3db9ff45d..8c577b651bfcd2f94b45e339ed4a2b47e93ff17a 100644 (file)
@@ -1272,15 +1272,10 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
-       /* RFC 7323 2.3
-        * The window field (SEG.WND) of every outgoing segment, with the
-        * exception of <SYN> segments, MUST be right-shifted by
-        * Rcv.Wind.Shift bits:
-        */
        tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
                        tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
                        tcp_rsk(req)->rcv_nxt,
-                       req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+                       tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
                        tcp_rsk_tsval(tcp_rsk(req)),
                        READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
                        &key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
index b08e5d7687e3fcfb855b3db7ea916491d5c10c1b..83ad6c9709fe609414dc2a9a3519fc943293a2c5 100644 (file)
@@ -2958,8 +2958,9 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
        memcpy(sdata->vif.bss_conf.mcast_rate, rate,
               sizeof(int) * NUM_NL80211_BANDS);
 
-       ieee80211_link_info_change_notify(sdata, &sdata->deflink,
-                                         BSS_CHANGED_MCAST_RATE);
+       if (ieee80211_sdata_running(sdata))
+               ieee80211_link_info_change_notify(sdata, &sdata->deflink,
+                                                 BSS_CHANGED_MCAST_RATE);
 
        return 0;
 }
@@ -4016,7 +4017,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                goto out;
        }
 
-       link_data->csa_chanreq = chanreq; 
+       link_data->csa_chanreq = chanreq;
        link_conf->csa_active = true;
 
        if (params->block_tx &&
@@ -4027,7 +4028,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
        }
 
        cfg80211_ch_switch_started_notify(sdata->dev,
-                                         &link_data->csa_chanreq.oper, 0,
+                                         &link_data->csa_chanreq.oper, link_id,
                                          params->count, params->block_tx);
 
        if (changed) {
index 9f5ffdc9db284a73be270c48f40d77a94b301e79..ecbb042dd0433e2224237dc7e8ae127bf7e97449 100644 (file)
@@ -230,15 +230,21 @@ ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif,
 
        if (!he_spr_ie_elem)
                return;
+
+       he_obss_pd->sr_ctrl = he_spr_ie_elem->he_sr_control;
        data = he_spr_ie_elem->optional;
 
        if (he_spr_ie_elem->he_sr_control &
            IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
-               data++;
+               he_obss_pd->non_srg_max_offset = *data++;
+
        if (he_spr_ie_elem->he_sr_control &
            IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
-               he_obss_pd->max_offset = *data++;
                he_obss_pd->min_offset = *data++;
+               he_obss_pd->max_offset = *data++;
+               memcpy(he_obss_pd->bss_color_bitmap, data, 8);
+               data += 8;
+               memcpy(he_obss_pd->partial_bssid_bitmap, data, 8);
                he_obss_pd->enable = true;
        }
 }
index eb62b7d4b4f7e298ce70b87c45fac16a28a03c94..3cedfdc9099b7deb3decc8b2583ebc495572c472 100644 (file)
@@ -1845,6 +1845,8 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
 void ieee80211_configure_filter(struct ieee80211_local *local);
 u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
 
+void ieee80211_handle_queued_frames(struct ieee80211_local *local);
+
 u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
 int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
                             u64 *cookie, gfp_t gfp);
index 4eaea0a9975b474b0b6b314b3244a88d8e0f1d77..1132dea0e290ea9ca06240cd09a5f11aba278f4c 100644 (file)
@@ -423,9 +423,8 @@ u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
               BSS_CHANGED_ERP_SLOT;
 }
 
-static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+void ieee80211_handle_queued_frames(struct ieee80211_local *local)
 {
-       struct ieee80211_local *local = from_tasklet(local, t, tasklet);
        struct sk_buff *skb;
 
        while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -450,6 +449,13 @@ static void ieee80211_tasklet_handler(struct tasklet_struct *t)
        }
 }
 
+static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+{
+       struct ieee80211_local *local = from_tasklet(local, t, tasklet);
+
+       ieee80211_handle_queued_frames(local);
+}
+
 static void ieee80211_restart_work(struct work_struct *work)
 {
        struct ieee80211_local *local =
index cbc9b5e40cb35e81fb80dd55016c3afc8c31deb7..6d4510221c98e695ea140e40e5e9ec5cbf385a5e 100644 (file)
@@ -1776,6 +1776,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
        ifmsh->last_preq = jiffies;
        ifmsh->next_perr = jiffies;
        ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
+       ifmsh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
        /* Allocate all mesh structures when creating the first mesh interface. */
        if (!mesh_allocated)
                ieee80211s_init();
index a6b62169f08483c5aa481f4f8f59f67fa56a4ef7..c0a5c75cddcb9f1c3f509b909e443ea0e74fb19d 100644 (file)
@@ -1017,10 +1017,23 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
  */
 void mesh_path_flush_pending(struct mesh_path *mpath)
 {
+       struct ieee80211_sub_if_data *sdata = mpath->sdata;
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       struct mesh_preq_queue *preq, *tmp;
        struct sk_buff *skb;
 
        while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                mesh_path_discard_frame(mpath->sdata, skb);
+
+       spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
+       list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
+               if (ether_addr_equal(mpath->dst, preq->dst)) {
+                       list_del(&preq->list);
+                       kfree(preq);
+                       --ifmsh->preq_queue_len;
+               }
+       }
+       spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
 }
 
 /**
index 55e5497f89781c0fc2c74965b30b966366672d25..055a60e90979b32129d6ba93a2361aec70af0cbb 100644 (file)
@@ -111,7 +111,7 @@ ieee80211_parse_extension_element(u32 *crc,
                if (params->mode < IEEE80211_CONN_MODE_HE)
                        break;
                if (len >= sizeof(*elems->he_spr) &&
-                   len >= ieee80211_he_spr_size(data))
+                   len >= ieee80211_he_spr_size(data) - 1)
                        elems->he_spr = data;
                break;
        case WLAN_EID_EXT_HE_6GHZ_CAPA:
index 3da1c5c450358cb77422d48b9e9a0dd5542a7508..8ecc4b710b0e64449c2a03339b3d3093ec939b7b 100644 (file)
@@ -744,15 +744,21 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
                        local->hw_scan_ies_bufsize *= n_bands;
                }
 
-               local->hw_scan_req = kmalloc(
-                               sizeof(*local->hw_scan_req) +
-                               req->n_channels * sizeof(req->channels[0]) +
-                               local->hw_scan_ies_bufsize, GFP_KERNEL);
+               local->hw_scan_req = kmalloc(struct_size(local->hw_scan_req,
+                                                        req.channels,
+                                                        req->n_channels) +
+                                            local->hw_scan_ies_bufsize,
+                                            GFP_KERNEL);
                if (!local->hw_scan_req)
                        return -ENOMEM;
 
                local->hw_scan_req->req.ssids = req->ssids;
                local->hw_scan_req->req.n_ssids = req->n_ssids;
+               /* None of the channels are actually set
+                * up but let UBSAN know the boundaries.
+                */
+               local->hw_scan_req->req.n_channels = req->n_channels;
+
                ies = (u8 *)local->hw_scan_req +
                        sizeof(*local->hw_scan_req) +
                        req->n_channels * sizeof(req->channels[0]);
index da5fdd6f5c852ba5514f1d04e3ef71398b126b9c..aa22f09e6d145f3e7fb40b95005fb434856d6d26 100644 (file)
@@ -1724,7 +1724,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        skb_queue_head_init(&pending);
 
        /* sync with ieee80211_tx_h_unicast_ps_buf */
-       spin_lock(&sta->ps_lock);
+       spin_lock_bh(&sta->ps_lock);
        /* Send all buffered frames to the station */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                int count = skb_queue_len(&pending), tmp;
@@ -1753,7 +1753,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
         */
        clear_sta_flag(sta, WLAN_STA_PSPOLL);
        clear_sta_flag(sta, WLAN_STA_UAPSD);
-       spin_unlock(&sta->ps_lock);
+       spin_unlock_bh(&sta->ps_lock);
 
        atomic_dec(&ps->num_sta_ps);
 
index 0b893e958959440e4b918e284533933a5c6145da..283bfc99417e57dd65ed713a42991a2362bfff81 100644 (file)
@@ -1567,6 +1567,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
 
 void ieee80211_stop_device(struct ieee80211_local *local)
 {
+       ieee80211_handle_queued_frames(local);
+
        ieee80211_led_radio(local, false);
        ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);
 
index 7d44196ec5b630500d28815fde9f92c0890c8314..96b113854bd3cea706c46490b92bdb139ed0a242 100644 (file)
@@ -2916,9 +2916,14 @@ void mptcp_set_state(struct sock *sk, int state)
                if (oldstate != TCP_ESTABLISHED)
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
                break;
-
+       case TCP_CLOSE_WAIT:
+               /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
+                * MPTCP "accepted" sockets will be created later on. So no
+                * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
+                */
+               break;
        default:
-               if (oldstate == TCP_ESTABLISHED)
+               if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
                        MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
        }
 
index 374412ed780b6ed601e63e2af23cb0a364df80fc..ef0f8f73826f53b8995b7b11ae13da5f3bf82af9 100644 (file)
@@ -325,6 +325,7 @@ struct ncsi_dev_priv {
        spinlock_t          lock;            /* Protect the NCSI device    */
        unsigned int        package_probe_id;/* Current ID during probe    */
        unsigned int        package_num;     /* Number of packages         */
+       unsigned int        channel_probe_id;/* Current channel ID during probe */
        struct list_head    packages;        /* List of packages           */
        struct ncsi_channel *hot_channel;    /* Channel was ever active    */
        struct ncsi_request requests[256];   /* Request table              */
@@ -343,6 +344,7 @@ struct ncsi_dev_priv {
        bool                multi_package;   /* Enable multiple packages   */
        bool                mlx_multi_host;  /* Enable multi host Mellanox */
        u32                 package_whitelist; /* Packages to configure    */
+       unsigned char       channel_count;     /* Num of channels to probe   */
 };
 
 struct ncsi_cmd_arg {
index 745c788f1d1dfcd3a0467f08271d2ce44dacf2a8..5ecf611c882009d647909e52a686286e090ba6c3 100644 (file)
@@ -510,17 +510,19 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
 
                break;
        case ncsi_dev_state_suspend_gls:
-               ndp->pending_req_num = np->channel_num;
+               ndp->pending_req_num = 1;
 
                nca.type = NCSI_PKT_CMD_GLS;
                nca.package = np->id;
+               nca.channel = ndp->channel_probe_id;
+               ret = ncsi_xmit_cmd(&nca);
+               if (ret)
+                       goto error;
+               ndp->channel_probe_id++;
 
-               nd->state = ncsi_dev_state_suspend_dcnt;
-               NCSI_FOR_EACH_CHANNEL(np, nc) {
-                       nca.channel = nc->id;
-                       ret = ncsi_xmit_cmd(&nca);
-                       if (ret)
-                               goto error;
+               if (ndp->channel_probe_id == ndp->channel_count) {
+                       ndp->channel_probe_id = 0;
+                       nd->state = ncsi_dev_state_suspend_dcnt;
                }
 
                break;
@@ -1345,7 +1347,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
 {
        struct ncsi_dev *nd = &ndp->ndev;
        struct ncsi_package *np;
-       struct ncsi_channel *nc;
        struct ncsi_cmd_arg nca;
        unsigned char index;
        int ret;
@@ -1423,23 +1424,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
 
                nd->state = ncsi_dev_state_probe_cis;
                break;
-       case ncsi_dev_state_probe_cis:
-               ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
-
-               /* Clear initial state */
-               nca.type = NCSI_PKT_CMD_CIS;
-               nca.package = ndp->active_package->id;
-               for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
-                       nca.channel = index;
-                       ret = ncsi_xmit_cmd(&nca);
-                       if (ret)
-                               goto error;
-               }
-
-               nd->state = ncsi_dev_state_probe_gvi;
-               if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
-                       nd->state = ncsi_dev_state_probe_keep_phy;
-               break;
        case ncsi_dev_state_probe_keep_phy:
                ndp->pending_req_num = 1;
 
@@ -1452,14 +1436,17 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
 
                nd->state = ncsi_dev_state_probe_gvi;
                break;
+       case ncsi_dev_state_probe_cis:
        case ncsi_dev_state_probe_gvi:
        case ncsi_dev_state_probe_gc:
        case ncsi_dev_state_probe_gls:
                np = ndp->active_package;
-               ndp->pending_req_num = np->channel_num;
+               ndp->pending_req_num = 1;
 
-               /* Retrieve version, capability or link status */
-               if (nd->state == ncsi_dev_state_probe_gvi)
+               /* Clear initial state; retrieve version, capability or link status */
+               if (nd->state == ncsi_dev_state_probe_cis)
+                       nca.type = NCSI_PKT_CMD_CIS;
+               else if (nd->state == ncsi_dev_state_probe_gvi)
                        nca.type = NCSI_PKT_CMD_GVI;
                else if (nd->state == ncsi_dev_state_probe_gc)
                        nca.type = NCSI_PKT_CMD_GC;
@@ -1467,19 +1454,29 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
                        nca.type = NCSI_PKT_CMD_GLS;
 
                nca.package = np->id;
-               NCSI_FOR_EACH_CHANNEL(np, nc) {
-                       nca.channel = nc->id;
-                       ret = ncsi_xmit_cmd(&nca);
-                       if (ret)
-                               goto error;
-               }
+               nca.channel = ndp->channel_probe_id;
 
-               if (nd->state == ncsi_dev_state_probe_gvi)
+               ret = ncsi_xmit_cmd(&nca);
+               if (ret)
+                       goto error;
+
+               if (nd->state == ncsi_dev_state_probe_cis) {
+                       nd->state = ncsi_dev_state_probe_gvi;
+                       if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
+                               nd->state = ncsi_dev_state_probe_keep_phy;
+               } else if (nd->state == ncsi_dev_state_probe_gvi) {
                        nd->state = ncsi_dev_state_probe_gc;
-               else if (nd->state == ncsi_dev_state_probe_gc)
+               } else if (nd->state == ncsi_dev_state_probe_gc) {
                        nd->state = ncsi_dev_state_probe_gls;
-               else
+               } else {
+                       nd->state = ncsi_dev_state_probe_cis;
+                       ndp->channel_probe_id++;
+               }
+
+               if (ndp->channel_probe_id == ndp->channel_count) {
+                       ndp->channel_probe_id = 0;
                        nd->state = ncsi_dev_state_probe_dp;
+               }
                break;
        case ncsi_dev_state_probe_dp:
                ndp->pending_req_num = 1;
@@ -1780,6 +1777,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
                ndp->requests[i].ndp = ndp;
                timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
        }
+       ndp->channel_count = NCSI_RESERVED_CHANNEL;
 
        spin_lock_irqsave(&ncsi_dev_lock, flags);
        list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
@@ -1813,6 +1811,7 @@ int ncsi_start_dev(struct ncsi_dev *nd)
 
        if (!(ndp->flags & NCSI_DEV_PROBED)) {
                ndp->package_probe_id = 0;
+               ndp->channel_probe_id = 0;
                nd->state = ncsi_dev_state_probe;
                schedule_work(&ndp->work);
                return 0;
index bee290d0f48b6f25701312fd2de9b1b796761c7a..e28be33bdf2c487c0fbfe3a1b4de6f52c8f923cc 100644 (file)
@@ -795,12 +795,13 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
        struct ncsi_rsp_gc_pkt *rsp;
        struct ncsi_dev_priv *ndp = nr->ndp;
        struct ncsi_channel *nc;
+       struct ncsi_package *np;
        size_t size;
 
        /* Find the channel */
        rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp);
        ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
-                                     NULL, &nc);
+                                     &np, &nc);
        if (!nc)
                return -ENODEV;
 
@@ -835,6 +836,7 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
         */
        nc->vlan_filter.bitmap = U64_MAX;
        nc->vlan_filter.n_vids = rsp->vlan_cnt;
+       np->ndp->channel_count = rsp->channel_cnt;
 
        return 0;
 }
index 6c3f28bc59b3259f0033cd4adc0ba5711db08c26..54e2a1dd7f5f5163fef436dc6ed000ce25e837b7 100644 (file)
@@ -549,6 +549,9 @@ list_set_cancel_gc(struct ip_set *set)
 
        if (SET_WITH_TIMEOUT(set))
                timer_shutdown_sync(&map->gc);
+
+       /* Flush list to drop references to other ipsets */
+       list_set_flush(set);
 }
 
 static const struct ip_set_type_variant set_variant = {
index 00f4bd21c59b419e96794127693c21ccb05e45b0..f1c31757e4969e8f975c7a1ebbc3b96148ec9724 100644 (file)
@@ -169,7 +169,9 @@ instance_destroy_rcu(struct rcu_head *head)
        struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                   rcu);
 
+       rcu_read_lock();
        nfqnl_flush(inst, NULL, 0);
+       rcu_read_unlock();
        kfree(inst);
        module_put(THIS_MODULE);
 }
index 37cfe6dd712d8b138fc290abe66fd8d9b69963d6..b58f62195ff3ee04aebcf62a166349ff3a1f1285 100644 (file)
@@ -35,11 +35,9 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
        switch (priv->result) {
        case NFT_FIB_RESULT_OIF:
        case NFT_FIB_RESULT_OIFNAME:
-               hooks = (1 << NF_INET_PRE_ROUTING);
-               if (priv->flags & NFTA_FIB_F_IIF) {
-                       hooks |= (1 << NF_INET_LOCAL_IN) |
-                                (1 << NF_INET_FORWARD);
-               }
+               hooks = (1 << NF_INET_PRE_ROUTING) |
+                       (1 << NF_INET_LOCAL_IN) |
+                       (1 << NF_INET_FORWARD);
                break;
        case NFT_FIB_RESULT_ADDRTYPE:
                if (priv->flags & NFTA_FIB_F_IIF)
index 0a689c8e0295dfed3511997abcac53fad4b56fd5..0c43d748e23ae1537eb9043e2209054584e4a352 100644 (file)
@@ -45,36 +45,27 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
        int mac_off = skb_mac_header(skb) - skb->data;
        u8 *vlanh, *dst_u8 = (u8 *) d;
        struct vlan_ethhdr veth;
-       u8 vlan_hlen = 0;
-
-       if ((skb->protocol == htons(ETH_P_8021AD) ||
-            skb->protocol == htons(ETH_P_8021Q)) &&
-           offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
-               vlan_hlen += VLAN_HLEN;
 
        vlanh = (u8 *) &veth;
-       if (offset < VLAN_ETH_HLEN + vlan_hlen) {
+       if (offset < VLAN_ETH_HLEN) {
                u8 ethlen = len;
 
-               if (vlan_hlen &&
-                   skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
-                       return false;
-               else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
+               if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
                        return false;
 
-               if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
-                       ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
+               if (offset + len > VLAN_ETH_HLEN)
+                       ethlen -= offset + len - VLAN_ETH_HLEN;
 
-               memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
+               memcpy(dst_u8, vlanh + offset, ethlen);
 
                len -= ethlen;
                if (len == 0)
                        return true;
 
                dst_u8 += ethlen;
-               offset = ETH_HLEN + vlan_hlen;
+               offset = ETH_HLEN;
        } else {
-               offset -= VLAN_HLEN + vlan_hlen;
+               offset -= VLAN_HLEN;
        }
 
        return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
@@ -154,12 +145,12 @@ int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
        return pkt->inneroff;
 }
 
-static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
+static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
 {
-       unsigned int len = priv->offset + priv->len;
+       unsigned int boundary = offset + len;
 
        /* data past ether src/dst requested, copy needed */
-       if (len > offsetof(struct ethhdr, h_proto))
+       if (boundary > offsetof(struct ethhdr, h_proto))
                return true;
 
        return false;
@@ -183,7 +174,7 @@ void nft_payload_eval(const struct nft_expr *expr,
                        goto err;
 
                if (skb_vlan_tag_present(skb) &&
-                   nft_payload_need_vlan_copy(priv)) {
+                   nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
                        if (!nft_payload_copy_vlan(dest, skb,
                                                   priv->offset, priv->len))
                                goto err;
@@ -810,21 +801,79 @@ struct nft_payload_set {
        u8                      csum_flags;
 };
 
+/* This is not struct vlan_hdr. */
+struct nft_payload_vlan_hdr {
+       __be16                  h_vlan_proto;
+       __be16                  h_vlan_TCI;
+};
+
+static bool
+nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
+                    int *vlan_hlen)
+{
+       struct nft_payload_vlan_hdr *vlanh;
+       __be16 vlan_proto;
+       u16 vlan_tci;
+
+       if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
+               *vlan_hlen = VLAN_HLEN;
+               return true;
+       }
+
+       switch (offset) {
+       case offsetof(struct vlan_ethhdr, h_vlan_proto):
+               if (len == 2) {
+                       vlan_proto = nft_reg_load_be16(src);
+                       skb->vlan_proto = vlan_proto;
+               } else if (len == 4) {
+                       vlanh = (struct nft_payload_vlan_hdr *)src;
+                       __vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
+                                              ntohs(vlanh->h_vlan_TCI));
+               } else {
+                       return false;
+               }
+               break;
+       case offsetof(struct vlan_ethhdr, h_vlan_TCI):
+               if (len != 2)
+                       return false;
+
+               vlan_tci = ntohs(nft_reg_load_be16(src));
+               skb->vlan_tci = vlan_tci;
+               break;
+       default:
+               return false;
+       }
+
+       return true;
+}
+
 static void nft_payload_set_eval(const struct nft_expr *expr,
                                 struct nft_regs *regs,
                                 const struct nft_pktinfo *pkt)
 {
        const struct nft_payload_set *priv = nft_expr_priv(expr);
-       struct sk_buff *skb = pkt->skb;
        const u32 *src = &regs->data[priv->sreg];
-       int offset, csum_offset;
+       int offset, csum_offset, vlan_hlen = 0;
+       struct sk_buff *skb = pkt->skb;
        __wsum fsum, tsum;
 
        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
                        goto err;
-               offset = skb_mac_header(skb) - skb->data;
+
+               if (skb_vlan_tag_present(skb) &&
+                   nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
+                       if (!nft_payload_set_vlan(src, skb,
+                                                 priv->offset, priv->len,
+                                                 &vlan_hlen))
+                               goto err;
+
+                       if (!vlan_hlen)
+                               return;
+               }
+
+               offset = skb_mac_header(skb) - skb->data - vlan_hlen;
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
                offset = skb_network_offset(skb);
index 79e93a19d5fabeb5ee91e20388d67ef019064064..06e03f5cd7ce182590c142e4c83f8d93f800b89e 100644 (file)
@@ -185,7 +185,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
 
        qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
 
-       removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
+       removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
                          GFP_KERNEL);
        if (!removed)
                return -ENOMEM;
index 1ab17e8a72605385280fad9b7f656a6771236acc..b284a06b5a75fa3408c4a6515b71ed1a6ec63e78 100644 (file)
@@ -1151,11 +1151,6 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
                list_for_each_entry(entry, &new->entries, list)
                        cycle = ktime_add_ns(cycle, entry->interval);
 
-               if (!cycle) {
-                       NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
-                       return -EINVAL;
-               }
-
                if (cycle < 0 || cycle > INT_MAX) {
                        NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
                        return -EINVAL;
@@ -1164,6 +1159,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
                new->cycle_time = cycle;
        }
 
+       if (new->cycle_time < new->num_entries * length_to_duration(q, ETH_ZLEN)) {
+               NL_SET_ERR_MSG(extack, "'cycle_time' is too small");
+               return -EINVAL;
+       }
+
        taprio_calculate_gate_durations(q, new);
 
        return 0;
@@ -1176,16 +1176,13 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
 {
        bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
 
-       if (!qopt && !dev->num_tc) {
-               NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
-               return -EINVAL;
-       }
-
-       /* If num_tc is already set, it means that the user already
-        * configured the mqprio part
-        */
-       if (dev->num_tc)
+       if (!qopt) {
+               if (!dev->num_tc) {
+                       NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+                       return -EINVAL;
+               }
                return 0;
+       }
 
        /* taprio imposes that traffic classes map 1:n to tx queues */
        if (qopt->num_tc > dev->num_tx_queues) {
@@ -1848,6 +1845,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
        }
        q->flags = taprio_flags;
 
+       /* Needed for length_to_duration() during netlink attribute parsing */
+       taprio_set_picos_per_byte(dev, q);
+
        err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
        if (err < 0)
                return err;
@@ -1907,7 +1907,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
        if (err < 0)
                goto free_sched;
 
-       taprio_set_picos_per_byte(dev, q);
        taprio_update_queue_max_sdu(q, new_admin, stab);
 
        if (FULL_OFFLOAD_IS_ENABLED(q->flags))
index e50a286fd0fb77dfe2644d794ffb11d9db332906..c5f98c6b25613f542066f8fab1bb0d630d46a77d 100644 (file)
@@ -459,29 +459,11 @@ out:
 static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
                                     unsigned long mask)
 {
-       struct net *nnet = sock_net(nsk);
-
        nsk->sk_userlocks = osk->sk_userlocks;
-       if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
+       if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
                nsk->sk_sndbuf = osk->sk_sndbuf;
-       } else {
-               if (mask == SK_FLAGS_SMC_TO_CLC)
-                       WRITE_ONCE(nsk->sk_sndbuf,
-                                  READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
-               else
-                       WRITE_ONCE(nsk->sk_sndbuf,
-                                  2 * READ_ONCE(nnet->smc.sysctl_wmem));
-       }
-       if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
+       if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
                nsk->sk_rcvbuf = osk->sk_rcvbuf;
-       } else {
-               if (mask == SK_FLAGS_SMC_TO_CLC)
-                       WRITE_ONCE(nsk->sk_rcvbuf,
-                                  READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
-               else
-                       WRITE_ONCE(nsk->sk_rcvbuf,
-                                  2 * READ_ONCE(nnet->smc.sysctl_rmem));
-       }
 }
 
 static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
index 96ab50eda9c2eb66b40fb376662aab21a861e9e5..73a90ad873fb9da659ba76184b2e2a0e5324ce0d 100644 (file)
@@ -1069,7 +1069,7 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
                goto out_denied_free;
 
        pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
-       in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
+       in_token->pages = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
        if (!in_token->pages)
                goto out_denied_free;
        in_token->page_base = 0;
index e4af6616e1dff1da32772eddfdf645fb4c9fa45c..80846279de9f3b94be5c60eda8be17f2adeeaf6b 100644 (file)
@@ -221,15 +221,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
        return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 }
 
-static inline int unix_recvq_full(const struct sock *sk)
-{
-       return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
-}
-
 static inline int unix_recvq_full_lockless(const struct sock *sk)
 {
-       return skb_queue_len_lockless(&sk->sk_receive_queue) >
-               READ_ONCE(sk->sk_max_ack_backlog);
+       return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 }
 
 struct sock *unix_peer_get(struct sock *s)
@@ -530,10 +524,10 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
        return 0;
 }
 
-static int unix_writable(const struct sock *sk)
+static int unix_writable(const struct sock *sk, unsigned char state)
 {
-       return sk->sk_state != TCP_LISTEN &&
-              (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+       return state != TCP_LISTEN &&
+               (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
 }
 
 static void unix_write_space(struct sock *sk)
@@ -541,7 +535,7 @@ static void unix_write_space(struct sock *sk)
        struct socket_wq *wq;
 
        rcu_read_lock();
-       if (unix_writable(sk)) {
+       if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait,
@@ -570,7 +564,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
                        sk_error_report(other);
                }
        }
-       other->sk_state = TCP_CLOSE;
 }
 
 static void unix_sock_destructor(struct sock *sk)
@@ -617,7 +610,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        state = sk->sk_state;
-       sk->sk_state = TCP_CLOSE;
+       WRITE_ONCE(sk->sk_state, TCP_CLOSE);
 
        skpair = unix_peer(sk);
        unix_peer(sk) = NULL;
@@ -638,7 +631,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
                        unix_state_lock(skpair);
                        /* No more writes */
                        WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
-                       if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+                       if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
                                WRITE_ONCE(skpair->sk_err, ECONNRESET);
                        unix_state_unlock(skpair);
                        skpair->sk_state_change(skpair);
@@ -731,7 +724,7 @@ static int unix_listen(struct socket *sock, int backlog)
        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto out;       /* Only stream/seqpacket sockets accept */
        err = -EINVAL;
-       if (!u->addr)
+       if (!READ_ONCE(u->addr))
                goto out;       /* No listens on an unbound socket */
        unix_state_lock(sk);
        if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
@@ -739,7 +732,8 @@ static int unix_listen(struct socket *sock, int backlog)
        if (backlog > sk->sk_max_ack_backlog)
                wake_up_interruptible_all(&u->peer_wait);
        sk->sk_max_ack_backlog  = backlog;
-       sk->sk_state            = TCP_LISTEN;
+       WRITE_ONCE(sk->sk_state, TCP_LISTEN);
+
        /* set credentials so connect can copy them */
        init_peercred(sk);
        err = 0;
@@ -976,7 +970,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
        sk->sk_hash             = unix_unbound_hash(sk);
        sk->sk_allocation       = GFP_KERNEL_ACCOUNT;
        sk->sk_write_space      = unix_write_space;
-       sk->sk_max_ack_backlog  = net->unx.sysctl_max_dgram_qlen;
+       sk->sk_max_ack_backlog  = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
        sk->sk_destruct         = unix_sock_destructor;
        u = unix_sk(sk);
        u->listener = NULL;
@@ -1131,8 +1125,8 @@ static struct sock *unix_find_other(struct net *net,
 
 static int unix_autobind(struct sock *sk)
 {
-       unsigned int new_hash, old_hash = sk->sk_hash;
        struct unix_sock *u = unix_sk(sk);
+       unsigned int new_hash, old_hash;
        struct net *net = sock_net(sk);
        struct unix_address *addr;
        u32 lastnum, ordernum;
@@ -1155,6 +1149,7 @@ static int unix_autobind(struct sock *sk)
        addr->name->sun_family = AF_UNIX;
        refcount_set(&addr->refcnt, 1);
 
+       old_hash = sk->sk_hash;
        ordernum = get_random_u32();
        lastnum = ordernum & 0xFFFFF;
 retry:
@@ -1195,8 +1190,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
 {
        umode_t mode = S_IFSOCK |
               (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
-       unsigned int new_hash, old_hash = sk->sk_hash;
        struct unix_sock *u = unix_sk(sk);
+       unsigned int new_hash, old_hash;
        struct net *net = sock_net(sk);
        struct mnt_idmap *idmap;
        struct unix_address *addr;
@@ -1234,6 +1229,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
        if (u->addr)
                goto out_unlock;
 
+       old_hash = sk->sk_hash;
        new_hash = unix_bsd_hash(d_backing_inode(dentry));
        unix_table_double_lock(net, old_hash, new_hash);
        u->path.mnt = mntget(parent.mnt);
@@ -1261,8 +1257,8 @@ out:
 static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
                              int addr_len)
 {
-       unsigned int new_hash, old_hash = sk->sk_hash;
        struct unix_sock *u = unix_sk(sk);
+       unsigned int new_hash, old_hash;
        struct net *net = sock_net(sk);
        struct unix_address *addr;
        int err;
@@ -1280,6 +1276,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
                goto out_mutex;
        }
 
+       old_hash = sk->sk_hash;
        new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
        unix_table_double_lock(net, old_hash, new_hash);
 
@@ -1369,7 +1366,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
 
                if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
                     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
-                   !unix_sk(sk)->addr) {
+                   !READ_ONCE(unix_sk(sk)->addr)) {
                        err = unix_autobind(sk);
                        if (err)
                                goto out;
@@ -1399,7 +1396,8 @@ restart:
                if (err)
                        goto out_unlock;
 
-               sk->sk_state = other->sk_state = TCP_ESTABLISHED;
+               WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+               WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
        } else {
                /*
                 *      1003.1g breaking connected state with AF_UNSPEC
@@ -1416,13 +1414,20 @@ restart:
 
                unix_peer(sk) = other;
                if (!other)
-                       sk->sk_state = TCP_CLOSE;
+                       WRITE_ONCE(sk->sk_state, TCP_CLOSE);
                unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
 
                unix_state_double_unlock(sk, other);
 
-               if (other != old_peer)
+               if (other != old_peer) {
                        unix_dgram_disconnected(sk, old_peer);
+
+                       unix_state_lock(old_peer);
+                       if (!unix_peer(old_peer))
+                               WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
+                       unix_state_unlock(old_peer);
+               }
+
                sock_put(old_peer);
        } else {
                unix_peer(sk) = other;
@@ -1470,7 +1475,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
        struct sk_buff *skb = NULL;
        long timeo;
        int err;
-       int st;
 
        err = unix_validate_addr(sunaddr, addr_len);
        if (err)
@@ -1481,7 +1485,8 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                goto out;
 
        if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
-            test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+            test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+           !READ_ONCE(u->addr)) {
                err = unix_autobind(sk);
                if (err)
                        goto out;
@@ -1534,7 +1539,7 @@ restart:
        if (other->sk_shutdown & RCV_SHUTDOWN)
                goto out_unlock;
 
-       if (unix_recvq_full(other)) {
+       if (unix_recvq_full_lockless(other)) {
                err = -EAGAIN;
                if (!timeo)
                        goto out_unlock;
@@ -1559,9 +1564,7 @@ restart:
 
           Well, and we have to recheck the state after socket locked.
         */
-       st = sk->sk_state;
-
-       switch (st) {
+       switch (READ_ONCE(sk->sk_state)) {
        case TCP_CLOSE:
                /* This is ok... continue with connect */
                break;
@@ -1576,7 +1579,7 @@ restart:
 
        unix_state_lock_nested(sk, U_LOCK_SECOND);
 
-       if (sk->sk_state != st) {
+       if (sk->sk_state != TCP_CLOSE) {
                unix_state_unlock(sk);
                unix_state_unlock(other);
                sock_put(other);
@@ -1629,7 +1632,7 @@ restart:
        copy_peercred(sk, other);
 
        sock->state     = SS_CONNECTED;
-       sk->sk_state    = TCP_ESTABLISHED;
+       WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
        sock_hold(newsk);
 
        smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
@@ -1701,7 +1704,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock,
                goto out;
 
        arg->err = -EINVAL;
-       if (sk->sk_state != TCP_LISTEN)
+       if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
                goto out;
 
        /* If socket state is TCP_LISTEN it cannot change (for now...),
@@ -1950,14 +1953,15 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
        }
 
        if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
-            test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+            test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+           !READ_ONCE(u->addr)) {
                err = unix_autobind(sk);
                if (err)
                        goto out;
        }
 
        err = -EMSGSIZE;
-       if (len > sk->sk_sndbuf - 32)
+       if (len > READ_ONCE(sk->sk_sndbuf) - 32)
                goto out;
 
        if (len > SKB_MAX_ALLOC) {
@@ -2039,7 +2043,7 @@ restart_locked:
                        unix_peer(sk) = NULL;
                        unix_dgram_peer_wake_disconnect_wakeup(sk, other);
 
-                       sk->sk_state = TCP_CLOSE;
+                       WRITE_ONCE(sk->sk_state, TCP_CLOSE);
                        unix_state_unlock(sk);
 
                        unix_dgram_disconnected(sk, other);
@@ -2216,7 +2220,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
        }
 
        if (msg->msg_namelen) {
-               err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+               err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
                goto out_err;
        } else {
                err = -ENOTCONN;
@@ -2237,7 +2241,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
                                                   &err, 0);
                } else {
                        /* Keep two messages in the pipe so it schedules better */
-                       size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
+                       size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
 
                        /* allow fallback to order-0 allocations */
                        size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
@@ -2330,7 +2334,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
        if (err)
                return err;
 
-       if (sk->sk_state != TCP_ESTABLISHED)
+       if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
                return -ENOTCONN;
 
        if (msg->msg_namelen)
@@ -2344,7 +2348,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
 {
        struct sock *sk = sock->sk;
 
-       if (sk->sk_state != TCP_ESTABLISHED)
+       if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
                return -ENOTCONN;
 
        return unix_dgram_recvmsg(sock, msg, size, flags);
@@ -2649,7 +2653,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 
 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 {
-       if (unlikely(sk->sk_state != TCP_ESTABLISHED))
+       if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
                return -ENOTCONN;
 
        return unix_read_skb(sk, recv_actor);
@@ -2673,7 +2677,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
        size_t size = state->size;
        unsigned int last_len;
 
-       if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
+       if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
                err = -EINVAL;
                goto out;
        }
@@ -3004,7 +3008,7 @@ long unix_inq_len(struct sock *sk)
        struct sk_buff *skb;
        long amount = 0;
 
-       if (sk->sk_state == TCP_LISTEN)
+       if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
                return -EINVAL;
 
        spin_lock(&sk->sk_receive_queue.lock);
@@ -3116,12 +3120,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
+       unsigned char state;
        __poll_t mask;
        u8 shutdown;
 
        sock_poll_wait(file, sock, wait);
        mask = 0;
        shutdown = READ_ONCE(sk->sk_shutdown);
+       state = READ_ONCE(sk->sk_state);
 
        /* exceptional events? */
        if (READ_ONCE(sk->sk_err))
@@ -3143,14 +3149,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
 
        /* Connection-based need to check for termination and startup */
        if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
-           sk->sk_state == TCP_CLOSE)
+           state == TCP_CLOSE)
                mask |= EPOLLHUP;
 
        /*
         * we set writable also when the other side has shut down the
         * connection. This prevents stuck sockets.
         */
-       if (unix_writable(sk))
+       if (unix_writable(sk, state))
                mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
        return mask;
@@ -3161,12 +3167,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 {
        struct sock *sk = sock->sk, *other;
        unsigned int writable;
+       unsigned char state;
        __poll_t mask;
        u8 shutdown;
 
        sock_poll_wait(file, sock, wait);
        mask = 0;
        shutdown = READ_ONCE(sk->sk_shutdown);
+       state = READ_ONCE(sk->sk_state);
 
        /* exceptional events? */
        if (READ_ONCE(sk->sk_err) ||
@@ -3186,19 +3194,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
-       if (sk->sk_type == SOCK_SEQPACKET) {
-               if (sk->sk_state == TCP_CLOSE)
-                       mask |= EPOLLHUP;
-               /* connection hasn't started yet? */
-               if (sk->sk_state == TCP_SYN_SENT)
-                       return mask;
-       }
+       if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
+               mask |= EPOLLHUP;
 
        /* No write status requested, avoid expensive OUT tests. */
        if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
                return mask;
 
-       writable = unix_writable(sk);
+       writable = unix_writable(sk, state);
        if (writable) {
                unix_state_lock(sk);
 
index ae39538c5042b34e864e709004bf554841eaf4c9..937edf4afed41339afce117bd08f9a58bb2a6118 100644 (file)
@@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
        u32 *buf;
        int i;
 
-       if (sk->sk_state == TCP_LISTEN) {
+       if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
                spin_lock(&sk->sk_receive_queue.lock);
 
                attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
@@ -103,8 +103,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
 {
        struct unix_diag_rqlen rql;
 
-       if (sk->sk_state == TCP_LISTEN) {
-               rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+       if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+               rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
                rql.udiag_wqueue = sk->sk_max_ack_backlog;
        } else {
                rql.udiag_rqueue = (u32) unix_inq_len(sk);
@@ -136,7 +136,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
        rep = nlmsg_data(nlh);
        rep->udiag_family = AF_UNIX;
        rep->udiag_type = sk->sk_type;
-       rep->udiag_state = sk->sk_state;
+       rep->udiag_state = READ_ONCE(sk->sk_state);
        rep->pad = 0;
        rep->udiag_ino = sk_ino;
        sock_diag_save_cookie(sk, rep->udiag_cookie);
@@ -165,7 +165,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
            sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
                goto out_nlmsg_trim;
 
-       if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
+       if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
                goto out_nlmsg_trim;
 
        if ((req->udiag_show & UDIAG_SHOW_UID) &&
@@ -215,7 +215,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                sk_for_each(sk, &net->unx.table.buckets[slot]) {
                        if (num < s_num)
                                goto next;
-                       if (!(req->udiag_states & (1 << sk->sk_state)))
+                       if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
                                goto next;
                        if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
                                         NETLINK_CB(cb->skb).portid,
index 3fb1b637352a9d0b469206d890601031ffd4c68f..4b1f45e3070e06c72095037793cf5b087bf5cd0f 100644 (file)
@@ -431,7 +431,7 @@ static void cfg80211_wiphy_work(struct work_struct *work)
        if (wk) {
                list_del_init(&wk->entry);
                if (!list_empty(&rdev->wiphy_work_list))
-                       schedule_work(work);
+                       queue_work(system_unbound_wq, work);
                spin_unlock_irq(&rdev->wiphy_work_lock);
 
                wk->func(&rdev->wiphy, wk);
index e106dcea3977828456992ca3ca9af97d7c9775c7..c569c37da31758a1b7f4051541537588140ef45d 100644 (file)
@@ -56,7 +56,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
        out->ftm.burst_period = 0;
        if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
                out->ftm.burst_period =
-                       nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
+                       nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
 
        out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
        if (out->ftm.asap && !capa->ftm.asap) {
@@ -75,7 +75,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
        out->ftm.num_bursts_exp = 0;
        if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
                out->ftm.num_bursts_exp =
-                       nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
+                       nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
 
        if (capa->ftm.max_bursts_exponent >= 0 &&
            out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
@@ -88,7 +88,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
        out->ftm.burst_duration = 15;
        if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
                out->ftm.burst_duration =
-                       nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
+                       nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
 
        out->ftm.ftms_per_burst = 0;
        if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
@@ -107,7 +107,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
        out->ftm.ftmr_retries = 3;
        if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
                out->ftm.ftmr_retries =
-                       nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
+                       nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
 
        out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
        if (out->ftm.request_lci && !capa->ftm.request_lci) {
index 43897a5269b6a9e011e8e31bd882f55cd19480c5..755af47b88b91a6a315daecbc47341b9f4c71eb8 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Portions of this file
  * Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018, 2021-2023 Intel Corporation
+ * Copyright (C) 2018, 2021-2024 Intel Corporation
  */
 #ifndef __CFG80211_RDEV_OPS
 #define __CFG80211_RDEV_OPS
@@ -458,6 +458,10 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev,
                            struct cfg80211_scan_request *request)
 {
        int ret;
+
+       if (WARN_ON_ONCE(!request->n_ssids && request->ssids))
+               return -EINVAL;
+
        trace_rdev_scan(&rdev->wiphy, request);
        ret = rdev->ops->scan(&rdev->wiphy, request);
        trace_rdev_return_int(&rdev->wiphy, ret);
index 127853877a0ad14fec0f92615976c8fc8d33c3ef..2f2a3163968a7cc5c44ce85f73068e0a2765df76 100644 (file)
@@ -812,6 +812,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
        LIST_HEAD(coloc_ap_list);
        bool need_scan_psc = true;
        const struct ieee80211_sband_iftype_data *iftd;
+       size_t size, offs_ssids, offs_6ghz_params, offs_ies;
 
        rdev_req->scan_6ghz = true;
 
@@ -877,10 +878,15 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
                spin_unlock_bh(&rdev->bss_lock);
        }
 
-       request = kzalloc(struct_size(request, channels, n_channels) +
-                         sizeof(*request->scan_6ghz_params) * count +
-                         sizeof(*request->ssids) * rdev_req->n_ssids,
-                         GFP_KERNEL);
+       size = struct_size(request, channels, n_channels);
+       offs_ssids = size;
+       size += sizeof(*request->ssids) * rdev_req->n_ssids;
+       offs_6ghz_params = size;
+       size += sizeof(*request->scan_6ghz_params) * count;
+       offs_ies = size;
+       size += rdev_req->ie_len;
+
+       request = kzalloc(size, GFP_KERNEL);
        if (!request) {
                cfg80211_free_coloc_ap_list(&coloc_ap_list);
                return -ENOMEM;
@@ -888,8 +894,26 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
 
        *request = *rdev_req;
        request->n_channels = 0;
-       request->scan_6ghz_params =
-               (void *)&request->channels[n_channels];
+       request->n_6ghz_params = 0;
+       if (rdev_req->n_ssids) {
+               /*
+                * Add the ssids from the parent scan request to the new
+                * scan request, so the driver would be able to use them
+                * in its probe requests to discover hidden APs on PSC
+                * channels.
+                */
+               request->ssids = (void *)request + offs_ssids;
+               memcpy(request->ssids, rdev_req->ssids,
+                      sizeof(*request->ssids) * request->n_ssids);
+       }
+       request->scan_6ghz_params = (void *)request + offs_6ghz_params;
+
+       if (rdev_req->ie_len) {
+               void *ie = (void *)request + offs_ies;
+
+               memcpy(ie, rdev_req->ie, rdev_req->ie_len);
+               request->ie = ie;
+       }
 
        /*
         * PSC channels should not be scanned in case of direct scan with 1 SSID
@@ -978,17 +1002,8 @@ skip:
 
        if (request->n_channels) {
                struct cfg80211_scan_request *old = rdev->int_scan_req;
-               rdev->int_scan_req = request;
 
-               /*
-                * Add the ssids from the parent scan request to the new scan
-                * request, so the driver would be able to use them in its
-                * probe requests to discover hidden APs on PSC channels.
-                */
-               request->ssids = (void *)&request->channels[request->n_channels];
-               request->n_ssids = rdev_req->n_ssids;
-               memcpy(request->ssids, rdev_req->ssids, sizeof(*request->ssids) *
-                      request->n_ssids);
+               rdev->int_scan_req = request;
 
                /*
                 * If this scan follows a previous scan, save the scan start
@@ -2128,7 +2143,8 @@ static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen,
        struct ieee80211_he_operation *he_oper;
 
        tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
-       if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
+       if (tmp && tmp->datalen >= sizeof(*he_oper) + 1 &&
+           tmp->datalen >= ieee80211_he_oper_size(tmp->data + 1)) {
                const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
 
                he_oper = (void *)&tmp->data[1];
index 565511a3f461ed6872db387439a196351d02f00a..62f26618f674741a5163fa8e8d14c5319b2ceff8 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright 2005-2006 Jiri Benc <[email protected]>
  * Copyright 2006      Johannes Berg <[email protected]>
- * Copyright (C) 2020-2021, 2023 Intel Corporation
+ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
  */
 
 #include <linux/device.h>
@@ -137,7 +137,7 @@ static int wiphy_resume(struct device *dev)
        if (rdev->wiphy.registered && rdev->ops->resume)
                ret = rdev_resume(rdev);
        rdev->suspended = false;
-       schedule_work(&rdev->wiphy_work);
+       queue_work(system_unbound_wq, &rdev->wiphy_work);
        wiphy_unlock(&rdev->wiphy);
 
        if (ret)
index 2bde8a35463132d32dd0a185566ad4e72012e96a..082c6f9c5416eb5da26282f6a030440c79ed223c 100644 (file)
@@ -2549,6 +2549,7 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
 {
        struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
+       int ret;
 
        wdev = dev->ieee80211_ptr;
        if (!wdev)
@@ -2560,7 +2561,11 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
 
        memset(sinfo, 0, sizeof(*sinfo));
 
-       return rdev_get_station(rdev, dev, mac_addr, sinfo);
+       wiphy_lock(&rdev->wiphy);
+       ret = rdev_get_station(rdev, dev, mac_addr, sinfo);
+       wiphy_unlock(&rdev->wiphy);
+
+       return ret;
 }
 EXPORT_SYMBOL(cfg80211_get_station);
 
index 727aa20be4bde8dc63a544a44a5cdeb19cac7dcb..7d1c0986f9bb354aa5a562f3edc72e54837b8ac5 100644 (file)
@@ -313,13 +313,10 @@ static bool xsk_is_bound(struct xdp_sock *xs)
 
 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
-       struct net_device *dev = xdp->rxq->dev;
-       u32 qid = xdp->rxq->queue_index;
-
        if (!xsk_is_bound(xs))
                return -ENXIO;
 
-       if (!dev->_rx[qid].pool || xs->umem != dev->_rx[qid].pool->umem)
+       if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;
 
        if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
index 475b904fe68b8fa0c4e06f265309a52332a582e8..66e07de2de35cd1b5d3b4b5771e152dde6660b0d 100644 (file)
@@ -3910,15 +3910,10 @@ static void xfrm_link_failure(struct sk_buff *skb)
        /* Impossible. Such dst must be popped before reaches point of failure. */
 }
 
-static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
+static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
 {
-       if (dst) {
-               if (dst->obsolete) {
-                       dst_release(dst);
-                       dst = NULL;
-               }
-       }
-       return dst;
+       if (dst->obsolete)
+               sk_dst_reset(sk);
 }
 
 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
index a186570725412f80f4d89df7ebd190ee5e8c01ab..b47f4daa4515dfa9ac5010a29968eeda7a177ca7 100644 (file)
@@ -3,7 +3,7 @@
 
 # *** Also keep .gitignore in sync when changing ***
 hostprogs-always-$(CONFIG_DTC)         += dtc fdtoverlay
-hostprogs-always-$(CHECK_DT_BINDING)   += dtc
+hostprogs-always-$(CHECK_DTBS)         += dtc
 
 dtc-objs       := dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
                   srcpos.o checks.o util.o
index d77ad9079d0f92e2bb8065717e928fc40db5431a..fd1402c0a1a18fb7176260d08989413411252c04 100644 (file)
@@ -5,7 +5,7 @@ ifdef building_out_of_srctree
 symlinks := $(patsubst $(src)/%,%,$(wildcard $(src)/*.py))
 
 quiet_cmd_symlink = SYMLINK $@
-      cmd_symlink = ln -fsn $(patsubst $(obj)/%,$(abspath $(srctree))/$(src)/%,$@) $@
+      cmd_symlink = ln -fsn $(patsubst $(obj)/%,$(src)/%,$@) $@
 
 always-y += $(symlinks)
 $(addprefix $(obj)/, $(symlinks)): FORCE
index a290de36307ba8abe184a915fb0a6b6a3b29bbb6..4d95fce5f9a7ab7c421c2829c59305738e66a010 100644 (file)
@@ -476,7 +476,7 @@ static struct expr *expr_join_or(struct expr *e1, struct expr *e2)
                        return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_yes);
                }
        }
-       if (sym1->type == S_BOOLEAN && sym1 == sym2) {
+       if (sym1->type == S_BOOLEAN) {
                if ((e1->type == E_NOT && e1->left.expr->type == E_SYMBOL && e2->type == E_SYMBOL) ||
                    (e2->type == E_NOT && e2->left.expr->type == E_SYMBOL && e1->type == E_SYMBOL))
                        return expr_alloc_symbol(&symbol_yes);
index d965e427753eb7f7d94c40600603ddb21bef19e7..fa50fc45622e38e717901e71c54532432c7f1c0c 100644 (file)
@@ -302,11 +302,6 @@ static inline int expr_is_yes(struct expr *e)
        return !e || (e->type == E_SYMBOL && e->left.sym == &symbol_yes);
 }
 
-static inline int expr_is_no(struct expr *e)
-{
-       return e && (e->type == E_SYMBOL && e->left.sym == &symbol_no);
-}
-
 #ifdef __cplusplus
 }
 #endif
index aa0e25ee5119ef2d2f993f309980fc27f1e3e00b..0e439d3d48d11b63ed4175923c360edfb7de6f56 100644 (file)
@@ -14,6 +14,7 @@
 
 struct symbol symbol_yes = {
        .name = "y",
+       .type = S_TRISTATE,
        .curr = { "y", yes },
        .menus = LIST_HEAD_INIT(symbol_yes.menus),
        .flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -21,6 +22,7 @@ struct symbol symbol_yes = {
 
 struct symbol symbol_mod = {
        .name = "m",
+       .type = S_TRISTATE,
        .curr = { "m", mod },
        .menus = LIST_HEAD_INIT(symbol_mod.menus),
        .flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -28,6 +30,7 @@ struct symbol symbol_mod = {
 
 struct symbol symbol_no = {
        .name = "n",
+       .type = S_TRISTATE,
        .curr = { "n", no },
        .menus = LIST_HEAD_INIT(symbol_no.menus),
        .flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -820,8 +823,7 @@ const char *sym_get_string_value(struct symbol *sym)
                case no:
                        return "n";
                case mod:
-                       sym_calc_value(modules_sym);
-                       return (modules_sym->curr.tri == no) ? "n" : "m";
+                       return "m";
                case yes:
                        return "y";
                }
index 7862a81017477daec1f702fdf46166e381bd396d..46ce5d04dbeb100ddbe524baa36ccd63415e8738 100755 (executable)
@@ -179,10 +179,10 @@ kallsyms_step()
        kallsyms_S=${kallsyms_vmlinux}.S
 
        vmlinux_link ${kallsyms_vmlinux} "${kallsymso_prev}" ${btf_vmlinux_bin_o}
-       mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms ${kallsymso_prev}
+       mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms
        kallsyms ${kallsyms_vmlinux}.syms ${kallsyms_S}
 
-       info AS ${kallsyms_S}
+       info AS ${kallsymso}
        ${CC} ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS} \
              ${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
              -c -o ${kallsymso} ${kallsyms_S}
@@ -193,7 +193,7 @@ kallsyms_step()
 mksysmap()
 {
        info NM ${2}
-       ${CONFIG_SHELL} "${srctree}/scripts/mksysmap" ${1} ${2} ${3}
+       ${NM} -n "${1}" | "${srctree}/scripts/mksysmap" > "${2}"
 }
 
 sorttable()
@@ -201,7 +201,6 @@ sorttable()
        ${objtree}/scripts/sorttable ${1}
 }
 
-# Delete output files in case of error
 cleanup()
 {
        rm -f .btf.*
@@ -282,7 +281,7 @@ if is_enabled CONFIG_DEBUG_INFO_BTF && is_enabled CONFIG_BPF; then
        ${RESOLVE_BTFIDS} vmlinux
 fi
 
-mksysmap vmlinux System.map ${kallsymso}
+mksysmap vmlinux System.map
 
 if is_enabled CONFIG_BUILDTIME_TABLE_SORT; then
        info SORTTAB vmlinux
index 3de90c5a094b7aacd8e5d18449374c2691113717..263147df80a445aaed75101fcb8179662f0de380 100755 (executable)
@@ -190,7 +190,7 @@ def output_dtb(fsw, seq, fname, arch, compress):
     Args:
         fsw (libfdt.FdtSw): Object to use for writing
         seq (int): Sequence number (1 for first)
-        fmame (str): Filename containing the DTB
+        fname (str): Filename containing the DTB
         arch: FIT architecture, e.g. 'arm64'
         compress (str): Compressed algorithm, e.g. 'gzip'
 
@@ -211,7 +211,6 @@ def output_dtb(fsw, seq, fname, arch, compress):
         fsw.property_string('type', 'flat_dt')
         fsw.property_string('arch', arch)
         fsw.property_string('compression', compress)
-        fsw.property('compatible', bytes(compat))
 
         with open(fname, 'rb') as inf:
             compressed = compress_data(inf, compress)
index 57ff5656d566fbc659801bdb4fd445b7b9ef2b86..c12723a0465562c782bfdaafc3ce4800671f959b 100755 (executable)
@@ -1,22 +1,16 @@
-#!/bin/sh -x
-# Based on the vmlinux file create the System.map file
+#!/bin/sed -f
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# sed script to filter out symbols that are not needed for System.map,
+# or not suitable for kallsyms. The input should be 'nm -n <file>'.
+#
 # System.map is used by module-init tools and some debugging
 # tools to retrieve the actual addresses of symbols in the kernel.
 #
-# Usage
-# mksysmap vmlinux System.map [exclude]
-
-
-#####
-# Generate System.map (actual filename passed as second argument)
-# The following refers to the symbol type as per nm(1).
-
 # readprofile starts reading symbols when _stext is found, and
 # continue until it finds a symbol which is not either of 'T', 't',
 # 'W' or 'w'.
 #
-
-${NM} -n ${1} | sed >${2} -e "
 # ---------------------------------------------------------------------------
 # Ignored symbol types
 #
@@ -92,13 +86,3 @@ ${NM} -n ${1} | sed >${2} -e "
 # ppc stub
 /\.long_branch\./d
 /\.plt_branch\./d
-
-# ---------------------------------------------------------------------------
-# Ignored kallsyms symbols
-#
-# If the 3rd parameter exists, symbols from it will be omitted from the output.
-# This makes kallsyms have the identical symbol lists in the step 1 and 2.
-# Without this, the step2 would get new symbols generated by scripts/kallsyms.c
-# when CONFIG_KALLSYMS_ALL is enabled. That might require one more pass.
-$(if [ $# -ge 3 ]; then ${NM} ${3} | sed -n '/ U /!s:.* \([^ ]*\)$:/ \1$/d:p'; fi)
-"
index 22d8b7c28074e056738a6cfb5307dc80c9044c55..7877a64cc6b87c0930e679ab07ee469c5bec22e5 100644 (file)
@@ -1110,6 +1110,7 @@ static int current_check_refer_path(struct dentry *const old_dentry,
        bool allow_parent1, allow_parent2;
        access_mask_t access_request_parent1, access_request_parent2;
        struct path mnt_dir;
+       struct dentry *old_parent;
        layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
                     layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
 
@@ -1157,9 +1158,17 @@ static int current_check_refer_path(struct dentry *const old_dentry,
        mnt_dir.mnt = new_dir->mnt;
        mnt_dir.dentry = new_dir->mnt->mnt_root;
 
+       /*
+        * old_dentry may be the root of the common mount point and
+        * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
+        * OPEN_TREE_CLONE).  We do not need to call dget(old_parent) because
+        * we keep a reference to old_dentry.
+        */
+       old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
+                                                     old_dentry->d_parent;
+
        /* new_dir->dentry is equal to new_dentry->d_parent */
-       allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry,
-                                               old_dentry->d_parent,
+       allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
                                                &layer_masks_parent1);
        allow_parent2 = collect_domain_accesses(
                dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
index fad75be5f381d6e0d16466fa8b0b47fcfd092a23..1e0dd1a6d0b0e9aa01b6fe4cadb43b8c6b7de058 100644 (file)
@@ -10,7 +10,7 @@ config SECURITY_TOMOYO
        help
          This selects TOMOYO Linux, pathname-based access control.
          Required userspace tools and further information may be
-         found at <https://tomoyo.osdn.jp/>.
+         found at <https://tomoyo.sourceforge.net/>.
          If you are unsure how to answer this question, answer N.
 
 config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY
index ea3140d510ecbfee06666df588a795b9f5bfc5ce..5c7b059a332aac494a95b1f99415dd2eef6a2900 100644 (file)
@@ -2787,7 +2787,7 @@ void tomoyo_check_profile(void)
                else
                        continue;
                pr_err("Userland tools for TOMOYO 2.6 must be installed and policy must be initialized.\n");
-               pr_err("Please see https://tomoyo.osdn.jp/2.6/ for more information.\n");
+               pr_err("Please see https://tomoyo.sourceforge.net/2.6/ for more information.\n");
                panic("STOP!");
        }
        tomoyo_read_unlock(idx);
index 4e52bbe32786ba9945f03fd921b9d177692b8cda..b9b708cf980d6dbf9d80992b7af86e761a7bdf71 100644 (file)
@@ -537,6 +537,11 @@ void snd_card_disconnect(struct snd_card *card)
                synchronize_irq(card->sync_irq);
 
        snd_info_card_disconnect(card);
+#ifdef CONFIG_SND_DEBUG
+       debugfs_remove(card->debugfs_root);
+       card->debugfs_root = NULL;
+#endif
+
        if (card->registered) {
                device_del(&card->card_dev);
                card->registered = false;
@@ -586,10 +591,6 @@ static int snd_card_do_free(struct snd_card *card)
                dev_warn(card->dev, "unable to free card info\n");
                /* Not fatal error */
        }
-#ifdef CONFIG_SND_DEBUG
-       debugfs_remove(card->debugfs_root);
-       card->debugfs_root = NULL;
-#endif
        if (card->release_completion)
                complete(card->release_completion);
        if (!card->managed)
index e08b2c4fbd1a577380d31781d28b222b404c5c2c..e4bcecdf89b7ec0bccb0d113784c32ecf2620a68 100644 (file)
@@ -37,11 +37,15 @@ static const int jack_switch_types[SND_JACK_SWITCH_TYPES] = {
 };
 #endif /* CONFIG_SND_JACK_INPUT_DEV */
 
+static void snd_jack_remove_debugfs(struct snd_jack *jack);
+
 static int snd_jack_dev_disconnect(struct snd_device *device)
 {
-#ifdef CONFIG_SND_JACK_INPUT_DEV
        struct snd_jack *jack = device->device_data;
 
+       snd_jack_remove_debugfs(jack);
+
+#ifdef CONFIG_SND_JACK_INPUT_DEV
        guard(mutex)(&jack->input_dev_lock);
        if (!jack->input_dev)
                return 0;
@@ -381,10 +385,14 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
        return 0;
 }
 
-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
+static void snd_jack_remove_debugfs(struct snd_jack *jack)
 {
-       debugfs_remove(jack_kctl->jack_debugfs_root);
-       jack_kctl->jack_debugfs_root = NULL;
+       struct snd_jack_kctl *jack_kctl;
+
+       list_for_each_entry(jack_kctl, &jack->kctl_list, list) {
+               debugfs_remove(jack_kctl->jack_debugfs_root);
+               jack_kctl->jack_debugfs_root = NULL;
+       }
 }
 #else /* CONFIG_SND_JACK_INJECTION_DEBUG */
 static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
@@ -393,7 +401,7 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
        return 0;
 }
 
-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
+static void snd_jack_remove_debugfs(struct snd_jack *jack)
 {
 }
 #endif /* CONFIG_SND_JACK_INJECTION_DEBUG */
@@ -404,7 +412,6 @@ static void snd_jack_kctl_private_free(struct snd_kcontrol *kctl)
 
        jack_kctl = kctl->private_data;
        if (jack_kctl) {
-               snd_jack_debugfs_clear_inject_node(jack_kctl);
                list_del(&jack_kctl->list);
                kfree(jack_kctl);
        }
@@ -497,8 +504,8 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
                .dev_free = snd_jack_dev_free,
 #ifdef CONFIG_SND_JACK_INPUT_DEV
                .dev_register = snd_jack_dev_register,
-               .dev_disconnect = snd_jack_dev_disconnect,
 #endif /* CONFIG_SND_JACK_INPUT_DEV */
+               .dev_disconnect = snd_jack_dev_disconnect,
        };
 
        if (initial_kctl) {
index ee6ac649df836d695133f93da9cfd0518cc1ae23..171fb75267afae6f799d21350233256211932a6d 100644 (file)
@@ -157,7 +157,7 @@ static void ump_system_to_one_param_ev(const union snd_ump_midi1_msg *val,
 static void ump_system_to_songpos_ev(const union snd_ump_midi1_msg *val,
                                     struct snd_seq_event *ev)
 {
-       ev->data.control.value = (val->system.parm1 << 7) | val->system.parm2;
+       ev->data.control.value = (val->system.parm2 << 7) | val->system.parm1;
 }
 
 /* Encoders for 0xf0 - 0xff */
@@ -368,6 +368,7 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
        struct snd_seq_ump_event ev_cvt;
        const union snd_ump_midi1_msg *midi1 = (const union snd_ump_midi1_msg *)event->ump;
        union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)ev_cvt.ump;
+       struct snd_seq_ump_midi2_bank *cc;
 
        ev_cvt = *event;
        memset(&ev_cvt.ump, 0, sizeof(ev_cvt.ump));
@@ -387,11 +388,29 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
                midi2->paf.data = upscale_7_to_32bit(midi1->paf.data);
                break;
        case UMP_MSG_STATUS_CC:
+               cc = &dest_port->midi2_bank[midi1->note.channel];
+               switch (midi1->cc.index) {
+               case UMP_CC_BANK_SELECT:
+                       cc->bank_set = 1;
+                       cc->cc_bank_msb = midi1->cc.data;
+                       return 0; // skip
+               case UMP_CC_BANK_SELECT_LSB:
+                       cc->bank_set = 1;
+                       cc->cc_bank_lsb = midi1->cc.data;
+                       return 0; // skip
+               }
                midi2->cc.index = midi1->cc.index;
                midi2->cc.data = upscale_7_to_32bit(midi1->cc.data);
                break;
        case UMP_MSG_STATUS_PROGRAM:
                midi2->pg.program = midi1->pg.program;
+               cc = &dest_port->midi2_bank[midi1->note.channel];
+               if (cc->bank_set) {
+                       midi2->pg.bank_valid = 1;
+                       midi2->pg.bank_msb = cc->cc_bank_msb;
+                       midi2->pg.bank_lsb = cc->cc_bank_lsb;
+                       cc->bank_set = 0;
+               }
                break;
        case UMP_MSG_STATUS_CHANNEL_PRESSURE:
                midi2->caf.data = upscale_7_to_32bit(midi1->caf.data);
@@ -419,6 +438,7 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
        struct snd_seq_ump_event ev_cvt;
        union snd_ump_midi1_msg *midi1 = (union snd_ump_midi1_msg *)ev_cvt.ump;
        const union snd_ump_midi2_msg *midi2 = (const union snd_ump_midi2_msg *)event->ump;
+       int err;
        u16 v;
 
        ev_cvt = *event;
@@ -443,6 +463,24 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
                midi1->cc.data = downscale_32_to_7bit(midi2->cc.data);
                break;
        case UMP_MSG_STATUS_PROGRAM:
+               if (midi2->pg.bank_valid) {
+                       midi1->cc.status = UMP_MSG_STATUS_CC;
+                       midi1->cc.index = UMP_CC_BANK_SELECT;
+                       midi1->cc.data = midi2->pg.bank_msb;
+                       err = __snd_seq_deliver_single_event(dest, dest_port,
+                                                            (struct snd_seq_event *)&ev_cvt,
+                                                            atomic, hop);
+                       if (err < 0)
+                               return err;
+                       midi1->cc.index = UMP_CC_BANK_SELECT_LSB;
+                       midi1->cc.data = midi2->pg.bank_lsb;
+                       err = __snd_seq_deliver_single_event(dest, dest_port,
+                                                            (struct snd_seq_event *)&ev_cvt,
+                                                            atomic, hop);
+                       if (err < 0)
+                               return err;
+                       midi1->note.status = midi2->note.status;
+               }
                midi1->pg.program = midi2->pg.program;
                break;
        case UMP_MSG_STATUS_CHANNEL_PRESSURE:
@@ -691,6 +729,7 @@ static int system_ev_to_ump_midi1(const struct snd_seq_event *event,
                                  union snd_ump_midi1_msg *data,
                                  unsigned char status)
 {
+       data->system.type = UMP_MSG_TYPE_SYSTEM; // override
        data->system.status = status;
        return 1;
 }
@@ -701,6 +740,7 @@ static int system_1p_ev_to_ump_midi1(const struct snd_seq_event *event,
                                     union snd_ump_midi1_msg *data,
                                     unsigned char status)
 {
+       data->system.type = UMP_MSG_TYPE_SYSTEM; // override
        data->system.status = status;
        data->system.parm1 = event->data.control.value & 0x7f;
        return 1;
@@ -712,9 +752,10 @@ static int system_2p_ev_to_ump_midi1(const struct snd_seq_event *event,
                                     union snd_ump_midi1_msg *data,
                                     unsigned char status)
 {
+       data->system.type = UMP_MSG_TYPE_SYSTEM; // override
        data->system.status = status;
-       data->system.parm1 = (event->data.control.value >> 7) & 0x7f;
-       data->system.parm2 = event->data.control.value & 0x7f;
+       data->system.parm1 = event->data.control.value & 0x7f;
+       data->system.parm2 = (event->data.control.value >> 7) & 0x7f;
        return 1;
 }
 
@@ -854,7 +895,6 @@ static int pgm_ev_to_ump_midi2(const struct snd_seq_event *event,
                data->pg.bank_msb = cc->cc_bank_msb;
                data->pg.bank_lsb = cc->cc_bank_lsb;
                cc->bank_set = 0;
-               cc->cc_bank_msb = cc->cc_bank_lsb = 0;
        }
        return 1;
 }
index fd6a68a5427886d11e443132a74568d933f2f163..3f61220c23b4ec028faddd63bff776a91cc8067c 100644 (file)
@@ -685,10 +685,17 @@ static void seq_notify_protocol(struct snd_ump_endpoint *ump)
  */
 int snd_ump_switch_protocol(struct snd_ump_endpoint *ump, unsigned int protocol)
 {
+       unsigned int type;
+
        protocol &= ump->info.protocol_caps;
        if (protocol == ump->info.protocol)
                return 0;
 
+       type = protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK;
+       if (type != SNDRV_UMP_EP_INFO_PROTO_MIDI1 &&
+           type != SNDRV_UMP_EP_INFO_PROTO_MIDI2)
+               return 0;
+
        ump->info.protocol = protocol;
        ump_dbg(ump, "New protocol = %x (caps = %x)\n",
                protocol, ump->info.protocol_caps);
@@ -960,6 +967,14 @@ int snd_ump_parse_endpoint(struct snd_ump_endpoint *ump)
        if (err < 0)
                ump_dbg(ump, "Unable to get UMP EP stream config\n");
 
+       /* If no protocol is set by some reason, assume the valid one */
+       if (!(ump->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK)) {
+               if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI2)
+                       ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI2;
+               else if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI1)
+                       ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI1;
+       }
+
        /* Query and create blocks from Function Blocks */
        for (blk = 0; blk < ump->info.num_blocks; blk++) {
                err = create_block_from_fb_info(ump, blk);
index de04799fdb69aaf3b08de639739e52f09ebb55a3..f67c44c83fde445258c59f34897b73a913c17cf1 100644 (file)
@@ -404,7 +404,6 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
                        midi2->pg.bank_msb = cc->cc_bank_msb;
                        midi2->pg.bank_lsb = cc->cc_bank_lsb;
                        cc->bank_set = 0;
-                       cc->cc_bank_msb = cc->cc_bank_lsb = 0;
                }
                break;
        case UMP_MSG_STATUS_CHANNEL_PRESSURE:
index cfdb1b73c88c2011a2f977af4d53138cd0f9912e..537863447358e078d1d605d29f548352b8c80934 100644 (file)
@@ -668,7 +668,7 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci)
                return SND_INTEL_DSP_DRIVER_LEGACY;
        }
 
-       dev_info(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);
+       dev_dbg(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);
 
        /* find the configuration for the specific device */
        cfg = snd_intel_dsp_find_config(pci, config_table, ARRAY_SIZE(config_table));
@@ -678,12 +678,12 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci)
        if (cfg->flags & FLAG_SOF) {
                if (cfg->flags & FLAG_SOF_ONLY_IF_SOUNDWIRE &&
                    snd_intel_dsp_check_soundwire(pci) > 0) {
-                       dev_info(&pci->dev, "SoundWire enabled on CannonLake+ platform, using SOF driver\n");
+                       dev_info_once(&pci->dev, "SoundWire enabled on CannonLake+ platform, using SOF driver\n");
                        return SND_INTEL_DSP_DRIVER_SOF;
                }
                if (cfg->flags & FLAG_SOF_ONLY_IF_DMIC &&
                    snd_intel_dsp_check_dmic(pci)) {
-                       dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SOF driver\n");
+                       dev_info_once(&pci->dev, "Digital mics found on Skylake+ platform, using SOF driver\n");
                        return SND_INTEL_DSP_DRIVER_SOF;
                }
                if (!(cfg->flags & FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE))
@@ -694,7 +694,7 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci)
        if (cfg->flags & FLAG_SST) {
                if (cfg->flags & FLAG_SST_ONLY_IF_DMIC) {
                        if (snd_intel_dsp_check_dmic(pci)) {
-                               dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SST driver\n");
+                               dev_info_once(&pci->dev, "Digital mics found on Skylake+ platform, using SST driver\n");
                                return SND_INTEL_DSP_DRIVER_SST;
                        }
                } else {
index e3c0b9d5552d957869de671704b4b567dd4854a6..aa76d1c885895a22e2745ab77ef6b11f5414c513 100644 (file)
@@ -10310,7 +10310,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
        SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
-       SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
        SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
        SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
index 94685449f0f48c9b7bd534b1947da07ab26fad53..92674314227c4623af36a6b1642e83c8a33572d6 100644 (file)
@@ -310,8 +310,9 @@ static int cs42l43_startup(struct snd_pcm_substream *substream, struct snd_soc_d
        struct snd_soc_component *component = dai->component;
        struct cs42l43_codec *priv = snd_soc_component_get_drvdata(component);
        struct cs42l43 *cs42l43 = priv->core;
-       int provider = !!regmap_test_bits(cs42l43->regmap, CS42L43_ASP_CLK_CONFIG2,
-                                         CS42L43_ASP_MASTER_MODE_MASK);
+       int provider = !dai->id || !!regmap_test_bits(cs42l43->regmap,
+                                                     CS42L43_ASP_CLK_CONFIG2,
+                                                     CS42L43_ASP_MASTER_MODE_MASK);
 
        if (provider)
                priv->constraint.mask = CS42L43_PROVIDER_RATE_MASK;
index c9d9a7b28efb08d28f206d585ae92b8ceb1be508..68d2d6444533f5d2f602dbe81fbf54b00979c4c2 100644 (file)
@@ -2085,5 +2085,6 @@ static const struct cs_dsp_client_ops wm_adsp2_client_ops = {
        .watchdog_expired = wm_adsp_fatal_error,
 };
 
+MODULE_DESCRIPTION("Cirrus Logic ASoC DSP Support");
 MODULE_LICENSE("GPL v2");
 MODULE_IMPORT_NS(FW_CS_DSP);
index 3ed81ab649c53b20818155bed4e2c9e59216f07a..4e0586034de4b2f0700324615844655ef42e6e43 100644 (file)
@@ -652,7 +652,7 @@ if SND_SOC_SOF_INTEL_SOUNDWIRE
 
 config SND_SOC_INTEL_SOUNDWIRE_SOF_MACH
        tristate "SoundWire generic machine driver"
-       depends on I2C && ACPI
+       depends on I2C && SPI_MASTER && ACPI
        depends on MFD_INTEL_LPSS || COMPILE_TEST
        depends on SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES || COMPILE_TEST
        depends on SOUNDWIRE
index b26fa471b431fff7c73934cede3e5a9c16eb58b2..81bb93e9835864267e1e8464806b6cdb8c02911f 100644 (file)
@@ -258,8 +258,8 @@ const struct snd_sof_dsp_ops sof_acp_common_ops = {
 };
 EXPORT_SYMBOL_NS(sof_acp_common_ops, SND_SOC_SOF_AMD_COMMON);
 
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("ACP SOF COMMON Driver");
 MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
 MODULE_IMPORT_NS(SOUNDWIRE_AMD_INIT);
-MODULE_DESCRIPTION("ACP SOF COMMON Driver");
-MODULE_LICENSE("Dual BSD/GPL");
index c12c7f820529476de0273474082b8174ab0ae052..74fd5f2b148b8545c1229731239aedf87af3e0a0 100644 (file)
@@ -801,7 +801,7 @@ void amd_sof_acp_remove(struct snd_sof_dev *sdev)
 }
 EXPORT_SYMBOL_NS(amd_sof_acp_remove, SND_SOC_SOF_AMD_COMMON);
 
+MODULE_LICENSE("Dual BSD/GPL");
 MODULE_DESCRIPTION("AMD ACP sof driver");
 MODULE_IMPORT_NS(SOUNDWIRE_AMD_INIT);
 MODULE_IMPORT_NS(SND_AMD_SOUNDWIRE_ACPI);
-MODULE_LICENSE("Dual BSD/GPL");
index 9fb645079c3ad3e97aa006994a4177864295f640..9e6eb4bfc805b21e9b0607d8ea26843307338d57 100644 (file)
@@ -140,7 +140,3 @@ int sof_acp63_ops_init(struct snd_sof_dev *sdev)
 
        return 0;
 }
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("ACP63 SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
index eeaa12cceb23311312d5104ae4452c0e745fb22a..fc8984447365718410b4320e8cd05fbf3802d804 100644 (file)
@@ -109,5 +109,6 @@ static struct pci_driver snd_sof_pci_amd_acp63_driver = {
 module_pci_driver(snd_sof_pci_amd_acp63_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("ACP63 SOF Driver");
 MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
index 2f288545c4260525a09b62bc01a1bb733d178ef3..4bc30951f8b0d711fc8baa6afa285a16a6ad330c 100644 (file)
@@ -99,5 +99,6 @@ static struct pci_driver snd_sof_pci_amd_rmb_driver = {
 module_pci_driver(snd_sof_pci_amd_rmb_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("REMBRANDT SOF Driver");
 MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
index a0195e9b400c8989748e1896f4b889fc509372ee..e08875bdfa8b16f3fcd1512223b5c351a735db21 100644 (file)
@@ -103,5 +103,6 @@ static struct pci_driver snd_sof_pci_amd_rn_driver = {
 module_pci_driver(snd_sof_pci_amd_rn_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("RENOIR SOF Driver");
 MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
index 5cd3ac84752fb83fa030c5f91614594c092ffa1c..16eb2994fbab905b8c1f4994f078be4f013611da 100644 (file)
@@ -101,5 +101,6 @@ static struct pci_driver snd_sof_pci_amd_vgh_driver = {
 module_pci_driver(snd_sof_pci_amd_vgh_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("VANGOGH SOF Driver");
 MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
index f1d1ba57ab3a0d716daa42ede437d5cf36518e70..076f2f05a95c2066055dffa26f5ad372cfe6b1c1 100644 (file)
@@ -140,7 +140,3 @@ int sof_rembrandt_ops_init(struct snd_sof_dev *sdev)
 
        return 0;
 }
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("REMBRANDT SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
index 47b863f6258c6d5d922411251155e5eb664151b5..aa2d24dac6f5d5f6a3c3367de868157c2bd49073 100644 (file)
@@ -115,7 +115,3 @@ int sof_renoir_ops_init(struct snd_sof_dev *sdev)
 
        return 0;
 }
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("RENOIR SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
index bc6ffdb5471a58ae4b30b27ba166fd356a974edd..61372958c09dc8c19343484e1155d986b067a17a 100644 (file)
@@ -161,7 +161,3 @@ int sof_vangogh_ops_init(struct snd_sof_dev *sdev)
 
        return 0;
 }
-
-MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
-MODULE_DESCRIPTION("VANGOGH SOF Driver");
-MODULE_LICENSE("Dual BSD/GPL");
index 0a4917136ff971593fc453acf62fc69ad33783b2..83fe0401baf867b071a7449d71d98c207506a15c 100644 (file)
@@ -769,7 +769,7 @@ void sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
 EXPORT_SYMBOL(sof_machine_unregister);
 
 MODULE_AUTHOR("Liam Girdwood");
-MODULE_DESCRIPTION("Sound Open Firmware (SOF) Core");
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Sound Open Firmware (SOF) Core");
 MODULE_ALIAS("platform:sof-audio");
 MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
index 2981aea123d976cdc0dccc295511cfcb18fb7677..fce6d9cf6a6be839002e5aa45cc3107d57f92f16 100644 (file)
@@ -75,3 +75,4 @@ void imx8_dump(struct snd_sof_dev *sdev, u32 flags)
 EXPORT_SYMBOL(imx8_dump);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF helpers for IMX platforms");
index 3021dc87ab5a23c8b811282e3f12d4e3a784c392..9f24e3c283dd41ef73d89319d623ebacb4c523cc 100644 (file)
@@ -667,5 +667,6 @@ static struct platform_driver snd_sof_of_imx8_driver = {
 };
 module_platform_driver(snd_sof_of_imx8_driver);
 
-MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IMX8 platforms");
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
index 4ed415f0434544c3d01a8119b1c0c020a06dbca8..1c7019c3cbd38ccaed5b5aca638a5d299b5ce824 100644 (file)
@@ -514,5 +514,6 @@ static struct platform_driver snd_sof_of_imx8m_driver = {
 };
 module_platform_driver(snd_sof_of_imx8m_driver);
 
-MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IMX8M platforms");
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
index 8adfdd00413aa15a2d063bb46199a58b3e0619fc..2585b1beef23f89f65a696ec65cebcb27b579e44 100644 (file)
@@ -516,5 +516,6 @@ static struct platform_driver snd_sof_of_imx8ulp_driver = {
 };
 module_platform_driver(snd_sof_of_imx8ulp_driver);
 
-MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IMX8ULP platforms");
+MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
index 86af4e9a716ef776f5e2ffa7c4242625a28a9c29..3505ac3a1b143fcd0586a2782224508afca47357 100644 (file)
@@ -418,3 +418,4 @@ void atom_set_mach_params(struct snd_soc_acpi_mach *mach,
 EXPORT_SYMBOL_NS(atom_set_mach_params, SND_SOC_SOF_INTEL_ATOM_HIFI_EP);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Atom platforms");
index 3262286a9a9d7422b518de2bd96cdf7b31189456..7f18080e4e191834295f35d11ed0c2e2e919d213 100644 (file)
@@ -694,6 +694,7 @@ static struct platform_driver snd_sof_acpi_intel_bdw_driver = {
 module_platform_driver(snd_sof_acpi_intel_bdw_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Broadwell platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
 MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
 MODULE_IMPORT_NS(SND_SOC_SOF_ACPI_DEV);
index d78d11d4cfbf6c7b31011f3d135a0e997434a74e..7a57e162fb1c2b323d5eba59180fe3e58db85ca6 100644 (file)
@@ -475,6 +475,7 @@ static struct platform_driver snd_sof_acpi_intel_byt_driver = {
 module_platform_driver(snd_sof_acpi_intel_byt_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Baytrail/Cherrytrail");
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
 MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
 MODULE_IMPORT_NS(SND_SOC_SOF_ACPI_DEV);
index da3db3ed379ecb7f350bed63be8535c1eecd04f3..dc46888faa0dc9c3fd3a826615bdeb5b66057b56 100644 (file)
@@ -457,3 +457,4 @@ EXPORT_SYMBOL_NS_GPL(hda_codec_i915_exit, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
 #endif
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for HDaudio codecs");
index 262b482dc0a800489eba28725c8b9048424fe9f7..b9a02750ce61311aff49a5674bf5f0cabe7a883e 100644 (file)
@@ -328,6 +328,7 @@ void hda_dsp_ctrl_stop_chip(struct snd_sof_dev *sdev)
 }
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF helpers for HDaudio platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_HDA_MLINK);
 MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC);
 MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
index 04bbc5c9904ce63ab3ad5610ce62ee0ab2e8af17..9a3559c78b6279bd3dbfaf05cca3d5c64cff83f9 100644 (file)
@@ -972,3 +972,4 @@ EXPORT_SYMBOL_NS(hdac_bus_eml_enable_offload, SND_SOC_SOF_HDA_MLINK);
 #endif
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for HDaudio multi-link");
index e6a38de0a0aa103a6bf1694820a91d036d07f7c9..dead1c19558bb02af35f23eae1f73c4426a093e4 100644 (file)
@@ -1522,6 +1522,7 @@ void hda_unregister_clients(struct snd_sof_dev *sdev)
 }
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for HDaudio platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
 MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC);
 MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
index df6d897da290b6486e386cff799319976a062daf..f006dcf5458aee93051a2ef24cb79266e2119fef 100644 (file)
@@ -105,6 +105,7 @@ static struct pci_driver snd_sof_pci_intel_apl_driver = {
 module_pci_driver(snd_sof_pci_intel_apl_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for ApolloLake platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
index a39fa3657d5556dc4ad938e071d0dd2e9ab1c12d..a8406342f08bdd8edf8bfeb7e5a2e90b80bf6c3a 100644 (file)
@@ -143,6 +143,7 @@ static struct pci_driver snd_sof_pci_intel_cnl_driver = {
 module_pci_driver(snd_sof_pci_intel_cnl_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for CannonLake platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
index 9f1fe47475fb17a6d53f70a77c4212cdc25d181a..25effca50d9fed1a819501717bb209e0217c7b68 100644 (file)
@@ -108,6 +108,7 @@ static struct pci_driver snd_sof_pci_intel_icl_driver = {
 module_pci_driver(snd_sof_pci_intel_icl_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for IceLake platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_CNL);
index 68e5c90151b27250586e2658281021fca94d6e36..602c574064eb5ca34978be1edd8b0987edef67e0 100644 (file)
@@ -70,6 +70,7 @@ static struct pci_driver snd_sof_pci_intel_lnl_driver = {
 module_pci_driver(snd_sof_pci_intel_lnl_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for LunarLake platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_MTL);
index c685cb8d617123ab397b953c2a14baac8360f2dd..8cb0333c033ef19756818c46998e329edc2fee2c 100644 (file)
@@ -133,6 +133,7 @@ static struct pci_driver snd_sof_pci_intel_mtl_driver = {
 module_pci_driver(snd_sof_pci_intel_mtl_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for MeteorLake platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
index 862da8009543bbd699d51434529eaeb09b90fa21..8ca0231d7e4f6e349d98fff90f0e4558920682cc 100644 (file)
@@ -89,6 +89,7 @@ static struct pci_driver snd_sof_pci_intel_skl_driver = {
 module_pci_driver(snd_sof_pci_intel_skl_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for SkyLake platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
index f73bb47cd79e7b79fc372b7e1f439ecbacba6faa..ebe1a7d1668946018e7a3277993c95291dc7b1ee 100644 (file)
@@ -317,6 +317,7 @@ static struct pci_driver snd_sof_pci_intel_tgl_driver = {
 module_pci_driver(snd_sof_pci_intel_tgl_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for TigerLake platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_GENERIC);
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_CNL);
index 5c3069588bb77111654b69e1c751e4e54ab49462..1375c393827e1167806a748cb4dab28b3b912791 100644 (file)
@@ -244,6 +244,7 @@ static struct pci_driver snd_sof_pci_intel_tng_driver = {
 module_pci_driver(snd_sof_pci_intel_tng_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for Tangier platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HIFI_EP_IPC);
 MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
 MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
index 307bee63756b5c81d2b19ea85ee5ca26bd6323aa..4df2be3d39eba0efba95aa16ebb7514c45dccc96 100644 (file)
@@ -650,7 +650,7 @@ static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
        struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
        struct sof_ipc4_audio_format *ipc4_fmt;
        struct sof_ipc4_copier *ipc4_copier;
-       bool single_fmt = false;
+       bool single_bitdepth = false;
        u32 valid_bits = 0;
        int dir, ret;
 
@@ -682,18 +682,18 @@ static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
                                return 0;
 
                        if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
-                               if (sof_ipc4_copier_is_single_format(sdev,
+                               if (sof_ipc4_copier_is_single_bitdepth(sdev,
                                        available_fmt->output_pin_fmts,
                                        available_fmt->num_output_formats)) {
                                        ipc4_fmt = &available_fmt->output_pin_fmts->audio_fmt;
-                                       single_fmt = true;
+                                       single_bitdepth = true;
                                }
                        } else {
-                               if (sof_ipc4_copier_is_single_format(sdev,
+                               if (sof_ipc4_copier_is_single_bitdepth(sdev,
                                        available_fmt->input_pin_fmts,
                                        available_fmt->num_input_formats)) {
                                        ipc4_fmt = &available_fmt->input_pin_fmts->audio_fmt;
-                                       single_fmt = true;
+                                       single_bitdepth = true;
                                }
                        }
                }
@@ -703,7 +703,7 @@ static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
        if (ret)
                return ret;
 
-       if (single_fmt) {
+       if (single_bitdepth) {
                snd_mask_none(fmt);
                valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(ipc4_fmt->fmt_cfg);
                dev_dbg(component->dev, "Set %s to %d bit format\n", dai->name, valid_bits);
index beff10989324755e68237138e598fb017c47b617..00987039c9720ff0034d4baf536b8cb6c27a0f13 100644 (file)
@@ -195,9 +195,10 @@ static void sof_ipc4_dbg_audio_format(struct device *dev, struct sof_ipc4_pin_fo
        for (i = 0; i < num_formats; i++) {
                struct sof_ipc4_audio_format *fmt = &pin_fmt[i].audio_fmt;
                dev_dbg(dev,
-                       "Pin index #%d: %uHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x) buffer size %d\n",
-                       pin_fmt[i].pin_index, fmt->sampling_frequency, fmt->bit_depth, fmt->ch_map,
-                       fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg,
+                       "Pin index #%d: %uHz, %ubit, %luch (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x) buffer size %d\n",
+                       pin_fmt[i].pin_index, fmt->sampling_frequency, fmt->bit_depth,
+                       SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg),
+                       fmt->ch_map, fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg,
                        pin_fmt[i].buffer_size);
        }
 }
@@ -217,6 +218,14 @@ sof_ipc4_get_input_pin_audio_fmt(struct snd_sof_widget *swidget, int pin_index)
        }
 
        process = swidget->private;
+
+       /*
+        * For process modules without base config extension, base module config
+        * format is used for all input pins
+        */
+       if (process->init_config != SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT)
+               return &process->base_config.audio_fmt;
+
        base_cfg_ext = process->base_config_ext;
 
        /*
@@ -1422,7 +1431,7 @@ static int snd_sof_get_hw_config_params(struct snd_sof_dev *sdev, struct snd_sof
 
 static int
 snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
-                              bool single_format,
+                              bool single_bitdepth,
                               struct snd_pcm_hw_params *params, u32 dai_index,
                               u32 linktype, u8 dir, u32 **dst, u32 *len)
 {
@@ -1445,7 +1454,7 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
                 * Look for 32-bit blob first instead of 16-bit if copier
                 * supports multiple formats
                 */
-               if (bit_depth == 16 && !single_format) {
+               if (bit_depth == 16 && !single_bitdepth) {
                        dev_dbg(sdev->dev, "Looking for 32-bit blob first for DMIC\n");
                        format_change = true;
                        bit_depth = 32;
@@ -1483,6 +1492,8 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
                                           dir, dev_type);
 
        if (!cfg) {
+               bool get_new_blob = false;
+
                if (format_change) {
                        /*
                         * The 32-bit blob was not found in NHLT table, try to
@@ -1490,7 +1501,20 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
                         */
                        bit_depth = params_width(params);
                        format_change = false;
+                       get_new_blob = true;
+               } else if (linktype == SOF_DAI_INTEL_DMIC && !single_bitdepth) {
+                       /*
+                        * The requested 32-bit blob (no format change for the
+                        * blob request) was not found in NHLT table, try to
+                        * look for 16-bit blob if the copier supports multiple
+                        * formats
+                        */
+                       bit_depth = 16;
+                       format_change = true;
+                       get_new_blob = true;
+               }
 
+               if (get_new_blob) {
                        cfg = intel_nhlt_get_endpoint_blob(sdev->dev, ipc4_data->nhlt,
                                                           dai_index, nhlt_type,
                                                           bit_depth, bit_depth,
@@ -1513,8 +1537,8 @@ out:
 
        if (format_change) {
                /*
-                * Update the params to reflect that we have loaded 32-bit blob
-                * instead of the 16-bit.
+                * Update the params to reflect that different blob was loaded
+                * instead of the requested bit depth (16 -> 32 or 32 -> 16).
                 * This information is going to be used by the caller to find
                 * matching copier format on the dai side.
                 */
@@ -1522,7 +1546,11 @@ out:
 
                m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
                snd_mask_none(m);
-               snd_mask_set_format(m, SNDRV_PCM_FORMAT_S32_LE);
+               if (bit_depth == 16)
+                       snd_mask_set_format(m, SNDRV_PCM_FORMAT_S16_LE);
+               else
+                       snd_mask_set_format(m, SNDRV_PCM_FORMAT_S32_LE);
+
        }
 
        return 0;
@@ -1530,7 +1558,7 @@ out:
 #else
 static int
 snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
-                              bool single_format,
+                              bool single_bitdepth,
                               struct snd_pcm_hw_params *params, u32 dai_index,
                               u32 linktype, u8 dir, u32 **dst, u32 *len)
 {
@@ -1538,9 +1566,9 @@ snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai
 }
 #endif
 
-bool sof_ipc4_copier_is_single_format(struct snd_sof_dev *sdev,
-                                     struct sof_ipc4_pin_format *pin_fmts,
-                                     u32 pin_fmts_size)
+bool sof_ipc4_copier_is_single_bitdepth(struct snd_sof_dev *sdev,
+                                       struct sof_ipc4_pin_format *pin_fmts,
+                                       u32 pin_fmts_size)
 {
        struct sof_ipc4_audio_format *fmt;
        u32 valid_bits;
@@ -1563,6 +1591,55 @@ bool sof_ipc4_copier_is_single_format(struct snd_sof_dev *sdev,
        return true;
 }
 
+static int
+sof_ipc4_adjust_params_to_dai_format(struct snd_sof_dev *sdev,
+                                    struct snd_pcm_hw_params *params,
+                                    struct sof_ipc4_pin_format *pin_fmts,
+                                    u32 pin_fmts_size)
+{
+       u32 params_mask = BIT(SNDRV_PCM_HW_PARAM_RATE) |
+                         BIT(SNDRV_PCM_HW_PARAM_CHANNELS) |
+                         BIT(SNDRV_PCM_HW_PARAM_FORMAT);
+       struct sof_ipc4_audio_format *fmt;
+       u32 rate, channels, valid_bits;
+       int i;
+
+       fmt = &pin_fmts[0].audio_fmt;
+       rate = fmt->sampling_frequency;
+       channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
+       valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+
+       /* check if parameters in topology defined formats are the same */
+       for (i = 1; i < pin_fmts_size; i++) {
+               u32 val;
+
+               fmt = &pin_fmts[i].audio_fmt;
+
+               if (params_mask & BIT(SNDRV_PCM_HW_PARAM_RATE)) {
+                       val = fmt->sampling_frequency;
+                       if (val != rate)
+                               params_mask &= ~BIT(SNDRV_PCM_HW_PARAM_RATE);
+               }
+               if (params_mask & BIT(SNDRV_PCM_HW_PARAM_CHANNELS)) {
+                       val = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
+                       if (val != channels)
+                               params_mask &= ~BIT(SNDRV_PCM_HW_PARAM_CHANNELS);
+               }
+               if (params_mask & BIT(SNDRV_PCM_HW_PARAM_FORMAT)) {
+                       val = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+                       if (val != valid_bits)
+                               params_mask &= ~BIT(SNDRV_PCM_HW_PARAM_FORMAT);
+               }
+       }
+
+       if (params_mask)
+               return sof_ipc4_update_hw_params(sdev, params,
+                                                &pin_fmts[0].audio_fmt,
+                                                params_mask);
+
+       return 0;
+}
+
 static int
 sof_ipc4_prepare_dai_copier(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
                            struct snd_pcm_hw_params *params, int dir)
@@ -1570,8 +1647,10 @@ sof_ipc4_prepare_dai_copier(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
        struct sof_ipc4_available_audio_format *available_fmt;
        struct snd_pcm_hw_params dai_params = *params;
        struct sof_ipc4_copier_data *copier_data;
+       struct sof_ipc4_pin_format *pin_fmts;
        struct sof_ipc4_copier *ipc4_copier;
-       bool single_format;
+       bool single_bitdepth;
+       u32 num_pin_fmts;
        int ret;
 
        ipc4_copier = dai->private;
@@ -1579,40 +1658,26 @@ sof_ipc4_prepare_dai_copier(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
        available_fmt = &ipc4_copier->available_fmt;
 
        /*
-        * If the copier on the DAI side supports only single bit depth then
-        * this depth (format) should be used to look for the NHLT blob (if
-        * needed) and in case of capture this should be used for the input
-        * format lookup
+        * Fixup the params based on the format parameters of the DAI. If any
+        * of the RATE, CHANNELS, bit depth is static among the formats then
+        * narrow the params to only allow that specific parameter value.
         */
        if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
-               single_format = sof_ipc4_copier_is_single_format(sdev,
-                                               available_fmt->output_pin_fmts,
-                                               available_fmt->num_output_formats);
-
-               /* Update the dai_params with the only supported format */
-               if (single_format) {
-                       ret = sof_ipc4_update_hw_params(sdev, &dai_params,
-                                       &available_fmt->output_pin_fmts[0].audio_fmt,
-                                       BIT(SNDRV_PCM_HW_PARAM_FORMAT));
-                       if (ret)
-                               return ret;
-               }
+               pin_fmts = available_fmt->output_pin_fmts;
+               num_pin_fmts = available_fmt->num_output_formats;
        } else {
-               single_format = sof_ipc4_copier_is_single_format(sdev,
-                                               available_fmt->input_pin_fmts,
-                                               available_fmt->num_input_formats);
-
-               /* Update the dai_params with the only supported format */
-               if (single_format) {
-                       ret = sof_ipc4_update_hw_params(sdev, &dai_params,
-                                       &available_fmt->input_pin_fmts[0].audio_fmt,
-                                       BIT(SNDRV_PCM_HW_PARAM_FORMAT));
-                       if (ret)
-                               return ret;
-               }
+               pin_fmts = available_fmt->input_pin_fmts;
+               num_pin_fmts = available_fmt->num_input_formats;
        }
 
-       ret = snd_sof_get_nhlt_endpoint_data(sdev, dai, single_format,
+       ret = sof_ipc4_adjust_params_to_dai_format(sdev, &dai_params, pin_fmts,
+                                                  num_pin_fmts);
+       if (ret)
+               return ret;
+
+       single_bitdepth = sof_ipc4_copier_is_single_bitdepth(sdev, pin_fmts,
+                                                            num_pin_fmts);
+       ret = snd_sof_get_nhlt_endpoint_data(sdev, dai, single_bitdepth,
                                             &dai_params,
                                             ipc4_copier->dai_index,
                                             ipc4_copier->dai_type, dir,
@@ -1647,7 +1712,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
        u32 out_ref_rate, out_ref_channels;
        u32 deep_buffer_dma_ms = 0;
        int output_fmt_index;
-       bool single_output_format;
+       bool single_output_bitdepth;
        int i;
 
        dev_dbg(sdev->dev, "copier %s, type %d", swidget->widget->name, swidget->id);
@@ -1784,9 +1849,9 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
                return ret;
 
        /* set the reference params for output format selection */
-       single_output_format = sof_ipc4_copier_is_single_format(sdev,
-                                                               available_fmt->output_pin_fmts,
-                                                               available_fmt->num_output_formats);
+       single_output_bitdepth = sof_ipc4_copier_is_single_bitdepth(sdev,
+                                       available_fmt->output_pin_fmts,
+                                       available_fmt->num_output_formats);
        switch (swidget->id) {
        case snd_soc_dapm_aif_in:
        case snd_soc_dapm_dai_out:
@@ -1798,7 +1863,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
                out_ref_rate = in_fmt->sampling_frequency;
                out_ref_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(in_fmt->fmt_cfg);
 
-               if (!single_output_format)
+               if (!single_output_bitdepth)
                        out_ref_valid_bits =
                                SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(in_fmt->fmt_cfg);
                break;
@@ -1807,7 +1872,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
        case snd_soc_dapm_dai_in:
                out_ref_rate = params_rate(fe_params);
                out_ref_channels = params_channels(fe_params);
-               if (!single_output_format) {
+               if (!single_output_bitdepth) {
                        out_ref_valid_bits = sof_ipc4_get_valid_bits(sdev, fe_params);
                        if (out_ref_valid_bits < 0)
                                return out_ref_valid_bits;
@@ -1825,7 +1890,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
         * if the output format is the same across all available output formats, choose
         * that as the reference.
         */
-       if (single_output_format) {
+       if (single_output_bitdepth) {
                struct sof_ipc4_audio_format *out_fmt;
 
                out_fmt = &available_fmt->output_pin_fmts[0].audio_fmt;
index 4488762f6a71ba47322e0a1b7024833fed727612..f4dc499c0ffe55cb2c2dbdcd57efff4b1f38ba5e 100644 (file)
@@ -476,7 +476,7 @@ struct sof_ipc4_process {
        u32 init_config;
 };
 
-bool sof_ipc4_copier_is_single_format(struct snd_sof_dev *sdev,
-                                     struct sof_ipc4_pin_format *pin_fmts,
-                                     u32 pin_fmts_size);
+bool sof_ipc4_copier_is_single_bitdepth(struct snd_sof_dev *sdev,
+                                       struct sof_ipc4_pin_format *pin_fmts,
+                                       u32 pin_fmts_size);
 #endif
index c63e0d2f4b9686cc86877715e1893b580aae2c94..bea1b9d9ca2886db2b1f513ab10327b77f10b764 100644 (file)
@@ -666,6 +666,7 @@ static struct platform_driver snd_sof_of_mt8186_driver = {
 };
 module_platform_driver(snd_sof_of_mt8186_driver);
 
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for MT8186/MT8188 platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
 MODULE_IMPORT_NS(SND_SOC_SOF_MTK_COMMON);
-MODULE_LICENSE("Dual BSD/GPL");
index fc1c016104aee329d32433d03e19355e0541f2e6..31dc98d1b1d8bddde8f91047125ea034b912f691 100644 (file)
@@ -619,6 +619,7 @@ static struct platform_driver snd_sof_of_mt8195_driver = {
 };
 module_platform_driver(snd_sof_of_mt8195_driver);
 
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for MTL 8195 platforms");
 MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
 MODULE_IMPORT_NS(SND_SOC_SOF_MTK_COMMON);
-MODULE_LICENSE("Dual BSD/GPL");
index de8dbe27cd0deffc0a6708fcce6bfa0186c73cad..20bcf5590eb8855797d6cc61ef36e676d9dd6ceb 100644 (file)
@@ -82,3 +82,4 @@ void mtk_adsp_dump(struct snd_sof_dev *sdev, u32 flags)
 EXPORT_SYMBOL(mtk_adsp_dump);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF helpers for MTK ADSP platforms");
index fdcbe33d3dcfb8346dfd2977e7cfcb70887256f6..b12b3d865ae30c9078bb33c5d25ab36b81a72a34 100644 (file)
@@ -110,7 +110,7 @@ static struct platform_driver sof_nocodec_audio = {
 };
 module_platform_driver(sof_nocodec_audio)
 
+MODULE_LICENSE("Dual BSD/GPL");
 MODULE_DESCRIPTION("ASoC sof nocodec");
 MODULE_AUTHOR("Liam Girdwood");
-MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS("platform:sof-nocodec");
index 2d96d00f1c449a7ba98b8cb5f531a107e844e880..b196b2b74c2647124b5876e2ca1a222524d88cfb 100644 (file)
@@ -100,3 +100,4 @@ void sof_acpi_remove(struct platform_device *pdev)
 EXPORT_SYMBOL_NS(sof_acpi_remove, SND_SOC_SOF_ACPI_DEV);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for ACPI platforms");
index 43561492609252e598dc7d474783a9fb285ad195..e7d2001140e81a93e46d8d05d8f1ca45ef05ef14 100644 (file)
@@ -394,6 +394,6 @@ static struct auxiliary_driver sof_ipc_flood_client_drv = {
 
 module_auxiliary_driver(sof_ipc_flood_client_drv);
 
-MODULE_DESCRIPTION("SOF IPC Flood Test Client Driver");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SOF IPC Flood Test Client Driver");
 MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
index 6973b6690df4b8f838bd8d7c316d69b03d99ed7a..d3f541069b24b8fc4dfaa8a1d4774e6e30db6cc5 100644 (file)
@@ -157,6 +157,6 @@ static struct auxiliary_driver sof_msg_inject_client_drv = {
 
 module_auxiliary_driver(sof_msg_inject_client_drv);
 
-MODULE_DESCRIPTION("SOF IPC Kernel Injector Client Driver");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SOF IPC Kernel Injector Client Driver");
 MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
index af22e6421029c67a4bb498fd16d47857892e5f29..d0f8beb9d0008d1ccfc1a9eadd13324108d5e1de 100644 (file)
@@ -335,6 +335,6 @@ static struct auxiliary_driver sof_msg_inject_client_drv = {
 
 module_auxiliary_driver(sof_msg_inject_client_drv);
 
-MODULE_DESCRIPTION("SOF IPC Message Injector Client Driver");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SOF IPC Message Injector Client Driver");
 MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
index b8f297307565cc630b87aaca1cec9973e9a8868b..ccc7d38ddc38340686c93282d83f4e427a5febc8 100644 (file)
@@ -540,6 +540,6 @@ static struct auxiliary_driver sof_probes_client_drv = {
 
 module_auxiliary_driver(sof_probes_client_drv);
 
-MODULE_DESCRIPTION("SOF Probes Client Driver");
 MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SOF Probes Client Driver");
 MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
index b9a499e92b9a57514effeb54dbaf1e8fa43f42dd..71f7153cf79c6915f95d53a3625aa86bd896272a 100644 (file)
@@ -93,3 +93,4 @@ void sof_of_shutdown(struct platform_device *pdev)
 EXPORT_SYMBOL(sof_of_shutdown);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for OF/DT platforms");
index 4365405783e61de3dbe4ca4f9b71b68996f1d869..38f2187da5de185f70d62dca8a26df5a457cf697 100644 (file)
@@ -304,3 +304,4 @@ void sof_pci_shutdown(struct pci_dev *pci)
 EXPORT_SYMBOL_NS(sof_pci_shutdown, SND_SOC_SOF_PCI_DEV);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF support for PCI platforms");
index cad041bf56ccf62ab2d392690eb43287d6855776..44608682e9f8f678dd7b827145cc97507b4dc217 100644 (file)
@@ -73,3 +73,4 @@ int snd_sof_create_page_table(struct device *dev,
 EXPORT_SYMBOL(snd_sof_create_page_table);
 
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF utils");
index eb71303aa24c6ae61f7114b877dd5958a72275cb..794c7bbccbaf9202d836d340229bec9108600aa1 100644 (file)
@@ -125,5 +125,3 @@ int sof_stream_pcm_close(struct snd_sof_dev *sdev,
        return 0;
 }
 EXPORT_SYMBOL(sof_stream_pcm_close);
-
-MODULE_LICENSE("Dual BSD/GPL");
index ccbc3fcdadd5ec8a6f66f868d504f4d9f539aea2..3cf8c84beff9570f559cc1fc027af9dc5766190d 100644 (file)
@@ -151,5 +151,5 @@ const struct dsp_arch_ops sof_xtensa_arch_ops = {
 };
 EXPORT_SYMBOL_NS(sof_xtensa_arch_ops, SND_SOC_SOF_XTENSA);
 
-MODULE_DESCRIPTION("SOF Xtensa DSP support");
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF Xtensa DSP support");
index d9520cb826b31f545b6604ff770c1c5891bcada0..af393c7dee1f189d9143b0c33e45eaed40b3bff6 100644 (file)
@@ -728,7 +728,7 @@ static int sets_patch(struct object *obj)
 
 static int symbols_patch(struct object *obj)
 {
-       int err;
+       off_t err;
 
        if (__symbols_patch(obj, &obj->structs)  ||
            __symbols_patch(obj, &obj->unions)   ||
index a8188202413ec4cab8ea0c1df92637a64750cb4e..43742ac5b00da0aa66660d86cb627561d8eaf298 100644 (file)
@@ -148,6 +148,7 @@ enum {
        NETDEV_A_QSTATS_RX_ALLOC_FAIL,
        NETDEV_A_QSTATS_RX_HW_DROPS,
        NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS,
+       NETDEV_A_QSTATS_RX_CSUM_COMPLETE,
        NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY,
        NETDEV_A_QSTATS_RX_CSUM_NONE,
        NETDEV_A_QSTATS_RX_CSUM_BAD,
index a336786a22a38d9b36a9062f733a01f1d1cb312a..50befe125ddc550997fc0c8452c05c8e6f955579 100644 (file)
@@ -392,11 +392,41 @@ static int probe_uprobe_multi_link(int token_fd)
        link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
        err = -errno; /* close() can clobber errno */
 
+       if (link_fd >= 0 || err != -EBADF) {
+               if (link_fd >= 0)
+                       close(link_fd);
+               close(prog_fd);
+               return 0;
+       }
+
+       /* Initial multi-uprobe support in kernel didn't handle PID filtering
+        * correctly (it was doing thread filtering, not process filtering).
+        * So now we'll detect if PID filtering logic was fixed, and, if not,
+        * we'll pretend multi-uprobes are not supported, if not.
+        * Multi-uprobes are used in USDT attachment logic, and we need to be
+        * conservative here, because multi-uprobe selection happens early at
+        * load time, while the use of PID filtering is known late at
+        * attachment time, at which point it's too late to undo multi-uprobe
+        * selection.
+        *
+        * Creating uprobe with pid == -1 for (invalid) '/' binary will fail
+        * early with -EINVAL on kernels with fixed PID filtering logic;
+        * otherwise -ESRCH would be returned if passed correct binary path
+        * (but we'll just get -BADF, of course).
+        */
+       link_opts.uprobe_multi.pid = -1; /* invalid PID */
+       link_opts.uprobe_multi.path = "/"; /* invalid path */
+       link_opts.uprobe_multi.offsets = &offset;
+       link_opts.uprobe_multi.cnt = 1;
+
+       link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
+       err = -errno; /* close() can clobber errno */
+
        if (link_fd >= 0)
                close(link_fd);
        close(prog_fd);
 
-       return link_fd < 0 && err == -EBADF;
+       return link_fd < 0 && err == -EINVAL;
 }
 
 static int probe_kern_bpf_cookie(int token_fd)
index c519cc89c97f42a3849130aebdfcdfb14b44ebf6..0a56e22240fc8b8b026db7d72e0f5055cc420a1a 100644 (file)
@@ -41,6 +41,16 @@ union core_pstate {
                unsigned res1:31;
                unsigned en:1;
        } pstatedef;
+       /* since fam 1Ah: */
+       struct {
+               unsigned fid:12;
+               unsigned res1:2;
+               unsigned vid:8;
+               unsigned iddval:8;
+               unsigned idddiv:2;
+               unsigned res2:31;
+               unsigned en:1;
+       } pstatedef2;
        unsigned long long val;
 };
 
@@ -48,6 +58,10 @@ static int get_did(union core_pstate pstate)
 {
        int t;
 
+       /* Fam 1Ah onward do not use did */
+       if (cpupower_cpu_info.family >= 0x1A)
+               return 0;
+
        if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF)
                t = pstate.pstatedef.did;
        else if (cpupower_cpu_info.family == 0x12)
@@ -61,12 +75,18 @@ static int get_did(union core_pstate pstate)
 static int get_cof(union core_pstate pstate)
 {
        int t;
-       int fid, did, cof;
+       int fid, did, cof = 0;
 
        did = get_did(pstate);
        if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF) {
-               fid = pstate.pstatedef.fid;
-               cof = 200 * fid / did;
+               if (cpupower_cpu_info.family >= 0x1A) {
+                       fid = pstate.pstatedef2.fid;
+                       if (fid > 0x0f)
+                               cof = (fid * 5);
+               } else {
+                       fid = pstate.pstatedef.fid;
+                       cof = 200 * fid / did;
+               }
        } else {
                t = 0x10;
                fid = pstate.pstate.fid;
index 6584443144de89d3a344758e8ccf831c8bc3536d..eaf091a3d33137a4a09598e5d63145b4a7a4e71b 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/platform_device.h>
 #include <linux/mod_devicetable.h>
+#include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/sizes.h>
index 5af9ba8a4645bc5fb86f724e8619e8e2d25ca11a..c1ce39874e2b5451d1caac769925ca4c0351fb98 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 #
 
-CFLAGS += $(shell pkg-config --cflags alsa)
+CFLAGS += $(shell pkg-config --cflags alsa) $(KHDR_INCLUDES)
 LDLIBS += $(shell pkg-config --libs alsa)
 ifeq ($(LDLIBS),)
 LDLIBS += -lasound
index 15ee7b2fc4106428b3210de62df53a6c125060ec..b9135720024ccfc37b0a6946840248aa3e7f948f 100644 (file)
@@ -73,6 +73,16 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex,
                         "up primary");
        ASSERT_OK(system("ip addr add dev " netkit_name " 10.0.0.1/24"),
                         "addr primary");
+
+       if (mode == NETKIT_L3) {
+               ASSERT_EQ(system("ip link set dev " netkit_name
+                                " addr ee:ff:bb:cc:aa:dd 2> /dev/null"), 512,
+                                "set hwaddress");
+       } else {
+               ASSERT_OK(system("ip link set dev " netkit_name
+                                " addr ee:ff:bb:cc:aa:dd"),
+                                "set hwaddress");
+       }
        if (same_netns) {
                ASSERT_OK(system("ip link set dev " netkit_peer " up"),
                                 "up peer");
@@ -89,6 +99,16 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex,
        return err;
 }
 
+static void move_netkit(void)
+{
+       ASSERT_OK(system("ip link set " netkit_peer " netns foo"),
+                        "move peer");
+       ASSERT_OK(system("ip netns exec foo ip link set dev "
+                        netkit_peer " up"), "up peer");
+       ASSERT_OK(system("ip netns exec foo ip addr add dev "
+                        netkit_peer " 10.0.0.2/24"), "addr peer");
+}
+
 static void destroy_netkit(void)
 {
        ASSERT_OK(system("ip link del dev " netkit_name), "del primary");
@@ -685,3 +705,77 @@ void serial_test_tc_netkit_neigh_links(void)
        serial_test_tc_netkit_neigh_links_target(NETKIT_L2, BPF_NETKIT_PRIMARY);
        serial_test_tc_netkit_neigh_links_target(NETKIT_L3, BPF_NETKIT_PRIMARY);
 }
+
+static void serial_test_tc_netkit_pkt_type_mode(int mode)
+{
+       LIBBPF_OPTS(bpf_netkit_opts, optl_nk);
+       LIBBPF_OPTS(bpf_tcx_opts, optl_tcx);
+       int err, ifindex, ifindex2;
+       struct test_tc_link *skel;
+       struct bpf_link *link;
+
+       err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS,
+                           &ifindex, true);
+       if (err)
+               return;
+
+       ifindex2 = if_nametoindex(netkit_peer);
+       ASSERT_NEQ(ifindex, ifindex2, "ifindex_1_2");
+
+       skel = test_tc_link__open();
+       if (!ASSERT_OK_PTR(skel, "skel_open"))
+               goto cleanup;
+
+       ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+                 BPF_NETKIT_PRIMARY), 0, "tc1_attach_type");
+       ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc7,
+                 BPF_TCX_INGRESS), 0, "tc7_attach_type");
+
+       err = test_tc_link__load(skel);
+       if (!ASSERT_OK(err, "skel_load"))
+               goto cleanup;
+
+       assert_mprog_count_ifindex(ifindex,  BPF_NETKIT_PRIMARY, 0);
+       assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 0);
+
+       link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl_nk);
+       if (!ASSERT_OK_PTR(link, "link_attach"))
+               goto cleanup;
+
+       skel->links.tc1 = link;
+
+       assert_mprog_count_ifindex(ifindex,  BPF_NETKIT_PRIMARY, 1);
+       assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 0);
+
+       link = bpf_program__attach_tcx(skel->progs.tc7, ifindex2, &optl_tcx);
+       if (!ASSERT_OK_PTR(link, "link_attach"))
+               goto cleanup;
+
+       skel->links.tc7 = link;
+
+       assert_mprog_count_ifindex(ifindex,  BPF_NETKIT_PRIMARY, 1);
+       assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 1);
+
+       move_netkit();
+
+       tc_skel_reset_all_seen(skel);
+       skel->bss->set_type = true;
+       ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+       ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+       ASSERT_EQ(skel->bss->seen_tc7, true, "seen_tc7");
+
+       ASSERT_EQ(skel->bss->seen_host,  true, "seen_host");
+       ASSERT_EQ(skel->bss->seen_mcast, true, "seen_mcast");
+cleanup:
+       test_tc_link__destroy(skel);
+
+       assert_mprog_count_ifindex(ifindex,  BPF_NETKIT_PRIMARY, 0);
+       destroy_netkit();
+}
+
+void serial_test_tc_netkit_pkt_type(void)
+{
+       serial_test_tc_netkit_pkt_type_mode(NETKIT_L2);
+       serial_test_tc_netkit_pkt_type_mode(NETKIT_L3);
+}
index 8269cdee33ae97949c89f363b32d3d911b7dac21..bf6ca8e3eb131484300316d8097e9efac9454cbd 100644 (file)
@@ -1,12 +1,14 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <unistd.h>
+#include <pthread.h>
 #include <test_progs.h>
 #include "uprobe_multi.skel.h"
 #include "uprobe_multi_bench.skel.h"
 #include "uprobe_multi_usdt.skel.h"
 #include "bpf/libbpf_internal.h"
 #include "testing_helpers.h"
+#include "../sdt.h"
 
 static char test_data[] = "test_data";
 
@@ -25,9 +27,17 @@ noinline void uprobe_multi_func_3(void)
        asm volatile ("");
 }
 
+noinline void usdt_trigger(void)
+{
+       STAP_PROBE(test, pid_filter_usdt);
+}
+
 struct child {
        int go[2];
+       int c2p[2]; /* child -> parent channel */
        int pid;
+       int tid;
+       pthread_t thread;
 };
 
 static void release_child(struct child *child)
@@ -38,6 +48,10 @@ static void release_child(struct child *child)
                return;
        close(child->go[1]);
        close(child->go[0]);
+       if (child->thread)
+               pthread_join(child->thread, NULL);
+       close(child->c2p[0]);
+       close(child->c2p[1]);
        if (child->pid > 0)
                waitpid(child->pid, &child_status, 0);
 }
@@ -63,7 +77,7 @@ static struct child *spawn_child(void)
        if (pipe(child.go))
                return NULL;
 
-       child.pid = fork();
+       child.pid = child.tid = fork();
        if (child.pid < 0) {
                release_child(&child);
                errno = EINVAL;
@@ -82,6 +96,7 @@ static struct child *spawn_child(void)
                uprobe_multi_func_1();
                uprobe_multi_func_2();
                uprobe_multi_func_3();
+               usdt_trigger();
 
                exit(errno);
        }
@@ -89,6 +104,67 @@ static struct child *spawn_child(void)
        return &child;
 }
 
+static void *child_thread(void *ctx)
+{
+       struct child *child = ctx;
+       int c = 0, err;
+
+       child->tid = syscall(SYS_gettid);
+
+       /* let parent know we are ready */
+       err = write(child->c2p[1], &c, 1);
+       if (err != 1)
+               pthread_exit(&err);
+
+       /* wait for parent's kick */
+       err = read(child->go[0], &c, 1);
+       if (err != 1)
+               pthread_exit(&err);
+
+       uprobe_multi_func_1();
+       uprobe_multi_func_2();
+       uprobe_multi_func_3();
+       usdt_trigger();
+
+       err = 0;
+       pthread_exit(&err);
+}
+
+static struct child *spawn_thread(void)
+{
+       static struct child child;
+       int c, err;
+
+       /* pipe to notify child to execute the trigger functions */
+       if (pipe(child.go))
+               return NULL;
+       /* pipe to notify parent that child thread is ready */
+       if (pipe(child.c2p)) {
+               close(child.go[0]);
+               close(child.go[1]);
+               return NULL;
+       }
+
+       child.pid = getpid();
+
+       err = pthread_create(&child.thread, NULL, child_thread, &child);
+       if (err) {
+               err = -errno;
+               close(child.go[0]);
+               close(child.go[1]);
+               close(child.c2p[0]);
+               close(child.c2p[1]);
+               errno = -err;
+               return NULL;
+       }
+
+       err = read(child.c2p[0], &c, 1);
+       if (!ASSERT_EQ(err, 1, "child_thread_ready"))
+               return NULL;
+
+       return &child;
+}
+
 static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
 {
        skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
@@ -103,15 +179,23 @@ static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child
         * passed at the probe attach.
         */
        skel->bss->pid = child ? 0 : getpid();
+       skel->bss->expect_pid = child ? child->pid : 0;
+
+       /* trigger all probes, if we are testing child *process*, just to make
+        * sure that PID filtering doesn't let through activations from wrong
+        * PIDs; when we test child *thread*, we don't want to do this to
+        * avoid double counting number of triggering events
+        */
+       if (!child || !child->thread) {
+               uprobe_multi_func_1();
+               uprobe_multi_func_2();
+               uprobe_multi_func_3();
+               usdt_trigger();
+       }
 
        if (child)
                kick_child(child);
 
-       /* trigger all probes */
-       uprobe_multi_func_1();
-       uprobe_multi_func_2();
-       uprobe_multi_func_3();
-
        /*
         * There are 2 entry and 2 exit probe called for each uprobe_multi_func_[123]
         * function and each slepable probe (6) increments uprobe_multi_sleep_result.
@@ -126,8 +210,12 @@ static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child
 
        ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");
 
-       if (child)
+       ASSERT_FALSE(skel->bss->bad_pid_seen, "bad_pid_seen");
+
+       if (child) {
                ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
+               ASSERT_EQ(skel->bss->child_tid, child->tid, "uprobe_multi_child_tid");
+       }
 }
 
 static void test_skel_api(void)
@@ -190,8 +278,24 @@ __test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_mul
        if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
                goto cleanup;
 
+       /* Attach (uprobe-backed) USDTs */
+       skel->links.usdt_pid = bpf_program__attach_usdt(skel->progs.usdt_pid, pid, binary,
+                                                       "test", "pid_filter_usdt", NULL);
+       if (!ASSERT_OK_PTR(skel->links.usdt_pid, "attach_usdt_pid"))
+               goto cleanup;
+
+       skel->links.usdt_extra = bpf_program__attach_usdt(skel->progs.usdt_extra, -1, binary,
+                                                         "test", "pid_filter_usdt", NULL);
+       if (!ASSERT_OK_PTR(skel->links.usdt_extra, "attach_usdt_extra"))
+               goto cleanup;
+
        uprobe_multi_test_run(skel, child);
 
+       ASSERT_FALSE(skel->bss->bad_pid_seen_usdt, "bad_pid_seen_usdt");
+       if (child) {
+               ASSERT_EQ(skel->bss->child_pid_usdt, child->pid, "usdt_multi_child_pid");
+               ASSERT_EQ(skel->bss->child_tid_usdt, child->tid, "usdt_multi_child_tid");
+       }
 cleanup:
        uprobe_multi__destroy(skel);
 }
@@ -210,6 +314,13 @@ test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi
                return;
 
        __test_attach_api(binary, pattern, opts, child);
+
+       /* pid filter (thread) */
+       child = spawn_thread();
+       if (!ASSERT_OK_PTR(child, "spawn_thread"))
+               return;
+
+       __test_attach_api(binary, pattern, opts, child);
 }
 
 static void test_attach_api_pattern(void)
@@ -397,7 +508,7 @@ static void test_attach_api_fails(void)
        link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
        if (!ASSERT_ERR(link_fd, "link_fd"))
                goto cleanup;
-       ASSERT_EQ(link_fd, -ESRCH, "pid_is_wrong");
+       ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");
 
 cleanup:
        if (link_fd >= 0)
@@ -495,6 +606,13 @@ static void test_link_api(void)
                return;
 
        __test_link_api(child);
+
+       /* pid filter (thread) */
+       child = spawn_thread();
+       if (!ASSERT_OK_PTR(child, "spawn_thread"))
+               return;
+
+       __test_link_api(child);
 }
 
 static void test_bench_attach_uprobe(void)
index c60db8beeb734238f4cdb197292dcf1e58578290..1c9c4ec1be11ed5aed370daedd6c5780173dc253 100644 (file)
@@ -67,6 +67,7 @@
 #include "verifier_search_pruning.skel.h"
 #include "verifier_sock.skel.h"
 #include "verifier_sock_addr.skel.h"
+#include "verifier_sockmap_mutate.skel.h"
 #include "verifier_spill_fill.skel.h"
 #include "verifier_spin_lock.skel.h"
 #include "verifier_stack_ptr.skel.h"
@@ -183,6 +184,7 @@ void test_verifier_sdiv(void)                 { RUN(verifier_sdiv); }
 void test_verifier_search_pruning(void)       { RUN(verifier_search_pruning); }
 void test_verifier_sock(void)                 { RUN(verifier_sock); }
 void test_verifier_sock_addr(void)            { RUN(verifier_sock_addr); }
+void test_verifier_sockmap_mutate(void)       { RUN(verifier_sockmap_mutate); }
 void test_verifier_spill_fill(void)           { RUN(verifier_spill_fill); }
 void test_verifier_spin_lock(void)            { RUN(verifier_spin_lock); }
 void test_verifier_stack_ptr(void)            { RUN(verifier_stack_ptr); }
index 02e718f06e0f5c5b81c14e9d75c635b7c0837a31..40531e56776e426c247af0472a1d1ed6e9c0e49e 100644 (file)
@@ -84,7 +84,7 @@ int BPF_PROG(trace_tcp_connect, struct sock *sk)
 }
 
 SEC("fexit/inet_csk_accept")
-int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
+int BPF_PROG(inet_csk_accept, struct sock *sk, struct proto_accept_arg *arg,
             struct sock *accepted_sk)
 {
        set_task_info(accepted_sk);
index 992400acb9572aa9b6949566e9eeabd2dcd86cd8..ab3eae3d6af8795006f98c1bb8122fffa3a49d5a 100644 (file)
@@ -4,7 +4,8 @@
 
 #include <linux/bpf.h>
 #include <linux/if_ether.h>
-
+#include <linux/stddef.h>
+#include <linux/if_packet.h>
 #include <bpf/bpf_endian.h>
 #include <bpf/bpf_helpers.h>
 
@@ -16,7 +17,13 @@ bool seen_tc3;
 bool seen_tc4;
 bool seen_tc5;
 bool seen_tc6;
+bool seen_tc7;
+
+bool set_type;
+
 bool seen_eth;
+bool seen_host;
+bool seen_mcast;
 
 SEC("tc/ingress")
 int tc1(struct __sk_buff *skb)
@@ -28,8 +35,16 @@ int tc1(struct __sk_buff *skb)
        if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
                goto out;
        seen_eth = eth.h_proto == bpf_htons(ETH_P_IP);
+       seen_host = skb->pkt_type == PACKET_HOST;
+       if (seen_host && set_type) {
+               eth.h_dest[0] = 4;
+               if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0))
+                       goto fail;
+               bpf_skb_change_type(skb, PACKET_MULTICAST);
+       }
 out:
        seen_tc1 = true;
+fail:
        return TCX_NEXT;
 }
 
@@ -67,3 +82,21 @@ int tc6(struct __sk_buff *skb)
        seen_tc6 = true;
        return TCX_PASS;
 }
+
+SEC("tc/ingress")
+int tc7(struct __sk_buff *skb)
+{
+       struct ethhdr eth = {};
+
+       if (skb->protocol != __bpf_constant_htons(ETH_P_IP))
+               goto out;
+       if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
+               goto out;
+       if (eth.h_dest[0] == 4 && set_type) {
+               seen_mcast = skb->pkt_type == PACKET_MULTICAST;
+               bpf_skb_change_type(skb, PACKET_HOST);
+       }
+out:
+       seen_tc7 = true;
+       return TCX_PASS;
+}
index 419d9aa28fce5cdf94e9976e6f8501a3551b4133..44190efcdba217b507e0c5772f9dfc14af72de25 100644 (file)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
-#include <stdbool.h>
+#include <bpf/usdt.bpf.h>
 
 char _license[] SEC("license") = "GPL";
 
@@ -22,6 +22,13 @@ __u64 uprobe_multi_sleep_result = 0;
 
 int pid = 0;
 int child_pid = 0;
+int child_tid = 0;
+int child_pid_usdt = 0;
+int child_tid_usdt = 0;
+
+int expect_pid = 0;
+bool bad_pid_seen = false;
+bool bad_pid_seen_usdt = false;
 
 bool test_cookie = false;
 void *user_ptr = 0;
@@ -36,11 +43,19 @@ static __always_inline bool verify_sleepable_user_copy(void)
 
 static void uprobe_multi_check(void *ctx, bool is_return, bool is_sleep)
 {
-       child_pid = bpf_get_current_pid_tgid() >> 32;
+       __u64 cur_pid_tgid = bpf_get_current_pid_tgid();
+       __u32 cur_pid;
 
-       if (pid && child_pid != pid)
+       cur_pid = cur_pid_tgid >> 32;
+       if (pid && cur_pid != pid)
                return;
 
+       if (expect_pid && cur_pid != expect_pid)
+               bad_pid_seen = true;
+
+       child_pid = cur_pid_tgid >> 32;
+       child_tid = (__u32)cur_pid_tgid;
+
        __u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
        __u64 addr = bpf_get_func_ip(ctx);
 
@@ -97,5 +112,32 @@ int uretprobe_sleep(struct pt_regs *ctx)
 SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*")
 int uprobe_extra(struct pt_regs *ctx)
 {
+       /* we need this one just to mix PID-filtered and global uprobes */
+       return 0;
+}
+
+SEC("usdt")
+int usdt_pid(struct pt_regs *ctx)
+{
+       __u64 cur_pid_tgid = bpf_get_current_pid_tgid();
+       __u32 cur_pid;
+
+       cur_pid = cur_pid_tgid >> 32;
+       if (pid && cur_pid != pid)
+               return 0;
+
+       if (expect_pid && cur_pid != expect_pid)
+               bad_pid_seen_usdt = true;
+
+       child_pid_usdt = cur_pid_tgid >> 32;
+       child_tid_usdt = (__u32)cur_pid_tgid;
+
+       return 0;
+}
+
+SEC("usdt")
+int usdt_extra(struct pt_regs *ctx)
+{
+       /* we need this one just to mix PID-filtered and global USDT probes */
        return 0;
 }
diff --git a/tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c b/tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c
new file mode 100644 (file)
index 0000000..fe4b123
--- /dev/null
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+
+#define __always_unused __attribute__((unused))
+
+char _license[] SEC("license") = "GPL";
+
+struct sock {
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__sockmap {
+       union {
+               struct sock *sk;
+       };
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKHASH);
+       __uint(max_entries, 1);
+       __type(key, int);
+       __type(value, int);
+} sockhash SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 1);
+       __type(key, int);
+       __type(value, int);
+} sockmap SEC(".maps");
+
+enum { CG_OK = 1 };
+
+int zero = 0;
+
+static __always_inline void test_sockmap_delete(void)
+{
+       bpf_map_delete_elem(&sockmap, &zero);
+       bpf_map_delete_elem(&sockhash, &zero);
+}
+
+static __always_inline void test_sockmap_update(void *sk)
+{
+       if (sk) {
+               bpf_map_update_elem(&sockmap, &zero, sk, BPF_ANY);
+               bpf_map_update_elem(&sockhash, &zero, sk, BPF_ANY);
+       }
+}
+
+static __always_inline void test_sockmap_lookup_and_update(void)
+{
+       struct bpf_sock *sk = bpf_map_lookup_elem(&sockmap, &zero);
+
+       if (sk) {
+               test_sockmap_update(sk);
+               bpf_sk_release(sk);
+       }
+}
+
+static __always_inline void test_sockmap_mutate(void *sk)
+{
+       test_sockmap_delete();
+       test_sockmap_update(sk);
+}
+
+static __always_inline void test_sockmap_lookup_and_mutate(void)
+{
+       test_sockmap_delete();
+       test_sockmap_lookup_and_update();
+}
+
+SEC("action")
+__success
+int test_sched_act(struct __sk_buff *skb)
+{
+       test_sockmap_mutate(skb->sk);
+       return 0;
+}
+
+SEC("classifier")
+__success
+int test_sched_cls(struct __sk_buff *skb)
+{
+       test_sockmap_mutate(skb->sk);
+       return 0;
+}
+
+SEC("flow_dissector")
+__success
+int test_flow_dissector_delete(struct __sk_buff *skb __always_unused)
+{
+       test_sockmap_delete();
+       return 0;
+}
+
+SEC("flow_dissector")
+__failure __msg("program of this type cannot use helper bpf_sk_release")
+int test_flow_dissector_update(struct __sk_buff *skb __always_unused)
+{
+       test_sockmap_lookup_and_update(); /* no access to skb->sk */
+       return 0;
+}
+
+SEC("iter/sockmap")
+__success
+int test_trace_iter(struct bpf_iter__sockmap *ctx)
+{
+       test_sockmap_mutate(ctx->sk);
+       return 0;
+}
+
+SEC("raw_tp/kfree")
+__failure __msg("cannot update sockmap in this context")
+int test_raw_tp_delete(const void *ctx __always_unused)
+{
+       test_sockmap_delete();
+       return 0;
+}
+
+SEC("raw_tp/kfree")
+__failure __msg("cannot update sockmap in this context")
+int test_raw_tp_update(const void *ctx __always_unused)
+{
+       test_sockmap_lookup_and_update();
+       return 0;
+}
+
+SEC("sk_lookup")
+__success
+int test_sk_lookup(struct bpf_sk_lookup *ctx)
+{
+       test_sockmap_mutate(ctx->sk);
+       return 0;
+}
+
+SEC("sk_reuseport")
+__success
+int test_sk_reuseport(struct sk_reuseport_md *ctx)
+{
+       test_sockmap_mutate(ctx->sk);
+       return 0;
+}
+
+SEC("socket")
+__success
+int test_socket_filter(struct __sk_buff *skb)
+{
+       test_sockmap_mutate(skb->sk);
+       return 0;
+}
+
+SEC("sockops")
+__success
+int test_sockops_delete(struct bpf_sock_ops *ctx __always_unused)
+{
+       test_sockmap_delete();
+       return CG_OK;
+}
+
+SEC("sockops")
+__failure __msg("cannot update sockmap in this context")
+int test_sockops_update(struct bpf_sock_ops *ctx)
+{
+       test_sockmap_update(ctx->sk);
+       return CG_OK;
+}
+
+SEC("sockops")
+__success
+int test_sockops_update_dedicated(struct bpf_sock_ops *ctx)
+{
+       bpf_sock_map_update(ctx, &sockmap, &zero, BPF_ANY);
+       bpf_sock_hash_update(ctx, &sockhash, &zero, BPF_ANY);
+       return CG_OK;
+}
+
+SEC("xdp")
+__success
+int test_xdp(struct xdp_md *ctx __always_unused)
+{
+       test_sockmap_lookup_and_mutate();
+       return XDP_PASS;
+}
index b171fd53b004e8372bf9e73bdce84e5429d79b11..632ab44737ec3afca5412172e6df67cff87b1e28 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ // Use ll64
 
 #include <stdio.h>
 #include <stdbool.h>
index 759f86e7d263e43bcd7438b5979cc9815c2d4c2f..2862aae58b79acbe175ab6b36b42798bb99a2225 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ // Use ll64
 
 #include <inttypes.h>
 #include <unistd.h>
index e59d985eeff0c8be95c79fd914ac706439952dc7..048a312abf405243da08a2a28216ed8a98dab4d7 100644 (file)
@@ -1,16 +1,28 @@
-CONFIG_KPROBES=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_EPROBE_EVENTS=y
+CONFIG_FPROBE=y
+CONFIG_FPROBE_EVENTS=y
 CONFIG_FTRACE=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_FUNCTION_GRAPH_RETVAL=y
 CONFIG_FUNCTION_PROFILER=y
-CONFIG_TRACER_SNAPSHOT=y
-CONFIG_STACK_TRACER=y
 CONFIG_HIST_TRIGGERS=y
-CONFIG_SCHED_TRACER=y
-CONFIG_PREEMPT_TRACER=y
 CONFIG_IRQSOFF_TRACER=y
-CONFIG_PREEMPTIRQ_DELAY_TEST=m
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KPROBES=y
+CONFIG_KPROBE_EVENTS=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PREEMPTIRQ_DELAY_TEST=m
+CONFIG_PREEMPT_TRACER=y
+CONFIG_PROBE_EVENTS_BTF_ARGS=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_FTRACE_DIRECT=m
 CONFIG_SAMPLE_TRACE_PRINTK=m
-CONFIG_KALLSYMS_ALL=y
+CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_UPROBES=y
+CONFIG_UPROBE_EVENTS=y
index d3a79da215c8b08a053c09f3547d3f4fc2f74dcc..5f72abe6fa79bbe0d199a8dcabfdb2bbbbf119fe 100644 (file)
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Generic dynamic event - check if duplicate events are caught
-# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README
+# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README events/syscalls/sys_enter_openat
 
 echo 0 > events/enable
 
index 3f74c09c56b62465189346e6a56f2525cb97bd6b..118247b8dd84d8733ee2147f7e27d0d5635cfe5d 100644 (file)
@@ -10,7 +10,6 @@ fail() { #msg
 }
 
 sample_events() {
-    echo > trace
     echo 1 > events/kmem/kmem_cache_free/enable
     echo 1 > tracing_on
     ls > /dev/null
@@ -22,6 +21,7 @@ echo 0 > tracing_on
 echo 0 > events/enable
 
 echo "Get the most frequently calling function"
+echo > trace
 sample_events
 
 target_func=`cat trace | grep -o 'call_site=\([^+]*\)' | sed 's/call_site=//' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'`
@@ -32,7 +32,16 @@ echo > trace
 
 echo "Test event filter function name"
 echo "call_site.function == $target_func" > events/kmem/kmem_cache_free/filter
+
+sample_events
+max_retry=10
+while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
 sample_events
+max_retry=$((max_retry - 1))
+if [ $max_retry -eq 0 ]; then
+       exit_fail
+fi
+done
 
 hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
 misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
@@ -49,7 +58,16 @@ address=`grep " ${target_func}\$" /proc/kallsyms | cut -d' ' -f1`
 
 echo "Test event filter function address"
 echo "call_site.function == 0x$address" > events/kmem/kmem_cache_free/filter
+echo > trace
+sample_events
+max_retry=10
+while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
 sample_events
+max_retry=$((max_retry - 1))
+if [ $max_retry -eq 0 ]; then
+       exit_fail
+fi
+done
 
 hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
 misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
index 1f6981ef7afa063bd8b1185bffa5159ea3e40ce3..ba19b81cef39afc7426c46d3eb1bc4cbe9ead83a 100644 (file)
@@ -30,7 +30,8 @@ find_dot_func() {
        fi
 
        grep " [tT] .*\.isra\..*" /proc/kallsyms | cut -f 3 -d " " | while read f; do
-               if grep -s $f available_filter_functions; then
+               cnt=`grep -s $f available_filter_functions | wc -l`;
+               if [ $cnt -eq 1 ]; then
                        echo $f
                        break
                fi
index 11e157d7533b8fbaf5ac691b3df7057e0ff558c1..78ab2cd111f608aca6d35a80937bd72f3ce0c3d4 100644 (file)
@@ -3,8 +3,6 @@ SUBDIRS := functional
 
 TEST_PROGS := run.sh
 
-.PHONY: all clean
-
 include ../lib.mk
 
 all:
index a392d0917b4e55c145b293e1e60674bc904e74c0..994fa3468f170cb05c258c17298819a78af100bf 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 INCLUDES := -I../include -I../../ $(KHDR_INCLUDES)
-CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) $(KHDR_INCLUDES)
+CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE= -pthread $(INCLUDES) $(KHDR_INCLUDES)
 LDLIBS := -lpthread -lrt
 
 LOCAL_HDRS := \
index 7f3ca5c78df12968f0aa14168f7dc78001b8fff8..215c6cb539b4abc7a48fb00b545ae26e52fd118a 100644 (file)
@@ -360,7 +360,7 @@ out:
 
 int main(int argc, char *argv[])
 {
-       const char *test_name;
+       char *test_name;
        int c, ret;
 
        while ((c = getopt(argc, argv, "bchlot:v:")) != -1) {
index ce8ff8e8ce3a2599832ca908743f06bcef4f6496..ac280dcba996df54626c114b17df779d4be2ba7a 100644 (file)
@@ -183,6 +183,7 @@ TEST_GEN_PROGS_s390x += s390x/sync_regs_test
 TEST_GEN_PROGS_s390x += s390x/tprot
 TEST_GEN_PROGS_s390x += s390x/cmma_test
 TEST_GEN_PROGS_s390x += s390x/debug_test
+TEST_GEN_PROGS_s390x += s390x/shared_zeropage_test
 TEST_GEN_PROGS_s390x += demand_paging_test
 TEST_GEN_PROGS_s390x += dirty_log_test
 TEST_GEN_PROGS_s390x += guest_print_test
diff --git a/tools/testing/selftests/kvm/s390x/shared_zeropage_test.c b/tools/testing/selftests/kvm/s390x/shared_zeropage_test.c
new file mode 100644 (file)
index 0000000..bba0d9a
--- /dev/null
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test shared zeropage handling (with/without storage keys)
+ *
+ * Copyright (C) 2024, Red Hat, Inc.
+ */
+#include <sys/mman.h>
+
+#include <linux/fs.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "kselftest.h"
+#include "ucall_common.h"
+
+static void set_storage_key(void *addr, uint8_t skey)
+{
+       asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
+}
+
+static void guest_code(void)
+{
+       /* Issue some storage key instruction. */
+       set_storage_key((void *)0, 0x98);
+       GUEST_DONE();
+}
+
+/*
+ * Returns 1 if the shared zeropage is mapped, 0 if something else is mapped.
+ * Returns < 0 on error or if nothing is mapped.
+ */
+static int maps_shared_zeropage(int pagemap_fd, void *addr)
+{
+       struct page_region region;
+       struct pm_scan_arg arg = {
+               .start = (uintptr_t)addr,
+               .end = (uintptr_t)addr + 4096,
+               .vec = (uintptr_t)&region,
+               .vec_len = 1,
+               .size = sizeof(struct pm_scan_arg),
+               .category_mask = PAGE_IS_PFNZERO,
+               .category_anyof_mask = PAGE_IS_PRESENT,
+               .return_mask = PAGE_IS_PFNZERO,
+       };
+       return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
+}
+
+int main(int argc, char *argv[])
+{
+       char *mem, *page0, *page1, *page2, tmp;
+       const size_t pagesize = getpagesize();
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm;
+       struct ucall uc;
+       int pagemap_fd;
+
+       ksft_print_header();
+       ksft_set_plan(3);
+
+       /*
+        * We'll use memory that is not mapped into the VM for simplicity.
+        * Shared zeropages are enabled/disabled per-process.
+        */
+       mem = mmap(0, 3 * pagesize, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
+       TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");
+
+       /* Disable THP. Ignore errors on older kernels. */
+       madvise(mem, 3 * pagesize, MADV_NOHUGEPAGE);
+
+       page0 = mem;
+       page1 = page0 + pagesize;
+       page2 = page1 + pagesize;
+
+       /* Can we even detect shared zeropages? */
+       pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+       TEST_REQUIRE(pagemap_fd >= 0);
+
+       tmp = *page0;
+       asm volatile("" : "+r" (tmp));
+       TEST_REQUIRE(maps_shared_zeropage(pagemap_fd, page0) == 1);
+
+       vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+       /* Verify that we get the shared zeropage after VM creation. */
+       tmp = *page1;
+       asm volatile("" : "+r" (tmp));
+       ksft_test_result(maps_shared_zeropage(pagemap_fd, page1) == 1,
+                        "Shared zeropages should be enabled\n");
+
+       /*
+        * Let our VM execute a storage key instruction that should
+        * unshare all shared zeropages.
+        */
+       vcpu_run(vcpu);
+       get_ucall(vcpu, &uc);
+       TEST_ASSERT_EQ(uc.cmd, UCALL_DONE);
+
+       /* Verify that we don't have a shared zeropage anymore. */
+       ksft_test_result(!maps_shared_zeropage(pagemap_fd, page1),
+                        "Shared zeropage should be gone\n");
+
+       /* Verify that we don't get any new shared zeropages. */
+       tmp = *page2;
+       asm volatile("" : "+r" (tmp));
+       ksft_test_result(!maps_shared_zeropage(pagemap_fd, page2),
+                        "Shared zeropages should be disabled\n");
+
+       kvm_vm_free(vm);
+
+       ksft_finished();
+}
index 6b5a9ff88c3d715ae14f12635dcbee9b83ee6bcb..7d063c652be164b589887f546095b58752f09ac3 100644 (file)
@@ -35,6 +35,7 @@
  * See https://sourceware.org/glibc/wiki/Synchronizing_Headers.
  */
 #include <linux/fs.h>
+#include <linux/mount.h>
 
 #include "common.h"
 
@@ -47,6 +48,13 @@ int renameat2(int olddirfd, const char *oldpath, int newdirfd,
 }
 #endif
 
+#ifndef open_tree
+int open_tree(int dfd, const char *filename, unsigned int flags)
+{
+       return syscall(__NR_open_tree, dfd, filename, flags);
+}
+#endif
+
 #ifndef RENAME_EXCHANGE
 #define RENAME_EXCHANGE (1 << 1)
 #endif
@@ -2400,6 +2408,43 @@ TEST_F_FORK(layout1, refer_denied_by_default4)
                                layer_dir_s1d1_refer);
 }
 
+/*
+ * Tests walking through a denied root mount.
+ */
+TEST_F_FORK(layout1, refer_mount_root_deny)
+{
+       const struct landlock_ruleset_attr ruleset_attr = {
+               .handled_access_fs = LANDLOCK_ACCESS_FS_MAKE_DIR,
+       };
+       int root_fd, ruleset_fd;
+
+       /* Creates a mount object from a non-mount point. */
+       set_cap(_metadata, CAP_SYS_ADMIN);
+       root_fd =
+               open_tree(AT_FDCWD, dir_s1d1,
+                         AT_EMPTY_PATH | OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+       clear_cap(_metadata, CAP_SYS_ADMIN);
+       ASSERT_LE(0, root_fd);
+
+       ruleset_fd =
+               landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+       ASSERT_LE(0, ruleset_fd);
+
+       ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+       ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
+       EXPECT_EQ(0, close(ruleset_fd));
+
+       /* Link denied by Landlock: EACCES. */
+       EXPECT_EQ(-1, linkat(root_fd, ".", root_fd, "does_not_exist", 0));
+       EXPECT_EQ(EACCES, errno);
+
+       /* renameat2() always returns EBUSY. */
+       EXPECT_EQ(-1, renameat2(root_fd, ".", root_fd, "does_not_exist", 0));
+       EXPECT_EQ(EBUSY, errno);
+
+       EXPECT_EQ(0, close(root_fd));
+}
+
 TEST_F_FORK(layout1, reparent_link)
 {
        const struct rule layer1[] = {
index 22061204fb691298e6004aaec1839d011bd17ecb..241542441c5177f60f08963f0b63d456d0ecfdbc 100644 (file)
@@ -2,3 +2,4 @@ CONFIG_IPV6=y
 CONFIG_NET_SCH_NETEM=m
 CONFIG_HSR=y
 CONFIG_VETH=y
+CONFIG_BRIDGE=y
index 790294c8af83271d0962e8ddecfc132449bc89fb..3684b813b0f67ec6bf247a2e633f1a09ffec3852 100755 (executable)
@@ -174,6 +174,8 @@ trap cleanup_all_ns EXIT
 setup_hsr_interfaces 0
 do_complete_ping_test
 
+setup_ns ns1 ns2 ns3
+
 setup_hsr_interfaces 1
 do_complete_ping_test
 
index edc030e81a4649e5245c6bc1a336f3c83c28daed..9155c914c064fe542fae87631561692e48104a26 100644 (file)
@@ -15,7 +15,7 @@ ksft_xfail=2
 ksft_skip=4
 
 # namespace list created by setup_ns
-NS_LIST=""
+NS_LIST=()
 
 ##############################################################################
 # Helpers
@@ -27,6 +27,7 @@ __ksft_status_merge()
        local -A weights
        local weight=0
 
+       local i
        for i in "$@"; do
                weights[$i]=$((weight++))
        done
@@ -67,9 +68,7 @@ loopy_wait()
        while true
        do
                local out
-               out=$("$@")
-               local ret=$?
-               if ((!ret)); then
+               if out=$("$@"); then
                        echo -n "$out"
                        return 0
                fi
@@ -139,6 +138,7 @@ cleanup_ns()
        fi
 
        for ns in "$@"; do
+               [ -z "${ns}" ] && continue
                ip netns delete "${ns}" &> /dev/null
                if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then
                        echo "Warn: Failed to remove namespace $ns"
@@ -152,7 +152,7 @@ cleanup_ns()
 
 cleanup_all_ns()
 {
-       cleanup_ns $NS_LIST
+       cleanup_ns "${NS_LIST[@]}"
 }
 
 # setup netns with given names as prefix. e.g
@@ -161,7 +161,7 @@ setup_ns()
 {
        local ns=""
        local ns_name=""
-       local ns_list=""
+       local ns_list=()
        local ns_exist=
        for ns_name in "$@"; do
                # Some test may setup/remove same netns multi times
@@ -177,13 +177,13 @@ setup_ns()
 
                if ! ip netns add "$ns"; then
                        echo "Failed to create namespace $ns_name"
-                       cleanup_ns "$ns_list"
+                       cleanup_ns "${ns_list[@]}"
                        return $ksft_skip
                fi
                ip -n "$ns" link set lo up
-               ! $ns_exist && ns_list="$ns_list $ns"
+               ! $ns_exist && ns_list+=("$ns")
        done
-       NS_LIST="$NS_LIST $ns_list"
+       NS_LIST+=("${ns_list[@]}")
 }
 
 tc_rule_stats_get()
index fefa9173bdaaabf7182b1bc2c626cbdfbe014efa..2b66c5fa71ebff7bf93f712aecab26e7e77134e3 100755 (executable)
@@ -261,6 +261,8 @@ reset()
 
        TEST_NAME="${1}"
 
+       MPTCP_LIB_SUBTEST_FLAKY=0 # reset if modified
+
        if skip_test; then
                MPTCP_LIB_TEST_COUNTER=$((MPTCP_LIB_TEST_COUNTER+1))
                last_test_ignored=1
@@ -448,7 +450,9 @@ reset_with_tcp_filter()
 # $1: err msg
 fail_test()
 {
-       ret=${KSFT_FAIL}
+       if ! mptcp_lib_subtest_is_flaky; then
+               ret=${KSFT_FAIL}
+       fi
 
        if [ ${#} -gt 0 ]; then
                print_fail "${@}"
@@ -3069,6 +3073,7 @@ fullmesh_tests()
 fastclose_tests()
 {
        if reset_check_counter "fastclose test" "MPTcpExtMPFastcloseTx"; then
+               MPTCP_LIB_SUBTEST_FLAKY=1
                test_linkfail=1024 fastclose=client \
                        run_tests $ns1 $ns2 10.0.1.1
                chk_join_nr 0 0 0
@@ -3077,6 +3082,7 @@ fastclose_tests()
        fi
 
        if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
+               MPTCP_LIB_SUBTEST_FLAKY=1
                test_linkfail=1024 fastclose=server \
                        run_tests $ns1 $ns2 10.0.1.1
                chk_join_nr 0 0 0 0 0 0 1
@@ -3095,6 +3101,7 @@ fail_tests()
 {
        # single subflow
        if reset_with_fail "Infinite map" 1; then
+               MPTCP_LIB_SUBTEST_FLAKY=1
                test_linkfail=128 \
                        run_tests $ns1 $ns2 10.0.1.1
                chk_join_nr 0 0 0 +1 +0 1 0 1 "$(pedit_action_pkts)"
@@ -3103,6 +3110,7 @@ fail_tests()
 
        # multiple subflows
        if reset_with_fail "MP_FAIL MP_RST" 2; then
+               MPTCP_LIB_SUBTEST_FLAKY=1
                tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5ms
                pm_nl_set_limits $ns1 0 1
                pm_nl_set_limits $ns2 0 1
index ad2ebda5cb64ba5e389ec9f4f3064474547fba95..6ffa9b7a3260dffa2bedf1685155b2c45053ce56 100644 (file)
@@ -21,6 +21,7 @@ declare -rx MPTCP_LIB_AF_INET6=10
 
 MPTCP_LIB_SUBTESTS=()
 MPTCP_LIB_SUBTESTS_DUPLICATED=0
+MPTCP_LIB_SUBTEST_FLAKY=0
 MPTCP_LIB_TEST_COUNTER=0
 MPTCP_LIB_TEST_FORMAT="%02u %-50s"
 MPTCP_LIB_IP_MPTCP=0
@@ -41,6 +42,16 @@ else
        readonly MPTCP_LIB_COLOR_RESET=
 fi
 
+# SELFTESTS_MPTCP_LIB_OVERRIDE_FLAKY env var can be set not to ignore errors
+# from subtests marked as flaky
+mptcp_lib_override_flaky() {
+       [ "${SELFTESTS_MPTCP_LIB_OVERRIDE_FLAKY:-}" = 1 ]
+}
+
+mptcp_lib_subtest_is_flaky() {
+       [ "${MPTCP_LIB_SUBTEST_FLAKY}" = 1 ] && ! mptcp_lib_override_flaky
+}
+
 # $1: color, $2: text
 mptcp_lib_print_color() {
        echo -e "${MPTCP_LIB_START_PRINT:-}${*}${MPTCP_LIB_COLOR_RESET}"
@@ -72,7 +83,16 @@ mptcp_lib_pr_skip() {
 }
 
 mptcp_lib_pr_fail() {
-       mptcp_lib_print_err "[FAIL]${1:+ ${*}}"
+       local title cmt
+
+       if mptcp_lib_subtest_is_flaky; then
+               title="IGNO"
+               cmt=" (flaky)"
+       else
+               title="FAIL"
+       fi
+
+       mptcp_lib_print_err "[${title}]${cmt}${1:+ ${*}}"
 }
 
 mptcp_lib_pr_info() {
@@ -208,7 +228,13 @@ mptcp_lib_result_pass() {
 
 # $1: test name
 mptcp_lib_result_fail() {
-       __mptcp_lib_result_add "not ok" "${1}"
+       if mptcp_lib_subtest_is_flaky; then
+               # It might sound better to use 'not ok # TODO' or 'ok # SKIP',
+               # but some CIs don't understand 'TODO' and treat SKIP as errors.
+               __mptcp_lib_result_add "ok" "${1} # IGNORE Flaky"
+       else
+               __mptcp_lib_result_add "not ok" "${1}"
+       fi
 }
 
 # $1: test name
index 4b14b4412166b5db79ea6be28c009b536a8136b8..f74e1c3c126d18821ebba2968ed3f25b9f90c506 100755 (executable)
@@ -244,7 +244,7 @@ run_test()
        do_transfer $small $large $time
        lret=$?
        mptcp_lib_result_code "${lret}" "${msg}"
-       if [ $lret -ne 0 ]; then
+       if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
                ret=$lret
                [ $bail -eq 0 ] || exit $ret
        fi
@@ -254,7 +254,7 @@ run_test()
        do_transfer $large $small $time
        lret=$?
        mptcp_lib_result_code "${lret}" "${msg}"
-       if [ $lret -ne 0 ]; then
+       if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
                ret=$lret
                [ $bail -eq 0 ] || exit $ret
        fi
@@ -290,7 +290,7 @@ run_test 10 10 0 0 "balanced bwidth"
 run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
 
 # we still need some additional infrastructure to pass the following test-cases
-run_test 10 3 0 0 "unbalanced bwidth"
+MPTCP_LIB_SUBTEST_FLAKY=1 run_test 10 3 0 0 "unbalanced bwidth"
 run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
 run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
 
index 9024754530b230d0252905c5e01d327908da092a..5790ab446527f1677408ffce4b9cf76271afe9ed 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ // Use ll64
 #include <fcntl.h>
 #include <sched.h>
 #include <sys/stat.h>
index 12da0a939e3e5f2461eadb48883d4e0d024f7e34..557fb074acf0ca4e7a5ee0e31b306f2e3ac3725a 100644 (file)
             "echo \"1\" > /sys/bus/netdevsim/del_device"
         ]
     },
+    {
+        "id": "6f62",
+        "name": "Add taprio Qdisc with too short interval",
+        "category": [
+            "qdisc",
+            "taprio"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
+        ],
+        "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 300 sched-entry S 02 1700 clockid CLOCK_TAI",
+        "expExitCode": "2",
+        "verifyCmd": "$TC qdisc show dev $ETH",
+        "matchPattern": "qdisc taprio 1: root refcnt",
+        "matchCount": "0",
+        "teardown": [
+            "echo \"1\" > /sys/bus/netdevsim/del_device"
+        ]
+    },
+    {
+        "id": "831f",
+        "name": "Add taprio Qdisc with too short cycle-time",
+        "category": [
+            "qdisc",
+            "taprio"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
+        ],
+        "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 200000 sched-entry S 02 200000 cycle-time 100 clockid CLOCK_TAI",
+        "expExitCode": "2",
+        "verifyCmd": "$TC qdisc show dev $ETH",
+        "matchPattern": "qdisc taprio 1: root refcnt",
+        "matchCount": "0",
+        "teardown": [
+            "echo \"1\" > /sys/bus/netdevsim/del_device"
+        ]
+    },
     {
         "id": "3e1e",
         "name": "Add taprio Qdisc with an invalid cycle-time",
This page took 1.428403 seconds and 4 git commands to generate.