Git Repo - linux.git/commitdiff
Merge tag 'v6.4-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
author Linus Torvalds <[email protected]>
Wed, 26 Apr 2023 15:32:52 +0000 (08:32 -0700)
committer Linus Torvalds <[email protected]>
Wed, 26 Apr 2023 15:32:52 +0000 (08:32 -0700)
Pull crypto updates from Herbert Xu:
 "API:
   - Total usage stats now include all requests that returned errors
     (instead of just some)
   - Remove maximum hash statesize limit
   - Add cloning support for hmac and unkeyed hashes (a usage sketch
     follows this message)
   - Demote BUG_ON in crypto_unregister_alg to a WARN_ON

  Algorithms:
   - Use RIP-relative addressing on x86 to prepare for PIE build
   - Add accelerated AES/GCM stitched implementation on powerpc P10
   - Add some test vectors for cmac(camellia)
   - Remove failure case where jent is unavailable outside of FIPS mode
     in drbg
   - Add permanent and intermittent health error checks in jitter RNG

  Drivers:
   - Add support for 402xx devices in qat
   - Add support for HiSTB TRNG
   - Fix hash concurrency issues in stm32
   - Add OP-TEE firmware support in caam"
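
A minimal sketch of the new hash-cloning API follows (assumptions: the
crypto_clone_ahash() entry point added by this series behaves like the
other ahash allocators; the function clone_keyed_hmac_example and its
error handling are illustrative, not taken from the patches). Cloning
lets a second user reuse an already-keyed transform without a second
setkey():

	#include <crypto/hash.h>
	#include <linux/err.h>

	/* Sketch: key one hmac(sha256) transform, then clone it so a
	 * second user can hash concurrently without repeating setkey().
	 */
	static int clone_keyed_hmac_example(const u8 *key, unsigned int keylen)
	{
		struct crypto_ahash *tfm, *clone;
		int err;

		tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_ahash_setkey(tfm, key, keylen);
		if (err)
			goto out;

		clone = crypto_clone_ahash(tfm);	/* new in this merge */
		if (IS_ERR(clone)) {
			err = PTR_ERR(clone);
			goto out;
		}

		/* ... tfm and clone may now be used independently ... */

		crypto_free_ahash(clone);
	out:
		crypto_free_ahash(tfm);
		return err;
	}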

* tag 'v6.4-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (139 commits)
  i2c: designware: Add doorbell support for Mendocino
  i2c: designware: Use PCI PSP driver for communication
  powerpc: Move Power10 feature PPC_MODULE_FEATURE_P10
  crypto: p10-aes-gcm - Remove POWER10_CPU dependency
  crypto: testmgr - Add some test vectors for cmac(camellia)
  crypto: cryptd - Add support for cloning hashes
  crypto: cryptd - Convert hash to use modern init_tfm/exit_tfm
  crypto: hmac - Add support for cloning
  crypto: hash - Add crypto_clone_ahash/shash
  crypto: api - Add crypto_clone_tfm
  crypto: api - Add crypto_tfm_get
  crypto: x86/sha - Use local .L symbols for code
  crypto: x86/crc32 - Use local .L symbols for code
  crypto: x86/aesni - Use local .L symbols for code
  crypto: x86/sha256 - Use RIP-relative addressing
  crypto: x86/ghash - Use RIP-relative addressing
  crypto: x86/des3 - Use RIP-relative addressing
  crypto: x86/crc32c - Use RIP-relative addressing
  crypto: x86/cast6 - Use RIP-relative addressing
  crypto: x86/cast5 - Use RIP-relative addressing
  ...

373 files changed:
Documentation/devicetree/bindings/crypto/qcom-qce.txt [deleted file]
Documentation/devicetree/bindings/crypto/qcom-qce.yaml [new file with mode: 0644]
MAINTAINERS
arch/arm64/boot/dts/qcom/sm8550.dtsi
arch/arm64/crypto/aes-neonbs-core.S
arch/powerpc/crypto/Kconfig
arch/powerpc/crypto/Makefile
arch/powerpc/crypto/aes-gcm-p10-glue.c [new file with mode: 0644]
arch/powerpc/crypto/aes-gcm-p10.S [new file with mode: 0644]
arch/powerpc/crypto/aesp8-ppc.pl [new file with mode: 0644]
arch/powerpc/crypto/ghashp8-ppc.pl [new file with mode: 0644]
arch/powerpc/crypto/ppc-xlate.pl [new file with mode: 0644]
arch/powerpc/include/asm/cpufeature.h
arch/x86/crypto/aegis128-aesni-asm.S
arch/x86/crypto/aesni-intel_asm.S
arch/x86/crypto/aesni-intel_avx-x86_64.S
arch/x86/crypto/aria-aesni-avx-asm_64.S
arch/x86/crypto/aria-aesni-avx2-asm_64.S
arch/x86/crypto/aria-gfni-avx512-asm_64.S
arch/x86/crypto/camellia-aesni-avx-asm_64.S
arch/x86/crypto/camellia-aesni-avx2-asm_64.S
arch/x86/crypto/camellia-x86_64-asm_64.S
arch/x86/crypto/cast5-avx-x86_64-asm_64.S
arch/x86/crypto/cast6-avx-x86_64-asm_64.S
arch/x86/crypto/crc32-pclmul_asm.S
arch/x86/crypto/crc32c-pcl-intel-asm_64.S
arch/x86/crypto/des3_ede-asm_64.S
arch/x86/crypto/ghash-clmulni-intel_asm.S
arch/x86/crypto/sha1_avx2_x86_64_asm.S
arch/x86/crypto/sha256-avx-asm.S
arch/x86/crypto/sha256-avx2-asm.S
arch/x86/crypto/sha256-ssse3-asm.S
arch/x86/crypto/sha512-avx-asm.S
arch/x86/crypto/sha512-avx2-asm.S
arch/x86/crypto/sha512-ssse3-asm.S
arch/x86/kvm/svm/sev.c
crypto/acompress.c
crypto/aead.c
crypto/ahash.c
crypto/akcipher.c
crypto/algapi.c
crypto/algif_hash.c
crypto/api.c
crypto/async_tx/async_pq.c
crypto/async_tx/async_tx.c
crypto/compress.h [new file with mode: 0644]
crypto/cryptd.c
crypto/crypto_user_stat.c
crypto/drbg.c
crypto/fips.c
crypto/hash.h [new file with mode: 0644]
crypto/hmac.c
crypto/internal.h
crypto/jitterentropy-kcapi.c
crypto/jitterentropy.c
crypto/jitterentropy.h
crypto/kpp.c
crypto/rng.c
crypto/scompress.c
crypto/shash.c
crypto/skcipher.c
crypto/tcrypt.c
crypto/testmgr.c
crypto/testmgr.h
drivers/char/hw_random/meson-rng.c
drivers/char/hw_random/xgene-rng.c
drivers/crypto/Kconfig
drivers/crypto/Makefile
drivers/crypto/amcc/crypto4xx_core.c
drivers/crypto/aspeed/aspeed-acry.c
drivers/crypto/atmel-aes.c
drivers/crypto/atmel-sha.c
drivers/crypto/atmel-sha204a.c
drivers/crypto/atmel-tdes.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamhash.c
drivers/crypto/caam/caampkc.c
drivers/crypto/caam/caamrng.c
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/debugfs.c
drivers/crypto/caam/debugfs.h
drivers/crypto/caam/dpseci-debugfs.c
drivers/crypto/caam/intern.h
drivers/crypto/caam/jr.c
drivers/crypto/cavium/nitrox/nitrox_main.c
drivers/crypto/ccp/Makefile
drivers/crypto/ccp/platform-access.c [new file with mode: 0644]
drivers/crypto/ccp/platform-access.h [new file with mode: 0644]
drivers/crypto/ccp/psp-dev.c
drivers/crypto/ccp/psp-dev.h
drivers/crypto/ccp/sev-dev.c
drivers/crypto/ccp/sev-dev.h
drivers/crypto/ccp/sp-dev.h
drivers/crypto/ccp/sp-pci.c
drivers/crypto/ccp/tee-dev.c
drivers/crypto/ccree/cc_driver.c
drivers/crypto/hifn_795x.c
drivers/crypto/hisilicon/Kconfig
drivers/crypto/hisilicon/Makefile
drivers/crypto/hisilicon/hpre/hpre_main.c
drivers/crypto/hisilicon/qm.c
drivers/crypto/hisilicon/sec2/sec_main.c
drivers/crypto/hisilicon/trng/Makefile
drivers/crypto/hisilicon/trng/trng-stb.c [new file with mode: 0644]
drivers/crypto/hisilicon/zip/zip_main.c
drivers/crypto/img-hash.c
drivers/crypto/inside-secure/safexcel.c
drivers/crypto/intel/Kconfig [new file with mode: 0644]
drivers/crypto/intel/Makefile [new file with mode: 0644]
drivers/crypto/intel/ixp4xx/Kconfig [new file with mode: 0644]
drivers/crypto/intel/ixp4xx/Makefile [new file with mode: 0644]
drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c [new file with mode: 0644]
drivers/crypto/intel/keembay/Kconfig [new file with mode: 0644]
drivers/crypto/intel/keembay/Makefile [new file with mode: 0644]
drivers/crypto/intel/keembay/keembay-ocs-aes-core.c [new file with mode: 0644]
drivers/crypto/intel/keembay/keembay-ocs-ecc.c [new file with mode: 0644]
drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c [new file with mode: 0644]
drivers/crypto/intel/keembay/ocs-aes.c [new file with mode: 0644]
drivers/crypto/intel/keembay/ocs-aes.h [new file with mode: 0644]
drivers/crypto/intel/keembay/ocs-hcu.c [new file with mode: 0644]
drivers/crypto/intel/keembay/ocs-hcu.h [new file with mode: 0644]
drivers/crypto/intel/qat/Kconfig [new file with mode: 0644]
drivers/crypto/intel/qat/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_4xxx/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_4xxx/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxx/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxxvf/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62x/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62x/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62xvf/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_accel_devices.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_accel_engine.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_admin.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_aer.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_cfg.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_cfg.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_cfg_common.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_cfg_user.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_common_drv.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_config.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_config.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_init.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_isr.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_sriov.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_sysfs.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_transport.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_transport.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_transport_debug.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_transport_internal.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_vf_isr.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_hal.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_hw.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_algs.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_algs_send.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_algs_send.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_bl.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_bl.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_comp_req.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_compression.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_compression.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_crypto.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_crypto.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_hal.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_uclo.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xcc/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xccvf/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c [new file with mode: 0644]
drivers/crypto/ixp4xx_crypto.c [deleted file]
drivers/crypto/keembay/Kconfig [deleted file]
drivers/crypto/keembay/Makefile [deleted file]
drivers/crypto/keembay/keembay-ocs-aes-core.c [deleted file]
drivers/crypto/keembay/keembay-ocs-ecc.c [deleted file]
drivers/crypto/keembay/keembay-ocs-hcu-core.c [deleted file]
drivers/crypto/keembay/ocs-aes.c [deleted file]
drivers/crypto/keembay/ocs-aes.h [deleted file]
drivers/crypto/keembay/ocs-hcu.c [deleted file]
drivers/crypto/keembay/ocs-hcu.h [deleted file]
drivers/crypto/mxs-dcp.c
drivers/crypto/qat/Kconfig [deleted file]
drivers/crypto/qat/Makefile [deleted file]
drivers/crypto/qat/qat_4xxx/Makefile [deleted file]
drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c [deleted file]
drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h [deleted file]
drivers/crypto/qat/qat_4xxx/adf_drv.c [deleted file]
drivers/crypto/qat/qat_c3xxx/Makefile [deleted file]
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c [deleted file]
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h [deleted file]
drivers/crypto/qat/qat_c3xxx/adf_drv.c [deleted file]
drivers/crypto/qat/qat_c3xxxvf/Makefile [deleted file]
drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c [deleted file]
drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h [deleted file]
drivers/crypto/qat/qat_c3xxxvf/adf_drv.c [deleted file]
drivers/crypto/qat/qat_c62x/Makefile [deleted file]
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c [deleted file]
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h [deleted file]
drivers/crypto/qat/qat_c62x/adf_drv.c [deleted file]
drivers/crypto/qat/qat_c62xvf/Makefile [deleted file]
drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c [deleted file]
drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h [deleted file]
drivers/crypto/qat/qat_c62xvf/adf_drv.c [deleted file]
drivers/crypto/qat/qat_common/Makefile [deleted file]
drivers/crypto/qat/qat_common/adf_accel_devices.h [deleted file]
drivers/crypto/qat/qat_common/adf_accel_engine.c [deleted file]
drivers/crypto/qat/qat_common/adf_admin.c [deleted file]
drivers/crypto/qat/qat_common/adf_aer.c [deleted file]
drivers/crypto/qat/qat_common/adf_cfg.c [deleted file]
drivers/crypto/qat/qat_common/adf_cfg.h [deleted file]
drivers/crypto/qat/qat_common/adf_cfg_common.h [deleted file]
drivers/crypto/qat/qat_common/adf_cfg_strings.h [deleted file]
drivers/crypto/qat/qat_common/adf_cfg_user.h [deleted file]
drivers/crypto/qat/qat_common/adf_common_drv.h [deleted file]
drivers/crypto/qat/qat_common/adf_ctl_drv.c [deleted file]
drivers/crypto/qat/qat_common/adf_dev_mgr.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_config.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_config.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_dc.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_dc.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_hw_data.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_hw_data.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_pfvf.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_pfvf.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_dc.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_dc.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_hw_data.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_hw_data.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_pfvf.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_pfvf.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_pm.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_pm.h [deleted file]
drivers/crypto/qat/qat_common/adf_hw_arbiter.c [deleted file]
drivers/crypto/qat/qat_common/adf_init.c [deleted file]
drivers/crypto/qat/qat_common/adf_isr.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_msg.h [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_utils.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_utils.h [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h [deleted file]
drivers/crypto/qat/qat_common/adf_sriov.c [deleted file]
drivers/crypto/qat/qat_common/adf_sysfs.c [deleted file]
drivers/crypto/qat/qat_common/adf_transport.c [deleted file]
drivers/crypto/qat/qat_common/adf_transport.h [deleted file]
drivers/crypto/qat/qat_common/adf_transport_access_macros.h [deleted file]
drivers/crypto/qat/qat_common/adf_transport_debug.c [deleted file]
drivers/crypto/qat/qat_common/adf_transport_internal.h [deleted file]
drivers/crypto/qat/qat_common/adf_vf_isr.c [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw_comp.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw_la.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw_pke.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_hal.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_hw.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_uclo.h [deleted file]
drivers/crypto/qat/qat_common/qat_algs.c [deleted file]
drivers/crypto/qat/qat_common/qat_algs_send.c [deleted file]
drivers/crypto/qat/qat_common/qat_algs_send.h [deleted file]
drivers/crypto/qat/qat_common/qat_asym_algs.c [deleted file]
drivers/crypto/qat/qat_common/qat_bl.c [deleted file]
drivers/crypto/qat/qat_common/qat_bl.h [deleted file]
drivers/crypto/qat/qat_common/qat_comp_algs.c [deleted file]
drivers/crypto/qat/qat_common/qat_comp_req.h [deleted file]
drivers/crypto/qat/qat_common/qat_compression.c [deleted file]
drivers/crypto/qat/qat_common/qat_compression.h [deleted file]
drivers/crypto/qat/qat_common/qat_crypto.c [deleted file]
drivers/crypto/qat/qat_common/qat_crypto.h [deleted file]
drivers/crypto/qat/qat_common/qat_hal.c [deleted file]
drivers/crypto/qat/qat_common/qat_uclo.c [deleted file]
drivers/crypto/qat/qat_dh895xcc/Makefile [deleted file]
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c [deleted file]
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h [deleted file]
drivers/crypto/qat/qat_dh895xcc/adf_drv.c [deleted file]
drivers/crypto/qat/qat_dh895xccvf/Makefile [deleted file]
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c [deleted file]
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h [deleted file]
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c [deleted file]
drivers/crypto/qce/core.c
drivers/crypto/qce/core.h
drivers/crypto/sa2ul.c
drivers/crypto/sahara.c
drivers/crypto/stm32/stm32-hash.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-designware-amdpsp.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/tee/amdtee/call.c
drivers/tee/amdtee/shm_pool.c
include/crypto/acompress.h
include/crypto/aead.h
include/crypto/akcipher.h
include/crypto/algapi.h
include/crypto/hash.h
include/crypto/internal/acompress.h
include/crypto/internal/hash.h
include/crypto/internal/scompress.h
include/crypto/kpp.h
include/crypto/rng.h
include/crypto/skcipher.h
include/crypto/utils.h [new file with mode: 0644]
include/linux/crypto.h
include/linux/psp-platform-access.h [new file with mode: 0644]
include/linux/psp-sev.h
include/linux/psp.h [new file with mode: 0644]
kernel/padata.c
lib/crypto/utils.c

diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.txt b/Documentation/devicetree/bindings/crypto/qcom-qce.txt
deleted file mode 100644 (file)
index fdd53b1..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-Qualcomm crypto engine driver
-
-Required properties:
-
-- compatible  : should be "qcom,crypto-v5.1"
-- reg         : specifies base physical address and size of the registers map
-- clocks      : phandle to clock-controller plus clock-specifier pair
-- clock-names : "iface" clocks register interface
-                "bus" clocks data transfer interface
-                "core" clocks rest of the crypto block
-- dmas        : DMA specifiers for tx and rx dma channels. For more see
-                Documentation/devicetree/bindings/dma/dma.txt
-- dma-names   : DMA request names should be "rx" and "tx"
-
-Example:
-       crypto@fd45a000 {
-               compatible = "qcom,crypto-v5.1";
-               reg = <0xfd45a000 0x6000>;
-               clocks = <&gcc GCC_CE2_AHB_CLK>,
-                        <&gcc GCC_CE2_AXI_CLK>,
-                        <&gcc GCC_CE2_CLK>;
-               clock-names = "iface", "bus", "core";
-               dmas = <&cryptobam 2>, <&cryptobam 3>;
-               dma-names = "rx", "tx";
-       };
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
new file mode 100644 (file)
index 0000000..e375bd9
--- /dev/null
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/qcom-qce.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm crypto engine driver
+
+maintainers:
+  - Bhupesh Sharma <[email protected]>
+
+description:
+  This document defines the binding for the QCE crypto
+  controller found on Qualcomm parts.
+
+properties:
+  compatible:
+    oneOf:
+      - const: qcom,crypto-v5.1
+        deprecated: true
+        description: Kept only for ABI backward compatibility
+
+      - const: qcom,crypto-v5.4
+        deprecated: true
+        description: Kept only for ABI backward compatibility
+
+      - items:
+          - enum:
+              - qcom,ipq6018-qce
+              - qcom,ipq8074-qce
+              - qcom,msm8996-qce
+              - qcom,sdm845-qce
+          - const: qcom,ipq4019-qce
+          - const: qcom,qce
+
+      - items:
+          - enum:
+              - qcom,sm8250-qce
+              - qcom,sm8350-qce
+              - qcom,sm8450-qce
+              - qcom,sm8550-qce
+          - const: qcom,sm8150-qce
+          - const: qcom,qce
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: iface clocks register interface.
+      - description: bus clocks data transfer interface.
+      - description: core clocks rest of the crypto block.
+
+  clock-names:
+    items:
+      - const: iface
+      - const: bus
+      - const: core
+
+  iommus:
+    minItems: 1
+    maxItems: 8
+    description:
+      phandle to apps_smmu node with sid mask.
+
+  interconnects:
+    maxItems: 1
+    description:
+      Interconnect path between qce crypto and main memory.
+
+  interconnect-names:
+    const: memory
+
+  dmas:
+    items:
+      - description: DMA specifiers for rx dma channel.
+      - description: DMA specifiers for tx dma channel.
+
+  dma-names:
+    items:
+      - const: rx
+      - const: tx
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,crypto-v5.1
+              - qcom,crypto-v5.4
+              - qcom,ipq4019-qce
+
+    then:
+      required:
+        - clocks
+        - clock-names
+
+required:
+  - compatible
+  - reg
+  - dmas
+  - dma-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,gcc-apq8084.h>
+    crypto-engine@fd45a000 {
+        compatible = "qcom,ipq6018-qce", "qcom,ipq4019-qce", "qcom,qce";
+        reg = <0xfd45a000 0x6000>;
+        clocks = <&gcc GCC_CE2_AHB_CLK>,
+                 <&gcc GCC_CE2_AXI_CLK>,
+                 <&gcc GCC_CE2_CLK>;
+        clock-names = "iface", "bus", "core";
+        dmas = <&cryptobam 2>, <&cryptobam 3>;
+        dma-names = "rx", "tx";
+        iommus = <&apps_smmu 0x584 0x0011>,
+                 <&apps_smmu 0x586 0x0011>,
+                 <&apps_smmu 0x594 0x0011>,
+                 <&apps_smmu 0x596 0x0011>;
+    };
diff --git a/MAINTAINERS b/MAINTAINERS
index f0abf21883af3f6b01f13b271bf67201b31a8643..cd69cdfc23a2a8e219e23dd7e8ddddb537753d4a 100644 (file)
@@ -2269,7 +2269,7 @@ F:        arch/arm/boot/dts/intel-ixp*
 F:     arch/arm/mach-ixp4xx/
 F:     drivers/bus/intel-ixp4xx-eb.c
 F:     drivers/clocksource/timer-ixp4xx.c
-F:     drivers/crypto/ixp4xx_crypto.c
+F:     drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
 F:     drivers/gpio/gpio-ixp4xx.c
 F:     drivers/irqchip/irq-ixp4xx.c
 
@@ -10391,7 +10391,7 @@ INTEL IXP4XX CRYPTO SUPPORT
 M:     Corentin Labbe <[email protected]>
 L:     [email protected]
 S:     Maintained
-F:     drivers/crypto/ixp4xx_crypto.c
+F:     drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
 
 INTEL ISHTP ECLITE DRIVER
 M:     Sumesh K Naduvalath <[email protected]>
@@ -10426,11 +10426,11 @@ INTEL KEEM BAY OCS AES/SM4 CRYPTO DRIVER
 M:     Daniele Alessandrelli <[email protected]>
 S:     Maintained
 F:     Documentation/devicetree/bindings/crypto/intel,keembay-ocs-aes.yaml
-F:     drivers/crypto/keembay/Kconfig
-F:     drivers/crypto/keembay/Makefile
-F:     drivers/crypto/keembay/keembay-ocs-aes-core.c
-F:     drivers/crypto/keembay/ocs-aes.c
-F:     drivers/crypto/keembay/ocs-aes.h
+F:     drivers/crypto/intel/keembay/Kconfig
+F:     drivers/crypto/intel/keembay/Makefile
+F:     drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
+F:     drivers/crypto/intel/keembay/ocs-aes.c
+F:     drivers/crypto/intel/keembay/ocs-aes.h
 
 INTEL KEEM BAY OCS ECC CRYPTO DRIVER
 M:     Daniele Alessandrelli <[email protected]>
@@ -10438,20 +10438,20 @@ M:    Prabhjot Khurana <[email protected]>
 M:     Mark Gross <[email protected]>
 S:     Maintained
 F:     Documentation/devicetree/bindings/crypto/intel,keembay-ocs-ecc.yaml
-F:     drivers/crypto/keembay/Kconfig
-F:     drivers/crypto/keembay/Makefile
-F:     drivers/crypto/keembay/keembay-ocs-ecc.c
+F:     drivers/crypto/intel/keembay/Kconfig
+F:     drivers/crypto/intel/keembay/Makefile
+F:     drivers/crypto/intel/keembay/keembay-ocs-ecc.c
 
 INTEL KEEM BAY OCS HCU CRYPTO DRIVER
 M:     Daniele Alessandrelli <[email protected]>
 M:     Declan Murphy <[email protected]>
 S:     Maintained
 F:     Documentation/devicetree/bindings/crypto/intel,keembay-ocs-hcu.yaml
-F:     drivers/crypto/keembay/Kconfig
-F:     drivers/crypto/keembay/Makefile
-F:     drivers/crypto/keembay/keembay-ocs-hcu-core.c
-F:     drivers/crypto/keembay/ocs-hcu.c
-F:     drivers/crypto/keembay/ocs-hcu.h
+F:     drivers/crypto/intel/keembay/Kconfig
+F:     drivers/crypto/intel/keembay/Makefile
+F:     drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+F:     drivers/crypto/intel/keembay/ocs-hcu.c
+F:     drivers/crypto/intel/keembay/ocs-hcu.h
 
 INTEL THUNDER BAY EMMC PHY DRIVER
 M:     Nandhini Srikandan <[email protected]>
@@ -17027,7 +17027,7 @@ QAT DRIVER
 M:     Giovanni Cabiddu <[email protected]>
 L:     [email protected]
 S:     Supported
-F:     drivers/crypto/qat/
+F:     drivers/crypto/intel/qat/
 
 QCOM AUDIO (ASoC) DRIVERS
 M:     Srinivas Kandagatla <[email protected]>
@@ -17295,6 +17295,7 @@ M:      Thara Gopinath <[email protected]>
 L:     [email protected]
 L:     [email protected]
 S:     Maintained
+F:     Documentation/devicetree/bindings/crypto/qcom-qce.yaml
 F:     drivers/crypto/qce/
 
 QUALCOMM EMAC GIGABIT ETHERNET DRIVER
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index f110d6cc195d9daa0e350a1d23d4fccfdc363ba0..6e9bad8f6f33ebe74cfab95e457b18bd9af45fc2 100644 (file)
                };
 
                crypto: crypto@1de0000 {
-                       compatible = "qcom,sm8550-qce";
+                       compatible = "qcom,sm8550-qce", "qcom,sm8150-qce", "qcom,qce";
                        reg = <0x0 0x01dfa000 0x0 0x6000>;
                        dmas = <&cryptobam 4>, <&cryptobam 5>;
                        dma-names = "rx", "tx";
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index 7278a37c2d5cd0005f935810703bd94bcaa7ee32..baf450717b24ba37cac4644617e8081b25d1437d 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 
        .text
@@ -620,12 +621,12 @@ SYM_FUNC_END(aesbs_decrypt8)
        .endm
 
        .align          4
-SYM_FUNC_START(aesbs_ecb_encrypt)
+SYM_TYPED_FUNC_START(aesbs_ecb_encrypt)
        __ecb_crypt     aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
 SYM_FUNC_END(aesbs_ecb_encrypt)
 
        .align          4
-SYM_FUNC_START(aesbs_ecb_decrypt)
+SYM_TYPED_FUNC_START(aesbs_ecb_decrypt)
        __ecb_crypt     aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
 SYM_FUNC_END(aesbs_ecb_decrypt)
 
@@ -799,11 +800,11 @@ SYM_FUNC_END(__xts_crypt8)
        ret
        .endm
 
-SYM_FUNC_START(aesbs_xts_encrypt)
+SYM_TYPED_FUNC_START(aesbs_xts_encrypt)
        __xts_crypt     aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
 SYM_FUNC_END(aesbs_xts_encrypt)
 
-SYM_FUNC_START(aesbs_xts_decrypt)
+SYM_TYPED_FUNC_START(aesbs_xts_decrypt)
        __xts_crypt     aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
 SYM_FUNC_END(aesbs_xts_decrypt)
 
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index c1b964447401634e9c55ba7f34695a0f63750789..7113f9355165a663732964cdf1890541936a36a8 100644 (file)
@@ -94,4 +94,21 @@ config CRYPTO_AES_PPC_SPE
          architecture specific assembler implementations that work on 1KB
          tables or 256 bytes S-boxes.
 
+config CRYPTO_AES_GCM_P10
+       tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
+       depends on PPC64 && CPU_LITTLE_ENDIAN
+       select CRYPTO_LIB_AES
+       select CRYPTO_ALGAPI
+       select CRYPTO_AEAD
+       default m
+       help
+         AEAD cipher: AES cipher algorithms (FIPS-197)
+         GCM (Galois/Counter Mode) authenticated encryption mode (NIST SP800-38D)
+         Architecture: powerpc64 using:
+           - little-endian
+           - Power10 or later features
+
+         Support for cryptographic acceleration instructions on Power10 or
+         later CPUs. This module supports stitched acceleration for AES/GCM.
+
 endmenu
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 4808d97fede5323a7a3a7a302334e6e4107d69d9..05c7486f42c587b1bf1c59b3ad756c4a6f474609 100644 (file)
@@ -13,6 +13,7 @@ obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
 obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
 obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o
 obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o
+obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
 
 aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
 md5-ppc-y := md5-asm.o md5-glue.o
@@ -21,3 +22,15 @@ sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
 sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
 crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
 crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
+aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp8-ppc.o aesp8-ppc.o
+
+quiet_cmd_perl = PERL    $@
+      cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@
+
+targets += aesp8-ppc.S ghashp8-ppc.S
+
+$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
+       $(call if_changed,perl)
+
+OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
+OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y
diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c
new file mode 100644 (file)
index 0000000..bd3475f
--- /dev/null
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Glue code for accelerated AES-GCM stitched implementation for ppc64le.
+ *
+ * Copyright 2022- IBM Inc. All rights reserved
+ */
+
+#include <asm/unaligned.h>
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/b128ops.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define        PPC_ALIGN               16
+#define GCM_IV_SIZE            12
+
+MODULE_DESCRIPTION("PPC64le AES-GCM with Stitched implementation");
+MODULE_AUTHOR("Danny Tsen <[email protected]>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("aes");
+
+asmlinkage int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
+                                     void *key);
+asmlinkage void aes_p8_encrypt(const u8 *in, u8 *out, const void *key);
+asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
+                                   void *rkey, u8 *iv, void *Xi);
+asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
+                                   void *rkey, u8 *iv, void *Xi);
+asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
+asmlinkage void gcm_ghash_p8(unsigned char *Xi, unsigned char *Htable,
+               unsigned char *aad, unsigned int alen);
+
+struct aes_key {
+       u8 key[AES_MAX_KEYLENGTH];
+       u64 rounds;
+};
+
+struct gcm_ctx {
+       u8 iv[16];
+       u8 ivtag[16];
+       u8 aad_hash[16];
+       u64 aadLen;
+       u64 Plen;       /* offset 56 - used in aes_p10_gcm_{en/de}crypt */
+};
+struct Hash_ctx {
+       u8 H[16];       /* subkey */
+       u8 Htable[256]; /* Xi, Hash table(offset 32) */
+};
+
+struct p10_aes_gcm_ctx {
+       struct aes_key enc_key;
+};
+
+static void vsx_begin(void)
+{
+       preempt_disable();
+       enable_kernel_vsx();
+}
+
+static void vsx_end(void)
+{
+       disable_kernel_vsx();
+       preempt_enable();
+}
+
+static void set_subkey(unsigned char *hash)
+{
+       *(u64 *)&hash[0] = be64_to_cpup((__be64 *)&hash[0]);
+       *(u64 *)&hash[8] = be64_to_cpup((__be64 *)&hash[8]);
+}
+
+/*
+ * Compute aad if any.
+ *   - Hash aad and copy to Xi.
+ */
+static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
+                   unsigned char *aad, int alen)
+{
+       int i;
+       u8 nXi[16] = {0, };
+
+       gctx->aadLen = alen;
+       i = alen & ~0xf;
+       if (i) {
+               gcm_ghash_p8(nXi, hash->Htable+32, aad, i);
+               aad += i;
+               alen -= i;
+       }
+       if (alen) {
+               for (i = 0; i < alen; i++)
+                       nXi[i] ^= aad[i];
+
+               memset(gctx->aad_hash, 0, 16);
+               gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16);
+       } else {
+               memcpy(gctx->aad_hash, nXi, 16);
+       }
+
+       memcpy(hash->Htable, gctx->aad_hash, 16);
+}
+
+static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
+                       struct Hash_ctx *hash, u8 *assoc, unsigned int assoclen)
+{
+       __be32 counter = cpu_to_be32(1);
+
+       aes_p8_encrypt(hash->H, hash->H, rdkey);
+       set_subkey(hash->H);
+       gcm_init_htable(hash->Htable+32, hash->H);
+
+       *((__be32 *)(iv+12)) = counter;
+
+       gctx->Plen = 0;
+
+       /*
+        * Encrypt counter vector as iv tag and increment counter.
+        */
+       aes_p8_encrypt(iv, gctx->ivtag, rdkey);
+
+       counter = cpu_to_be32(2);
+       *((__be32 *)(iv+12)) = counter;
+       memcpy(gctx->iv, iv, 16);
+
+       gctx->aadLen = assoclen;
+       memset(gctx->aad_hash, 0, 16);
+       if (assoclen)
+               set_aad(gctx, hash, assoc, assoclen);
+}
+
+static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len)
+{
+       int i;
+       unsigned char len_ac[16 + PPC_ALIGN];
+       unsigned char *aclen = PTR_ALIGN((void *)len_ac, PPC_ALIGN);
+       __be64 clen = cpu_to_be64(len << 3);
+       __be64 alen = cpu_to_be64(gctx->aadLen << 3);
+
+       if (len == 0 && gctx->aadLen == 0) {
+               memcpy(hash->Htable, gctx->ivtag, 16);
+               return;
+       }
+
+       /*
+        * Len is in bits.
+        */
+       *((__be64 *)(aclen)) = alen;
+       *((__be64 *)(aclen+8)) = clen;
+
+       /*
+        * hash (AAD len and len)
+        */
+       gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16);
+
+       for (i = 0; i < 16; i++)
+               hash->Htable[i] ^= gctx->ivtag[i];
+}
+
+static int set_authsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+       switch (authsize) {
+       case 4:
+       case 8:
+       case 12:
+       case 13:
+       case 14:
+       case 15:
+       case 16:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+                            unsigned int keylen)
+{
+       struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+       struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
+       int ret;
+
+       vsx_begin();
+       ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+       vsx_end();
+
+       return ret ? -EINVAL : 0;
+}
+
+static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
+{
+       struct crypto_tfm *tfm = req->base.tfm;
+       struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
+       u8 databuf[sizeof(struct gcm_ctx) + PPC_ALIGN];
+       struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN);
+       u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN];
+       struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN);
+       struct scatter_walk assoc_sg_walk;
+       struct skcipher_walk walk;
+       u8 *assocmem = NULL;
+       u8 *assoc;
+       unsigned int assoclen = req->assoclen;
+       unsigned int cryptlen = req->cryptlen;
+       unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN];
+       unsigned char *iv = PTR_ALIGN((void *)ivbuf, PPC_ALIGN);
+       int ret;
+       unsigned long auth_tag_len = crypto_aead_authsize(__crypto_aead_cast(tfm));
+       u8 otag[16];
+       int total_processed = 0;
+
+       memset(databuf, 0, sizeof(databuf));
+       memset(hashbuf, 0, sizeof(hashbuf));
+       memset(ivbuf, 0, sizeof(ivbuf));
+       memcpy(iv, req->iv, GCM_IV_SIZE);
+
+       /* Linearize assoc, if not already linear */
+       if (req->src->length >= assoclen && req->src->length) {
+               scatterwalk_start(&assoc_sg_walk, req->src);
+               assoc = scatterwalk_map(&assoc_sg_walk);
+       } else {
+               gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+                             GFP_KERNEL : GFP_ATOMIC;
+
+               /* assoc can be any length, so must be on heap */
+               assocmem = kmalloc(assoclen, flags);
+               if (unlikely(!assocmem))
+                       return -ENOMEM;
+               assoc = assocmem;
+
+               scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
+       }
+
+       vsx_begin();
+       gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen);
+       vsx_end();
+
+       if (!assocmem)
+               scatterwalk_unmap(assoc);
+       else
+               kfree(assocmem);
+
+       if (enc)
+               ret = skcipher_walk_aead_encrypt(&walk, req, false);
+       else
+               ret = skcipher_walk_aead_decrypt(&walk, req, false);
+       if (ret)
+               return ret;
+
+       while (walk.nbytes > 0 && ret == 0) {
+
+               vsx_begin();
+               if (enc)
+                       aes_p10_gcm_encrypt(walk.src.virt.addr,
+                                           walk.dst.virt.addr,
+                                           walk.nbytes,
+                                           &ctx->enc_key, gctx->iv, hash->Htable);
+               else
+                       aes_p10_gcm_decrypt(walk.src.virt.addr,
+                                           walk.dst.virt.addr,
+                                           walk.nbytes,
+                                           &ctx->enc_key, gctx->iv, hash->Htable);
+               vsx_end();
+
+               total_processed += walk.nbytes;
+               ret = skcipher_walk_done(&walk, 0);
+       }
+
+       if (ret)
+               return ret;
+
+       /* Finalize hash */
+       vsx_begin();
+       finish_tag(gctx, hash, total_processed);
+       vsx_end();
+
+       /* copy Xi to end of dst */
+       if (enc)
+               scatterwalk_map_and_copy(hash->Htable, req->dst, req->assoclen + cryptlen,
+                                        auth_tag_len, 1);
+       else {
+               scatterwalk_map_and_copy(otag, req->src,
+                                        req->assoclen + cryptlen - auth_tag_len,
+                                        auth_tag_len, 0);
+
+               if (crypto_memneq(otag, hash->Htable, auth_tag_len)) {
+                       memzero_explicit(hash->Htable, 16);
+                       return -EBADMSG;
+               }
+       }
+
+       return 0;
+}
+
+static int p10_aes_gcm_encrypt(struct aead_request *req)
+{
+       return p10_aes_gcm_crypt(req, 1);
+}
+
+static int p10_aes_gcm_decrypt(struct aead_request *req)
+{
+       return p10_aes_gcm_crypt(req, 0);
+}
+
+static struct aead_alg gcm_aes_alg = {
+       .ivsize                 = GCM_IV_SIZE,
+       .maxauthsize            = 16,
+
+       .setauthsize            = set_authsize,
+       .setkey                 = p10_aes_gcm_setkey,
+       .encrypt                = p10_aes_gcm_encrypt,
+       .decrypt                = p10_aes_gcm_decrypt,
+
+       .base.cra_name          = "gcm(aes)",
+       .base.cra_driver_name   = "aes_gcm_p10",
+       .base.cra_priority      = 2100,
+       .base.cra_blocksize     = 1,
+       .base.cra_ctxsize       = sizeof(struct p10_aes_gcm_ctx),
+       .base.cra_module        = THIS_MODULE,
+};
+
+static int __init p10_init(void)
+{
+       return crypto_register_aead(&gcm_aes_alg);
+}
+
+static void __exit p10_exit(void)
+{
+       crypto_unregister_aead(&gcm_aes_alg);
+}
+
+module_cpu_feature_match(PPC_MODULE_FEATURE_P10, p10_init);
+module_exit(p10_exit);
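
For context, here is a minimal usage sketch (not part of the patch; the
function gcm_encrypt_example is invented for illustration, and error
handling is trimmed to essentials). On Power10 with this module loaded,
allocating "gcm(aes)" through the generic AEAD API resolves to the
aes_gcm_p10 implementation registered above at priority 2100:

	#include <crypto/aead.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* Sketch: one-shot AES-GCM encryption via the generic AEAD API.
	 * @iv is the 12-byte GCM nonce; @dst must have room for @len
	 * bytes of ciphertext plus the 16-byte tag appended at the end.
	 */
	static int gcm_encrypt_example(const u8 *key, unsigned int keylen,
				       u8 *iv, struct scatterlist *src,
				       struct scatterlist *dst,
				       unsigned int len)
	{
		struct crypto_aead *tfm;
		struct aead_request *req;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_aead_setkey(tfm, key, keylen);
		if (!err)
			err = crypto_aead_setauthsize(tfm, 16); /* full tag */
		if (err)
			goto out_free_tfm;

		req = aead_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		aead_request_set_callback(req, 0, crypto_req_done, &wait);
		aead_request_set_ad(req, 0);    /* no associated data */
		aead_request_set_crypt(req, src, dst, len, iv);

		err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

		aead_request_free(req);
	out_free_tfm:
		crypto_free_aead(tfm);
		return err;
	}
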
diff --git a/arch/powerpc/crypto/aes-gcm-p10.S b/arch/powerpc/crypto/aes-gcm-p10.S
new file mode 100644 (file)
index 0000000..a51f4b2
--- /dev/null
@@ -0,0 +1,1521 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+ #
+ # Accelerated AES-GCM stitched implementation for ppc64le.
+ #
+ # Copyright 2022- IBM Inc. All rights reserved
+ #
+ #===================================================================================
+ # Written by Danny Tsen <[email protected]>
+ #
+ # GHASH is based on the Karatsuba multiplication method.
+ #
+ #    Xi xor X1
+ #
+ #    X1 * H^4 + X2 * H^3 + X3 * H^2 + X4 * H =
+ #      (X1.h * H4.h + X1.l * H4.l + X1 * H4) +
+ #      (X2.h * H3.h + X2.l * H3.l + X2 * H3) +
+ #      (X3.h * H2.h + X3.l * H2.l + X3 * H2) +
+ #      (X4.h * H.h + X4.l * H.l + X4 * H)
+ #
+ # Xi = v0
+ # H Poly = v2
+ # Hash keys = v3 - v14
+ #     ( H.l, H, H.h)
+ #     ( H^2.l, H^2, H^2.h)
+ #     ( H^3.l, H^3, H^3.h)
+ #     ( H^4.l, H^4, H^4.h)
+ #
+ # v30 is IV
+ # v31 - counter 1
+ #
+ # AES used,
+ #     vs0 - vs14 for round keys
+ #     v15, v16, v17, v18, v19, v20, v21, v22 for 8 blocks (encrypted)
+ #
+ # This implementation uses a stitched AES-GCM approach to improve overall
+ # performance. AES is implemented with 8x blocks and GHASH uses two 4x blocks.
+ #
+ # ===================================================================================
+ #
+
+#include <asm/ppc_asm.h>
+#include <linux/linkage.h>
+
+.machine        "any"
+.text
+
+ # 4x loops
+ # v15 - v18 - input states
+ # vs1 - vs9 - round keys
+ #
+.macro Loop_aes_middle4x
+       xxlor   19+32, 1, 1
+       xxlor   20+32, 2, 2
+       xxlor   21+32, 3, 3
+       xxlor   22+32, 4, 4
+
+       vcipher 15, 15, 19
+       vcipher 16, 16, 19
+       vcipher 17, 17, 19
+       vcipher 18, 18, 19
+
+       vcipher 15, 15, 20
+       vcipher 16, 16, 20
+       vcipher 17, 17, 20
+       vcipher 18, 18, 20
+
+       vcipher 15, 15, 21
+       vcipher 16, 16, 21
+       vcipher 17, 17, 21
+       vcipher 18, 18, 21
+
+       vcipher 15, 15, 22
+       vcipher 16, 16, 22
+       vcipher 17, 17, 22
+       vcipher 18, 18, 22
+
+       xxlor   19+32, 5, 5
+       xxlor   20+32, 6, 6
+       xxlor   21+32, 7, 7
+       xxlor   22+32, 8, 8
+
+       vcipher 15, 15, 19
+       vcipher 16, 16, 19
+       vcipher 17, 17, 19
+       vcipher 18, 18, 19
+
+       vcipher 15, 15, 20
+       vcipher 16, 16, 20
+       vcipher 17, 17, 20
+       vcipher 18, 18, 20
+
+       vcipher 15, 15, 21
+       vcipher 16, 16, 21
+       vcipher 17, 17, 21
+       vcipher 18, 18, 21
+
+       vcipher 15, 15, 22
+       vcipher 16, 16, 22
+       vcipher 17, 17, 22
+       vcipher 18, 18, 22
+
+       xxlor   23+32, 9, 9
+       vcipher 15, 15, 23
+       vcipher 16, 16, 23
+       vcipher 17, 17, 23
+       vcipher 18, 18, 23
+.endm
+
+ # 8x loops
+ # v15 - v22 - input states
+ # vs1 - vs9 - round keys
+ #
+.macro Loop_aes_middle8x
+       xxlor   23+32, 1, 1
+       xxlor   24+32, 2, 2
+       xxlor   25+32, 3, 3
+       xxlor   26+32, 4, 4
+
+       vcipher 15, 15, 23
+       vcipher 16, 16, 23
+       vcipher 17, 17, 23
+       vcipher 18, 18, 23
+       vcipher 19, 19, 23
+       vcipher 20, 20, 23
+       vcipher 21, 21, 23
+       vcipher 22, 22, 23
+
+       vcipher 15, 15, 24
+       vcipher 16, 16, 24
+       vcipher 17, 17, 24
+       vcipher 18, 18, 24
+       vcipher 19, 19, 24
+       vcipher 20, 20, 24
+       vcipher 21, 21, 24
+       vcipher 22, 22, 24
+
+       vcipher 15, 15, 25
+       vcipher 16, 16, 25
+       vcipher 17, 17, 25
+       vcipher 18, 18, 25
+       vcipher 19, 19, 25
+       vcipher 20, 20, 25
+       vcipher 21, 21, 25
+       vcipher 22, 22, 25
+
+       vcipher 15, 15, 26
+       vcipher 16, 16, 26
+       vcipher 17, 17, 26
+       vcipher 18, 18, 26
+       vcipher 19, 19, 26
+       vcipher 20, 20, 26
+       vcipher 21, 21, 26
+       vcipher 22, 22, 26
+
+       xxlor   23+32, 5, 5
+       xxlor   24+32, 6, 6
+       xxlor   25+32, 7, 7
+       xxlor   26+32, 8, 8
+
+       vcipher 15, 15, 23
+       vcipher 16, 16, 23
+       vcipher 17, 17, 23
+       vcipher 18, 18, 23
+       vcipher 19, 19, 23
+       vcipher 20, 20, 23
+       vcipher 21, 21, 23
+       vcipher 22, 22, 23
+
+       vcipher 15, 15, 24
+       vcipher 16, 16, 24
+       vcipher 17, 17, 24
+       vcipher 18, 18, 24
+       vcipher 19, 19, 24
+       vcipher 20, 20, 24
+       vcipher 21, 21, 24
+       vcipher 22, 22, 24
+
+       vcipher 15, 15, 25
+       vcipher 16, 16, 25
+       vcipher 17, 17, 25
+       vcipher 18, 18, 25
+       vcipher 19, 19, 25
+       vcipher 20, 20, 25
+       vcipher 21, 21, 25
+       vcipher 22, 22, 25
+
+       vcipher 15, 15, 26
+       vcipher 16, 16, 26
+       vcipher 17, 17, 26
+       vcipher 18, 18, 26
+       vcipher 19, 19, 26
+       vcipher 20, 20, 26
+       vcipher 21, 21, 26
+       vcipher 22, 22, 26
+
+       xxlor   23+32, 9, 9
+       vcipher 15, 15, 23
+       vcipher 16, 16, 23
+       vcipher 17, 17, 23
+       vcipher 18, 18, 23
+       vcipher 19, 19, 23
+       vcipher 20, 20, 23
+       vcipher 21, 21, 23
+       vcipher 22, 22, 23
+.endm
+
+.macro Loop_aes_middle_1x
+       xxlor   19+32, 1, 1
+       xxlor   20+32, 2, 2
+       xxlor   21+32, 3, 3
+       xxlor   22+32, 4, 4
+
+       vcipher 15, 15, 19
+       vcipher 15, 15, 20
+       vcipher 15, 15, 21
+       vcipher 15, 15, 22
+
+       xxlor   19+32, 5, 5
+       xxlor   20+32, 6, 6
+       xxlor   21+32, 7, 7
+       xxlor   22+32, 8, 8
+
+       vcipher 15, 15, 19
+       vcipher 15, 15, 20
+       vcipher 15, 15, 21
+       vcipher 15, 15, 22
+
+       xxlor   19+32, 9, 9
+       vcipher 15, 15, 19
+.endm
+
+ #
+ # Compute 4x hash values based on Karatsuba method.
+ #
+.macro ppc_aes_gcm_ghash
+       vxor            15, 15, 0
+
+       vpmsumd         23, 12, 15              # H4.L * X.L
+       vpmsumd         24, 9, 16
+       vpmsumd         25, 6, 17
+       vpmsumd         26, 3, 18
+
+       vxor            23, 23, 24
+       vxor            23, 23, 25
+       vxor            23, 23, 26              # L
+
+       vpmsumd         24, 13, 15              # H4.L * X.H + H4.H * X.L
+       vpmsumd         25, 10, 16              # H3.L * X1.H + H3.H * X1.L
+       vpmsumd         26, 7, 17
+       vpmsumd         27, 4, 18
+
+       vxor            24, 24, 25
+       vxor            24, 24, 26
+       vxor            24, 24, 27              # M
+
+       # sum hash and reduction with H Poly
+       vpmsumd         28, 23, 2               # reduction
+
+       vxor            29, 29, 29
+       vsldoi          26, 24, 29, 8           # mL
+       vsldoi          29, 29, 24, 8           # mH
+       vxor            23, 23, 26              # mL + L
+
+       vsldoi          23, 23, 23, 8           # swap
+       vxor            23, 23, 28
+
+       vpmsumd         24, 14, 15              # H4.H * X.H
+       vpmsumd         25, 11, 16
+       vpmsumd         26, 8, 17
+       vpmsumd         27, 5, 18
+
+       vxor            24, 24, 25
+       vxor            24, 24, 26
+       vxor            24, 24, 27
+
+       vxor            24, 24, 29
+
+       # sum hash and reduction with H Poly
+       vsldoi          27, 23, 23, 8           # swap
+       vpmsumd         23, 23, 2
+       vxor            27, 27, 24
+       vxor            23, 23, 27
+
+       xxlor           32, 23+32, 23+32                # update hash
+
+.endm
+
+ #
+ # Combine two 4x ghash
+ # v15 - v22 - input blocks
+ #
+.macro ppc_aes_gcm_ghash2_4x
+       # first 4x hash
+       vxor            15, 15, 0               # Xi + X
+
+       vpmsumd         23, 12, 15              # H4.L * X.L
+       vpmsumd         24, 9, 16
+       vpmsumd         25, 6, 17
+       vpmsumd         26, 3, 18
+
+       vxor            23, 23, 24
+       vxor            23, 23, 25
+       vxor            23, 23, 26              # L
+
+       vpmsumd         24, 13, 15              # H4.L * X.H + H4.H * X.L
+       vpmsumd         25, 10, 16              # H3.L * X1.H + H3.H * X1.L
+       vpmsumd         26, 7, 17
+       vpmsumd         27, 4, 18
+
+       vxor            24, 24, 25
+       vxor            24, 24, 26
+
+       # sum hash and reduction with H Poly
+       vpmsumd         28, 23, 2               # reduction
+
+       vxor            29, 29, 29
+
+       vxor            24, 24, 27              # M
+       vsldoi          26, 24, 29, 8           # mL
+       vsldoi          29, 29, 24, 8           # mH
+       vxor            23, 23, 26              # mL + L
+
+       vsldoi          23, 23, 23, 8           # swap
+       vxor            23, 23, 28
+
+       vpmsumd         24, 14, 15              # H4.H * X.H
+       vpmsumd         25, 11, 16
+       vpmsumd         26, 8, 17
+       vpmsumd         27, 5, 18
+
+       vxor            24, 24, 25
+       vxor            24, 24, 26
+       vxor            24, 24, 27              # H
+
+       vxor            24, 24, 29              # H + mH
+
+       # sum hash and reduction with H Poly
+       vsldoi          27, 23, 23, 8           # swap
+       vpmsumd         23, 23, 2
+       vxor            27, 27, 24
+       vxor            27, 23, 27              # 1st Xi
+
+       # 2nd 4x hash
+       vpmsumd         24, 9, 20
+       vpmsumd         25, 6, 21
+       vpmsumd         26, 3, 22
+       vxor            19, 19, 27              # Xi + X
+       vpmsumd         23, 12, 19              # H4.L * X.L
+
+       vxor            23, 23, 24
+       vxor            23, 23, 25
+       vxor            23, 23, 26              # L
+
+       vpmsumd         24, 13, 19              # H4.L * X.H + H4.H * X.L
+       vpmsumd         25, 10, 20              # H3.L * X1.H + H3.H * X1.L
+       vpmsumd         26, 7, 21
+       vpmsumd         27, 4, 22
+
+       vxor            24, 24, 25
+       vxor            24, 24, 26
+
+       # sum hash and reduction with H Poly
+       vpmsumd         28, 23, 2               # reduction
+
+       vxor            29, 29, 29
+
+       vxor            24, 24, 27              # M
+       vsldoi          26, 24, 29, 8           # mL
+       vsldoi          29, 29, 24, 8           # mH
+       vxor            23, 23, 26              # mL + L
+
+       vsldoi          23, 23, 23, 8           # swap
+       vxor            23, 23, 28
+
+       vpmsumd         24, 14, 19              # H4.H * X.H
+       vpmsumd         25, 11, 20
+       vpmsumd         26, 8, 21
+       vpmsumd         27, 5, 22
+
+       vxor            24, 24, 25
+       vxor            24, 24, 26
+       vxor            24, 24, 27              # H
+
+       vxor            24, 24, 29              # H + mH
+
+       # sum hash and reduction with H Poly
+       vsldoi          27, 23, 23, 8           # swap
+       vpmsumd         23, 23, 2
+       vxor            27, 27, 24
+       vxor            23, 23, 27
+
+       xxlor           32, 23+32, 23+32                # update hash
+
+.endm
+
+ #
+ # Compute update single hash
+ #
+.macro ppc_update_hash_1x
+       vxor            28, 28, 0
+
+       vxor            19, 19, 19
+
+       vpmsumd         22, 3, 28               # L
+       vpmsumd         23, 4, 28               # M
+       vpmsumd         24, 5, 28               # H
+
+       vpmsumd         27, 22, 2               # reduction
+
+       vsldoi          25, 23, 19, 8           # mL
+       vsldoi          26, 19, 23, 8           # mH
+       vxor            22, 22, 25              # LL + LL
+       vxor            24, 24, 26              # HH + HH
+
+       vsldoi          22, 22, 22, 8           # swap
+       vxor            22, 22, 27
+
+       vsldoi          20, 22, 22, 8           # swap
+       vpmsumd         22, 22, 2               # reduction
+       vxor            20, 20, 24
+       vxor            22, 22, 20
+
+       vmr             0, 22                   # update hash
+
+.endm
+
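+ #
+ # SAVE_REGS/RESTORE_REGS keep the non-volatile registers used here in
+ # a 640-byte stack frame: r14-r21 at 112(r1), v20-v31 from 256(r1),
+ # vs14-vs22 from 464(r1), and the saved LR at 656(r1).
+ #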
+.macro SAVE_REGS
+       stdu 1,-640(1)
+       mflr 0
+
+       std     14,112(1)
+       std     15,120(1)
+       std     16,128(1)
+       std     17,136(1)
+       std     18,144(1)
+       std     19,152(1)
+       std     20,160(1)
+       std     21,168(1)
+       li      9, 256
+       stvx    20, 9, 1
+       addi    9, 9, 16
+       stvx    21, 9, 1
+       addi    9, 9, 16
+       stvx    22, 9, 1
+       addi    9, 9, 16
+       stvx    23, 9, 1
+       addi    9, 9, 16
+       stvx    24, 9, 1
+       addi    9, 9, 16
+       stvx    25, 9, 1
+       addi    9, 9, 16
+       stvx    26, 9, 1
+       addi    9, 9, 16
+       stvx    27, 9, 1
+       addi    9, 9, 16
+       stvx    28, 9, 1
+       addi    9, 9, 16
+       stvx    29, 9, 1
+       addi    9, 9, 16
+       stvx    30, 9, 1
+       addi    9, 9, 16
+       stvx    31, 9, 1
+       stxv    14, 464(1)
+       stxv    15, 480(1)
+       stxv    16, 496(1)
+       stxv    17, 512(1)
+       stxv    18, 528(1)
+       stxv    19, 544(1)
+       stxv    20, 560(1)
+       stxv    21, 576(1)
+       stxv    22, 592(1)
+       std     0, 656(1)
+.endm
+
+.macro RESTORE_REGS
+       lxv     14, 464(1)
+       lxv     15, 480(1)
+       lxv     16, 496(1)
+       lxv     17, 512(1)
+       lxv     18, 528(1)
+       lxv     19, 544(1)
+       lxv     20, 560(1)
+       lxv     21, 576(1)
+       lxv     22, 592(1)
+       li      9, 256
+       lvx     20, 9, 1
+       addi    9, 9, 16
+       lvx     21, 9, 1
+       addi    9, 9, 16
+       lvx     22, 9, 1
+       addi    9, 9, 16
+       lvx     23, 9, 1
+       addi    9, 9, 16
+       lvx     24, 9, 1
+       addi    9, 9, 16
+       lvx     25, 9, 1
+       addi    9, 9, 16
+       lvx     26, 9, 1
+       addi    9, 9, 16
+       lvx     27, 9, 1
+       addi    9, 9, 16
+       lvx     28, 9, 1
+       addi    9, 9, 16
+       lvx     29, 9, 1
+       addi    9, 9, 16
+       lvx     30, 9, 1
+       addi    9, 9, 16
+       lvx     31, 9, 1
+
+       ld      0, 656(1)
+       ld      14,112(1)
+       ld      15,120(1)
+       ld      16,128(1)
+       ld      17,136(1)
+       ld      18,144(1)
+       ld      19,152(1)
+       ld      20,160(1)
+       ld      21,168(1)
+
+       mtlr    0
+       addi    1, 1, 640
+.endm
+
+.macro LOAD_HASH_TABLE
+       # Load Xi
+       lxvb16x 32, 0, 8        # load Xi
+
+       # load Hash - h^4, h^3, h^2, h
+       li      10, 32
+       lxvd2x  2+32, 10, 8     # H Poly
+       li      10, 48
+       lxvd2x  3+32, 10, 8     # Hl
+       li      10, 64
+       lxvd2x  4+32, 10, 8     # H
+       li      10, 80
+       lxvd2x  5+32, 10, 8     # Hh
+
+       li      10, 96
+       lxvd2x  6+32, 10, 8     # H^2l
+       li      10, 112
+       lxvd2x  7+32, 10, 8     # H^2
+       li      10, 128
+       lxvd2x  8+32, 10, 8     # H^2h
+
+       li      10, 144
+       lxvd2x  9+32, 10, 8     # H^3l
+       li      10, 160
+       lxvd2x  10+32, 10, 8    # H^3
+       li      10, 176
+       lxvd2x  11+32, 10, 8    # H^3h
+
+       li      10, 192
+       lxvd2x  12+32, 10, 8    # H^4l
+       li      10, 208
+       lxvd2x  13+32, 10, 8    # H^4
+       li      10, 224
+       lxvd2x  14+32, 10, 8    # H^4h
+.endm
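+
+ # The loads above assume this gcm_table layout (byte offsets):
+ #   0 Xi, 32 H Poly, 48/64/80 Hl/H/Hh, 96/112/128 H^2,
+ #   144/160/176 H^3, 192/208/224 H^4 (each power stored as l/mid/h).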
+
+ #
+ # aes_p10_gcm_encrypt (const void *inp, void *out, size_t len,
+ #               const char *rk, unsigned char iv[16], void *Xip);
+ #
+ #    r3 - inp
+ #    r4 - out
+ #    r5 - len
+ #    r6 - AES round keys
+ #    r7 - iv and other data
+ #    r8 - Xi, H Poly, hash keys
+ #
+ #    rounds is at offset 240 in rk
+ #    Xi is at 0 in gcm_table (Xip).
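+ #
+ #    An illustrative C-side call (a sketch only; the real callers
+ #    live in aes-gcm-p10-glue.c and may differ):
+ #      aes_p10_gcm_encrypt(src, dst, len, rkey, iv, gcm_table);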
+ #
+_GLOBAL(aes_p10_gcm_encrypt)
+.align 5
+
+       SAVE_REGS
+
+       LOAD_HASH_TABLE
+
+       # initialize ICB: GHASH( IV ), IV - r7
+       lxvb16x 30+32, 0, 7     # load IV  - v30
+
+       mr      12, 5           # length
+       li      11, 0           # block index
+
+       # counter 1
+       vxor    31, 31, 31
+       vspltisb 22, 1
+       vsldoi  31, 31, 22,1    # counter 1
+
+       # load round key to VSR
+       lxv     0, 0(6)
+       lxv     1, 0x10(6)
+       lxv     2, 0x20(6)
+       lxv     3, 0x30(6)
+       lxv     4, 0x40(6)
+       lxv     5, 0x50(6)
+       lxv     6, 0x60(6)
+       lxv     7, 0x70(6)
+       lxv     8, 0x80(6)
+       lxv     9, 0x90(6)
+       lxv     10, 0xa0(6)
+
+       # load rounds - 10 (128), 12 (192), 14 (256)
+       lwz     9,240(6)
+
+       #
+       # vxor  state, state, w # addroundkey
+       xxlor   32+29, 0, 0
+       vxor    15, 30, 29      # IV + round key - add round key 0
+
+       cmpdi   9, 10
+       beq     Loop_aes_gcm_8x
+
+       # load 2 more round keys (v11, v12)
+       lxv     11, 0xb0(6)
+       lxv     12, 0xc0(6)
+
+       cmpdi   9, 12
+       beq     Loop_aes_gcm_8x
+
+       # load 2 more round keys (v13, v14)
+       lxv     13, 0xd0(6)
+       lxv     14, 0xe0(6)
+       cmpdi   9, 14
+       beq     Loop_aes_gcm_8x
+
+       b       aes_gcm_out
+
+.align 5
+Loop_aes_gcm_8x:
+       mr      14, 3
+       mr      9, 4
+
+       #
+       # check partial block
+       #
+Continue_partial_check:
+       ld      15, 56(7)
+       cmpdi   15, 0
+       beq     Continue
+       bgt     Final_block
+       cmpdi   15, 16
+       blt     Final_block
+
+Continue:
+       # n blocks
+       li      10, 128
+       divdu   10, 12, 10      # n 128-byte blocks
+       cmpdi   10, 0
+       beq     Loop_last_block
+
+       vaddudm 30, 30, 31      # IV + counter
+       vxor    16, 30, 29
+       vaddudm 30, 30, 31
+       vxor    17, 30, 29
+       vaddudm 30, 30, 31
+       vxor    18, 30, 29
+       vaddudm 30, 30, 31
+       vxor    19, 30, 29
+       vaddudm 30, 30, 31
+       vxor    20, 30, 29
+       vaddudm 30, 30, 31
+       vxor    21, 30, 29
+       vaddudm 30, 30, 31
+       vxor    22, 30, 29
+
+       mtctr   10
+
+       li      15, 16
+       li      16, 32
+       li      17, 48
+       li      18, 64
+       li      19, 80
+       li      20, 96
+       li      21, 112
+
+       lwz     10, 240(6)
+
+Loop_8x_block:
+
+       lxvb16x         15, 0, 14       # load block
+       lxvb16x         16, 15, 14      # load block
+       lxvb16x         17, 16, 14      # load block
+       lxvb16x         18, 17, 14      # load block
+       lxvb16x         19, 18, 14      # load block
+       lxvb16x         20, 19, 14      # load block
+       lxvb16x         21, 20, 14      # load block
+       lxvb16x         22, 21, 14      # load block
+       addi            14, 14, 128
+
+       Loop_aes_middle8x
+
+       xxlor   23+32, 10, 10
+
+       cmpdi   10, 10
+       beq     Do_next_ghash
+
+       # 192 bits
+       xxlor   24+32, 11, 11
+
+       vcipher 15, 15, 23
+       vcipher 16, 16, 23
+       vcipher 17, 17, 23
+       vcipher 18, 18, 23
+       vcipher 19, 19, 23
+       vcipher 20, 20, 23
+       vcipher 21, 21, 23
+       vcipher 22, 22, 23
+
+       vcipher 15, 15, 24
+       vcipher 16, 16, 24
+       vcipher 17, 17, 24
+       vcipher 18, 18, 24
+       vcipher 19, 19, 24
+       vcipher 20, 20, 24
+       vcipher 21, 21, 24
+       vcipher 22, 22, 24
+
+       xxlor   23+32, 12, 12
+
+       cmpdi   10, 12
+       beq     Do_next_ghash
+
+       # 256 bits
+       xxlor   24+32, 13, 13
+
+       vcipher 15, 15, 23
+       vcipher 16, 16, 23
+       vcipher 17, 17, 23
+       vcipher 18, 18, 23
+       vcipher 19, 19, 23
+       vcipher 20, 20, 23
+       vcipher 21, 21, 23
+       vcipher 22, 22, 23
+
+       vcipher 15, 15, 24
+       vcipher 16, 16, 24
+       vcipher 17, 17, 24
+       vcipher 18, 18, 24
+       vcipher 19, 19, 24
+       vcipher 20, 20, 24
+       vcipher 21, 21, 24
+       vcipher 22, 22, 24
+
+       xxlor   23+32, 14, 14
+
+       cmpdi   10, 14
+       beq     Do_next_ghash
+       b       aes_gcm_out
+
+Do_next_ghash:
+
+       #
+       # last round
+       vcipherlast     15, 15, 23
+       vcipherlast     16, 16, 23
+
+       xxlxor          47, 47, 15
+       stxvb16x        47, 0, 9        # store output
+       xxlxor          48, 48, 16
+       stxvb16x        48, 15, 9       # store output
+
+       vcipherlast     17, 17, 23
+       vcipherlast     18, 18, 23
+
+       xxlxor          49, 49, 17
+       stxvb16x        49, 16, 9       # store output
+       xxlxor          50, 50, 18
+       stxvb16x        50, 17, 9       # store output
+
+       vcipherlast     19, 19, 23
+       vcipherlast     20, 20, 23
+
+       xxlxor          51, 51, 19
+       stxvb16x        51, 18, 9       # store output
+       xxlxor          52, 52, 20
+       stxvb16x        52, 19, 9       # store output
+
+       vcipherlast     21, 21, 23
+       vcipherlast     22, 22, 23
+
+       xxlxor          53, 53, 21
+       stxvb16x        53, 20, 9       # store output
+       xxlxor          54, 54, 22
+       stxvb16x        54, 21, 9       # store output
+
+       addi            9, 9, 128
+
+       # ghash here
+       ppc_aes_gcm_ghash2_4x
+
+       xxlor   27+32, 0, 0
+       vaddudm 30, 30, 31              # IV + counter
+       vmr     29, 30
+       vxor    15, 30, 27              # add round key
+       vaddudm 30, 30, 31
+       vxor    16, 30, 27
+       vaddudm 30, 30, 31
+       vxor    17, 30, 27
+       vaddudm 30, 30, 31
+       vxor    18, 30, 27
+       vaddudm 30, 30, 31
+       vxor    19, 30, 27
+       vaddudm 30, 30, 31
+       vxor    20, 30, 27
+       vaddudm 30, 30, 31
+       vxor    21, 30, 27
+       vaddudm 30, 30, 31
+       vxor    22, 30, 27
+
+       addi    12, 12, -128
+       addi    11, 11, 128
+
+       bdnz    Loop_8x_block
+
+       vmr     30, 29
+       stxvb16x 30+32, 0, 7            # update IV
+
+Loop_last_block:
+       cmpdi   12, 0
+       beq     aes_gcm_out
+
+       # loop last few blocks
+       li      10, 16
+       divdu   10, 12, 10
+
+       mtctr   10
+
+       lwz     10, 240(6)
+
+       cmpdi   12, 16
+       blt     Final_block
+
+Next_rem_block:
+       lxvb16x 15, 0, 14               # load block
+
+       Loop_aes_middle_1x
+
+       xxlor   23+32, 10, 10
+
+       cmpdi   10, 10
+       beq     Do_next_1x
+
+       # 192 bits
+       xxlor   24+32, 11, 11
+
+       vcipher 15, 15, 23
+       vcipher 15, 15, 24
+
+       xxlor   23+32, 12, 12
+
+       cmpdi   10, 12
+       beq     Do_next_1x
+
+       # 256 bits
+       xxlor   24+32, 13, 13
+
+       vcipher 15, 15, 23
+       vcipher 15, 15, 24
+
+       xxlor   23+32, 14, 14
+
+       cmpdi   10, 14
+       beq     Do_next_1x
+
+Do_next_1x:
+       vcipherlast     15, 15, 23
+
+       xxlxor          47, 47, 15
+       stxvb16x        47, 0, 9        # store output
+       addi            14, 14, 16
+       addi            9, 9, 16
+
+       vmr             28, 15
+       ppc_update_hash_1x
+
+       addi            12, 12, -16
+       addi            11, 11, 16
+       xxlor           19+32, 0, 0
+       vaddudm         30, 30, 31              # IV + counter
+       vxor            15, 30, 19              # add round key
+
+       bdnz    Next_rem_block
+
+       li      15, 0
+       std     15, 56(7)               # clear partial block count
+       stxvb16x 30+32, 0, 7            # update IV
+       cmpdi   12, 0
+       beq     aes_gcm_out
+
+Final_block:
+       lwz     10, 240(6)
+       Loop_aes_middle_1x
+
+       xxlor   23+32, 10, 10
+
+       cmpdi   10, 10
+       beq     Do_final_1x
+
+       # 192 bits
+       xxlor   24+32, 11, 11
+
+       vcipher 15, 15, 23
+       vcipher 15, 15, 24
+
+       xxlor   23+32, 12, 12
+
+       cmpdi   10, 12
+       beq     Do_final_1x
+
+       # 256 bits
+       xxlor   24+32, 13, 13
+
+       vcipher 15, 15, 23
+       vcipher 15, 15, 24
+
+       xxlor   23+32, 14, 14
+
+       cmpdi   10, 14
+       beq     Do_final_1x
+
+Do_final_1x:
+       vcipherlast     15, 15, 23
+
+       # check partial block
+       li      21, 0                   # encrypt
+       ld      15, 56(7)               # partial block pending?
+       cmpdi   15, 0
+       beq     Normal_block
+       bl      Do_partial_block
+
+       cmpdi   12, 0
+       ble aes_gcm_out
+
+       b Continue_partial_check
+
+Normal_block:
+       lxvb16x 15, 0, 14               # load last block
+       xxlxor  47, 47, 15
+
+       # create partial block mask
+       li      15, 16
+       sub     15, 15, 12              # index to the mask
+
+       vspltisb        16, -1          # first 16 bytes - 0xffff...ff
+       vspltisb        17, 0           # second 16 bytes - 0x0000...00
+       li      10, 192
+       stvx    16, 10, 1
+       addi    10, 10, 16
+       stvx    17, 10, 1
+
+       addi    10, 1, 192
+       lxvb16x 16, 15, 10              # load partial block mask
+       xxland  47, 47, 16
+
+       vmr     28, 15
+       ppc_update_hash_1x
+
+       # * should store only the remaining bytes.
+       bl      Write_partial_block
+
+       stxvb16x 30+32, 0, 7            # update IV
+       std     12, 56(7)               # update partial block count
+       li      16, 16
+
+       stxvb16x        32, 0, 8                # write out Xi
+       stxvb16x        32, 16, 8               # write out Xi
+       b aes_gcm_out
+
+ #
+ # Compute data mask
+ #
+.macro GEN_MASK _mask _start _end
+       vspltisb        16, -1          # first 16 bytes - 0xffff...ff
+       vspltisb        17, 0           # second 16 bytes - 0x0000...00
+       li      10, 192
+       stxvb16x        17+32, 10, 1
+       add     10, 10, \_start
+       stxvb16x        16+32, 10, 1
+       add     10, 10, \_end
+       stxvb16x        17+32, 10, 1
+
+       addi    10, 1, 192
+       lxvb16x \_mask, 0, 10           # load partial block mask
+.endm
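+
+ # GEN_MASK yields a 16-byte mask with 0x00 in the first _start bytes
+ # and 0xff in the next _end bytes: both patterns are stored to the
+ # scratch area at 192(r1) and 16 bytes are reloaded from its base.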
+
+ #
+ # Handle multiple partial blocks for encrypt and decrypt
+ #   operations.
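+ #   Expects r15 = buffered partial byte count (loaded from 56(r7)),
+ #   r21 = 0 for encrypt / 1 for decrypt, r12 = bytes remaining.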
+ #
+SYM_FUNC_START_LOCAL(Do_partial_block)
+       add     17, 15, 5
+       cmpdi   17, 16
+       bgt     Big_block
+       GEN_MASK 18, 15, 5
+       b       _Partial
+SYM_FUNC_END(Do_partial_block)
+Big_block:
+       li      16, 16
+       GEN_MASK 18, 15, 16
+
+_Partial:
+       lxvb16x 17+32, 0, 14            # load last block
+       sldi    16, 15, 3
+       mtvsrdd 32+16, 0, 16
+       vsro    17, 17, 16
+       xxlxor  47, 47, 17+32
+       xxland  47, 47, 18
+
+       vxor    0, 0, 0                 # clear Xi
+       vmr     28, 15
+
+       cmpdi   21, 0                   # encrypt/decrypt ops?
+       beq     Skip_decrypt
+       xxland  32+28, 32+17, 18
+
+Skip_decrypt:
+
+       ppc_update_hash_1x
+
+       li      16, 16
+       lxvb16x 32+29, 16, 8
+       vxor    0, 0, 29
+       stxvb16x 32, 0, 8               # save Xi
+       stxvb16x 32, 16, 8              # save Xi
+
+       # store partial block
+       # loop the rest of the stream if any
+       sldi    16, 15, 3
+       mtvsrdd 32+16, 0, 16
+       vslo    15, 15, 16
+       #stxvb16x 15+32, 0, 9           # last block
+
+       li      16, 16
+       sub     17, 16, 15              # 16 - partial
+
+       add     16, 15, 5
+       cmpdi   16, 16
+       bgt     Larger_16
+       mr      17, 5
+Larger_16:
+
+       # write partial
+       li              10, 192
+       stxvb16x        15+32, 10, 1    # save current block
+
+       addi            10, 9, -1
+       addi            16, 1, 191
+       mtctr           17              # move partial byte count
+
+Write_last_partial:
+        lbzu           18, 1(16)
+       stbu            18, 1(10)
+        bdnz           Write_last_partial
+       # Complete loop partial
+
+       add     14, 14, 17
+       add     9, 9, 17
+       sub     12, 12, 17
+       add     11, 11, 17
+
+       add     15, 15, 5
+       cmpdi   15, 16
+       blt     Save_partial
+
+       vaddudm 30, 30, 31
+       stxvb16x 30+32, 0, 7            # update IV
+       xxlor   32+29, 0, 0
+       vxor    15, 30, 29              # IV + round key - add round key 0
+       li      15, 0
+       std     15, 56(7)               # partial done - clear
+       b       Partial_done
+Save_partial:
+       std     15, 56(7)               # partial
+
+Partial_done:
+       blr
+
+ #
+ # Write partial block
+ # r9 - output
+ # r12 - remaining bytes
+ # v15 - partial input data
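+ #   (spills v15 to the scratch area at 192(r1), then byte-copies
+ #   r12 bytes from there to the output pointer)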
+ #
+SYM_FUNC_START_LOCAL(Write_partial_block)
+       li              10, 192
+       stxvb16x        15+32, 10, 1            # last block
+
+       addi            10, 9, -1
+       addi            16, 1, 191
+
+        mtctr          12                      # remaining bytes
+       li              15, 0
+
+Write_last_byte:
+        lbzu           14, 1(16)
+       stbu            14, 1(10)
+        bdnz           Write_last_byte
+       blr
+SYM_FUNC_END(Write_partial_block)
+
+aes_gcm_out:
+       # out = state
+       stxvb16x        32, 0, 8                # write out Xi
+       add     3, 11, 12               # return count
+
+       RESTORE_REGS
+       blr
+
+ #
+ # 8x Decrypt
+ #
+_GLOBAL(aes_p10_gcm_decrypt)
+.align 5
+
+       SAVE_REGS
+
+       LOAD_HASH_TABLE
+
+       # initialize ICB: GHASH( IV ), IV - r7
+       lxvb16x 30+32, 0, 7     # load IV  - v30
+
+       mr      12, 5           # length
+       li      11, 0           # block index
+
+       # counter 1
+       vxor    31, 31, 31
+       vspltisb 22, 1
+       vsldoi  31, 31, 22,1    # counter 1
+
+       # load round key to VSR
+       lxv     0, 0(6)
+       lxv     1, 0x10(6)
+       lxv     2, 0x20(6)
+       lxv     3, 0x30(6)
+       lxv     4, 0x40(6)
+       lxv     5, 0x50(6)
+       lxv     6, 0x60(6)
+       lxv     7, 0x70(6)
+       lxv     8, 0x80(6)
+       lxv     9, 0x90(6)
+       lxv     10, 0xa0(6)
+
+       # load rounds - 10 (128), 12 (192), 14 (256)
+       lwz     9,240(6)
+
+       #
+       # vxor  state, state, w # addroundkey
+       xxlor   32+29, 0, 0
+       vxor    15, 30, 29      # IV + round key - add round key 0
+
+       cmpdi   9, 10
+       beq     Loop_aes_gcm_8x_dec
+
+       # load 2 more round keys (v11, v12)
+       lxv     11, 0xb0(6)
+       lxv     12, 0xc0(6)
+
+       cmpdi   9, 12
+       beq     Loop_aes_gcm_8x_dec
+
+       # load 2 more round keys (v13, v14)
+       lxv     13, 0xd0(6)
+       lxv     14, 0xe0(6)
+       cmpdi   9, 14
+       beq     Loop_aes_gcm_8x_dec
+
+       b       aes_gcm_out
+
+.align 5
+Loop_aes_gcm_8x_dec:
+       mr      14, 3
+       mr      9, 4
+
+       #
+       # check partial block
+       #
+Continue_partial_check_dec:
+       ld      15, 56(7)
+       cmpdi   15, 0
+       beq     Continue_dec
+       bgt     Final_block_dec
+       cmpdi   15, 16
+       blt     Final_block_dec
+
+Continue_dec:
+       # n blocks
+       li      10, 128
+       divdu   10, 12, 10      # n 128-byte blocks
+       cmpdi   10, 0
+       beq     Loop_last_block_dec
+
+       vaddudm 30, 30, 31      # IV + counter
+       vxor    16, 30, 29
+       vaddudm 30, 30, 31
+       vxor    17, 30, 29
+       vaddudm 30, 30, 31
+       vxor    18, 30, 29
+       vaddudm 30, 30, 31
+       vxor    19, 30, 29
+       vaddudm 30, 30, 31
+       vxor    20, 30, 29
+       vaddudm 30, 30, 31
+       vxor    21, 30, 29
+       vaddudm 30, 30, 31
+       vxor    22, 30, 29
+
+       mtctr   10
+
+       li      15, 16
+       li      16, 32
+       li      17, 48
+       li      18, 64
+       li      19, 80
+       li      20, 96
+       li      21, 112
+
+       lwz     10, 240(6)
+
+Loop_8x_block_dec:
+
+       lxvb16x         15, 0, 14       # load block
+       lxvb16x         16, 15, 14      # load block
+       lxvb16x         17, 16, 14      # load block
+       lxvb16x         18, 17, 14      # load block
+       lxvb16x         19, 18, 14      # load block
+       lxvb16x         20, 19, 14      # load block
+       lxvb16x         21, 20, 14      # load block
+       lxvb16x         22, 21, 14      # load block
+       addi            14, 14, 128
+
+       Loop_aes_middle8x
+
+       xxlor   23+32, 10, 10
+
+       cmpdi   10, 10
+       beq     Do_next_ghash_dec
+
+       # 192 bits
+       xxlor   24+32, 11, 11
+
+       vcipher 15, 15, 23
+       vcipher 16, 16, 23
+       vcipher 17, 17, 23
+       vcipher 18, 18, 23
+       vcipher 19, 19, 23
+       vcipher 20, 20, 23
+       vcipher 21, 21, 23
+       vcipher 22, 22, 23
+
+       vcipher 15, 15, 24
+       vcipher 16, 16, 24
+       vcipher 17, 17, 24
+       vcipher 18, 18, 24
+       vcipher 19, 19, 24
+       vcipher 20, 20, 24
+       vcipher 21, 21, 24
+       vcipher 22, 22, 24
+
+       xxlor   23+32, 12, 12
+
+       cmpdi   10, 12
+       beq     Do_next_ghash_dec
+
+       # 256 bits
+       xxlor   24+32, 13, 13
+
+       vcipher 15, 15, 23
+       vcipher 16, 16, 23
+       vcipher 17, 17, 23
+       vcipher 18, 18, 23
+       vcipher 19, 19, 23
+       vcipher 20, 20, 23
+       vcipher 21, 21, 23
+       vcipher 22, 22, 23
+
+       vcipher 15, 15, 24
+       vcipher 16, 16, 24
+       vcipher 17, 17, 24
+       vcipher 18, 18, 24
+       vcipher 19, 19, 24
+       vcipher 20, 20, 24
+       vcipher 21, 21, 24
+       vcipher 22, 22, 24
+
+       xxlor   23+32, 14, 14
+
+       cmpdi   10, 14
+       beq     Do_next_ghash_dec
+       b       aes_gcm_out
+
+Do_next_ghash_dec:
+
+       #
+       # last round
+       vcipherlast     15, 15, 23
+       vcipherlast     16, 16, 23
+
+       xxlxor          47, 47, 15
+       stxvb16x        47, 0, 9        # store output
+       xxlxor          48, 48, 16
+       stxvb16x        48, 15, 9       # store output
+
+       vcipherlast     17, 17, 23
+       vcipherlast     18, 18, 23
+
+       xxlxor          49, 49, 17
+       stxvb16x        49, 16, 9       # store output
+       xxlxor          50, 50, 18
+       stxvb16x        50, 17, 9       # store output
+
+       vcipherlast     19, 19, 23
+       vcipherlast     20, 20, 23
+
+       xxlxor          51, 51, 19
+       stxvb16x        51, 18, 9       # store output
+       xxlxor          52, 52, 20
+       stxvb16x        52, 19, 9       # store output
+
+       vcipherlast     21, 21, 23
+       vcipherlast     22, 22, 23
+
+       xxlxor          53, 53, 21
+       stxvb16x        53, 20, 9       # store output
+       xxlxor          54, 54, 22
+       stxvb16x        54, 21, 9       # store output
+
+       addi            9, 9, 128
+
+       xxlor           15+32, 15, 15
+       xxlor           16+32, 16, 16
+       xxlor           17+32, 17, 17
+       xxlor           18+32, 18, 18
+       xxlor           19+32, 19, 19
+       xxlor           20+32, 20, 20
+       xxlor           21+32, 21, 21
+       xxlor           22+32, 22, 22
+
+       # ghash here
+       ppc_aes_gcm_ghash2_4x
+
+       xxlor   27+32, 0, 0
+       vaddudm 30, 30, 31              # IV + counter
+       vmr     29, 30
+       vxor    15, 30, 27              # add round key
+       vaddudm 30, 30, 31
+       vxor    16, 30, 27
+       vaddudm 30, 30, 31
+       vxor    17, 30, 27
+       vaddudm 30, 30, 31
+       vxor    18, 30, 27
+       vaddudm 30, 30, 31
+       vxor    19, 30, 27
+       vaddudm 30, 30, 31
+       vxor    20, 30, 27
+       vaddudm 30, 30, 31
+       vxor    21, 30, 27
+       vaddudm 30, 30, 31
+       vxor    22, 30, 27
+
+       addi    12, 12, -128
+       addi    11, 11, 128
+
+       bdnz    Loop_8x_block_dec
+
+       vmr     30, 29
+       stxvb16x 30+32, 0, 7            # update IV
+
+Loop_last_block_dec:
+       cmpdi   12, 0
+       beq     aes_gcm_out
+
+       # loop last few blocks
+       li      10, 16
+       divdu   10, 12, 10
+
+       mtctr   10
+
+       lwz     10, 240(6)
+
+       cmpdi   12, 16
+       blt     Final_block_dec
+
+Next_rem_block_dec:
+       lxvb16x 15, 0, 14               # load block
+
+       Loop_aes_middle_1x
+
+       xxlor   23+32, 10, 10
+
+       cmpdi   10, 10
+       beq     Do_next_1x_dec
+
+       # 192 bits
+       xxlor   24+32, 11, 11
+
+       vcipher 15, 15, 23
+       vcipher 15, 15, 24
+
+       xxlor   23+32, 12, 12
+
+       cmpdi   10, 12
+       beq     Do_next_1x_dec
+
+       # 256 bits
+       xxlor   24+32, 13, 13
+
+       vcipher 15, 15, 23
+       vcipher 15, 15, 24
+
+       xxlor   23+32, 14, 14
+
+       cmpdi   10, 14
+       beq     Do_next_1x_dec
+
+Do_next_1x_dec:
+       vcipherlast     15, 15, 23
+
+       xxlxor          47, 47, 15
+       stxvb16x        47, 0, 9        # store output
+       addi            14, 14, 16
+       addi            9, 9, 16
+
+       xxlor           28+32, 15, 15
+       #vmr            28, 15
+       ppc_update_hash_1x
+
+       addi            12, 12, -16
+       addi            11, 11, 16
+       xxlor           19+32, 0, 0
+       vaddudm         30, 30, 31              # IV + counter
+       vxor            15, 30, 19              # add round key
+
+       bdnz    Next_rem_block_dec
+
+       li      15, 0
+       std     15, 56(7)               # clear partial block count
+       stxvb16x 30+32, 0, 7            # update IV
+       cmpdi   12, 0
+       beq     aes_gcm_out
+
+Final_block_dec:
+       lwz     10, 240(6)
+       Loop_aes_middle_1x
+
+       xxlor   23+32, 10, 10
+
+       cmpdi   10, 10
+       beq     Do_final_1x_dec
+
+       # 192 bits
+       xxlor   24+32, 11, 11
+
+       vcipher 15, 15, 23
+       vcipher 15, 15, 24
+
+       xxlor   23+32, 12, 12
+
+       cmpdi   10, 12
+       beq     Do_final_1x_dec
+
+       # 256 bits
+       xxlor   24+32, 13, 13
+
+       vcipher 15, 15, 23
+       vcipher 15, 15, 24
+
+       xxlor   23+32, 14, 14
+
+       cmpdi   10, 14
+       beq     Do_final_1x_dec
+
+Do_final_1x_dec:
+       vcipherlast     15, 15, 23
+
+       # check partial block
+       li      21, 1                   # decrypt
+       ld      15, 56(7)               # partial block pending?
+       cmpdi   15, 0
+       beq     Normal_block_dec
+       bl      Do_partial_block
+       cmpdi   12, 0
+       ble aes_gcm_out
+
+       b Continue_partial_check_dec
+
+Normal_block_dec:
+       lxvb16x 15, 0, 14               # load last block
+       xxlxor  47, 47, 15
+
+       # create partial block mask
+       li      15, 16
+       sub     15, 15, 12              # index to the mask
+
+       vspltisb        16, -1          # first 16 bytes - 0xffff...ff
+       vspltisb        17, 0           # second 16 bytes - 0x0000...00
+       li      10, 192
+       stvx    16, 10, 1
+       addi    10, 10, 16
+       stvx    17, 10, 1
+
+       addi    10, 1, 192
+       lxvb16x 16, 15, 10              # load partial block mask
+       xxland  47, 47, 16
+
+       xxland  32+28, 15, 16
+       #vmr    28, 15
+       ppc_update_hash_1x
+
+       # * should store only the remaining bytes.
+       bl      Write_partial_block
+
+       stxvb16x 30+32, 0, 7            # update IV
+       std     12, 56(7)               # update partial block count
+       li      16, 16
+
+       stxvb16x        32, 0, 8                # write out Xi
+       stxvb16x        32, 16, 8               # write out Xi
+       b aes_gcm_out
diff --git a/arch/powerpc/crypto/aesp8-ppc.pl b/arch/powerpc/crypto/aesp8-ppc.pl
new file mode 100644 (file)
index 0000000..1f22aec
--- /dev/null
@@ -0,0 +1,585 @@
+#! /usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# This code is taken from CRYPTOGAMs[1] and is included here using the option
+# in the license to distribute the code under the GPL. Therefore this program
+# is free software; you can redistribute it and/or modify it under the terms of
+# the GNU General Public License version 2 as published by the Free Software
+# Foundation.
+#
+# [1] https://www.openssl.org/~appro/cryptogams/
+
+# Copyright (c) 2006-2017, CRYPTOGAMS by <[email protected]>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#       * Redistributions of source code must retain copyright notices,
+#         this list of conditions and the following disclaimer.
+#
+#       * Redistributions in binary form must reproduce the above
+#         copyright notice, this list of conditions and the following
+#         disclaimer in the documentation and/or other materials
+#         provided with the distribution.
+#
+#       * Neither the name of the CRYPTOGAMS nor the names of its
+#         copyright holder and contributors may be used to endorse or
+#         promote products derived from this software without specific
+#         prior written permission.
+#
+# ALTERNATIVELY, provided that this notice is retained in full, this
+# product may be distributed under the terms of the GNU General Public
+# License (GPL), in which case the provisions of the GPL apply INSTEAD OF
+# those given above.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# ====================================================================
+# Written by Andy Polyakov <[email protected]> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see https://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for AES instructions as per the PowerISA
+# specification version 2.07, first implemented by the POWER8 processor.
+# The module is endian-agnostic in the sense that it supports both big-
+# and little-endian cases. Data alignment in parallelizable modes is
+# handled with VSX loads and stores, which implies the MSR.VSX flag being
+# set. It should also be noted that the ISA specification doesn't prohibit
+# alignment exceptions for these instructions on page boundaries.
+# Initially alignment was handled in a pure AltiVec/VMX way [data
+# aligned programmatically, which in turn guarantees exception-
+# free execution], but that turned out to hamper performance when vcipher
+# instructions are interleaved. It's reckoned that the eventual
+# misalignment penalties at page boundaries are on average lower
+# than the additional overhead of the pure AltiVec approach.
+#
+# May 2016
+#
+# Added an XTS subroutine; a 9x improvement on little- and a 12x
+# improvement on big-endian systems was measured.
+#
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+#              CBC en-/decrypt CTR     XTS
+# POWER8[le]   3.96/0.72       0.74    1.1
+# POWER8[be]   3.75/0.65       0.66    1.0
+
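+# The "flavour" argument selects word size and endianness; an
+# illustrative build invocation (output name assumed) would be:
+#
+#    perl aesp8-ppc.pl linux-ppc64le aesp8-ppc.S
+#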
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+       $SIZE_T =8;
+       $LRSAVE =2*$SIZE_T;
+       $STU    ="stdu";
+       $POP    ="ld";
+       $PUSH   ="std";
+       $UCMP   ="cmpld";
+       $SHL    ="sldi";
+} elsif ($flavour =~ /32/) {
+       $SIZE_T =4;
+       $LRSAVE =$SIZE_T;
+       $STU    ="stwu";
+       $POP    ="lwz";
+       $PUSH   ="stw";
+       $UCMP   ="cmplw";
+       $SHL    ="slwi";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$FRAME=8*$SIZE_T;
+$prefix="aes_p8";
+
+$sp="r1";
+$vrsave="r12";
+
+#########################################################################
+{{{    # Key setup procedures                                          #
+my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8));
+my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6));
+my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11));
+
+$code.=<<___;
+.machine       "any"
+
+.text
+
+.align 7
+rcon:
+.long  0x01000000, 0x01000000, 0x01000000, 0x01000000  ?rev
+.long  0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000  ?rev
+.long  0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c  ?rev
+.long  0,0,0,0                                         ?asis
+Lconsts:
+       mflr    r0
+       bcl     20,31,\$+4
+       mflr    $ptr                    # distance between . and rcon
+       addi    $ptr,$ptr,-0x48
+       mtlr    r0
+       blr
+       .long   0
+       .byte   0,12,0x14,0,0,0,0,0
+.asciz "AES for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+
+.globl .${prefix}_set_encrypt_key
+Lset_encrypt_key:
+       mflr            r11
+       $PUSH           r11,$LRSAVE($sp)
+
+       li              $ptr,-1
+       ${UCMP}i        $inp,0
+       beq-            Lenc_key_abort          # if ($inp==0) return -1;
+       ${UCMP}i        $out,0
+       beq-            Lenc_key_abort          # if ($out==0) return -1;
+       li              $ptr,-2
+       cmpwi           $bits,128
+       blt-            Lenc_key_abort
+       cmpwi           $bits,256
+       bgt-            Lenc_key_abort
+       andi.           r0,$bits,0x3f
+       bne-            Lenc_key_abort
+
+       lis             r0,0xfff0
+       mfspr           $vrsave,256
+       mtspr           256,r0
+
+       bl              Lconsts
+       mtlr            r11
+
+       neg             r9,$inp
+       lvx             $in0,0,$inp
+       addi            $inp,$inp,15            # 15 is not a typo
+       lvsr            $key,0,r9               # borrow $key
+       li              r8,0x20
+       cmpwi           $bits,192
+       lvx             $in1,0,$inp
+       le?vspltisb     $mask,0x0f              # borrow $mask
+       lvx             $rcon,0,$ptr
+       le?vxor         $key,$key,$mask         # adjust for byte swap
+       lvx             $mask,r8,$ptr
+       addi            $ptr,$ptr,0x10
+       vperm           $in0,$in0,$in1,$key     # align [and byte swap in LE]
+       li              $cnt,8
+       vxor            $zero,$zero,$zero
+       mtctr           $cnt
+
+       ?lvsr           $outperm,0,$out
+       vspltisb        $outmask,-1
+       lvx             $outhead,0,$out
+       ?vperm          $outmask,$zero,$outmask,$outperm
+
+       blt             Loop128
+       addi            $inp,$inp,8
+       beq             L192
+       addi            $inp,$inp,8
+       b               L256
+
+.align 4
+Loop128:
+       vperm           $key,$in0,$in0,$mask    # rotate-n-splat
+       vsldoi          $tmp,$zero,$in0,12      # >>32
+        vperm          $outtail,$in0,$in0,$outperm     # rotate
+        vsel           $stage,$outhead,$outtail,$outmask
+        vmr            $outhead,$outtail
+       vcipherlast     $key,$key,$rcon
+        stvx           $stage,0,$out
+        addi           $out,$out,16
+
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in0,$in0,$tmp
+        vadduwm        $rcon,$rcon,$rcon
+       vxor            $in0,$in0,$key
+       bdnz            Loop128
+
+       lvx             $rcon,0,$ptr            # last two round keys
+
+       vperm           $key,$in0,$in0,$mask    # rotate-n-splat
+       vsldoi          $tmp,$zero,$in0,12      # >>32
+        vperm          $outtail,$in0,$in0,$outperm     # rotate
+        vsel           $stage,$outhead,$outtail,$outmask
+        vmr            $outhead,$outtail
+       vcipherlast     $key,$key,$rcon
+        stvx           $stage,0,$out
+        addi           $out,$out,16
+
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in0,$in0,$tmp
+        vadduwm        $rcon,$rcon,$rcon
+       vxor            $in0,$in0,$key
+
+       vperm           $key,$in0,$in0,$mask    # rotate-n-splat
+       vsldoi          $tmp,$zero,$in0,12      # >>32
+        vperm          $outtail,$in0,$in0,$outperm     # rotate
+        vsel           $stage,$outhead,$outtail,$outmask
+        vmr            $outhead,$outtail
+       vcipherlast     $key,$key,$rcon
+        stvx           $stage,0,$out
+        addi           $out,$out,16
+
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in0,$in0,$tmp
+       vxor            $in0,$in0,$key
+        vperm          $outtail,$in0,$in0,$outperm     # rotate
+        vsel           $stage,$outhead,$outtail,$outmask
+        vmr            $outhead,$outtail
+        stvx           $stage,0,$out
+
+       addi            $inp,$out,15            # 15 is not a typo
+       addi            $out,$out,0x50
+
+       li              $rounds,10
+       b               Ldone
+
+.align 4
+L192:
+       lvx             $tmp,0,$inp
+       li              $cnt,4
+        vperm          $outtail,$in0,$in0,$outperm     # rotate
+        vsel           $stage,$outhead,$outtail,$outmask
+        vmr            $outhead,$outtail
+        stvx           $stage,0,$out
+        addi           $out,$out,16
+       vperm           $in1,$in1,$tmp,$key     # align [and byte swap in LE]
+       vspltisb        $key,8                  # borrow $key
+       mtctr           $cnt
+       vsububm         $mask,$mask,$key        # adjust the mask
+
+Loop192:
+       vperm           $key,$in1,$in1,$mask    # rotate-n-splat
+       vsldoi          $tmp,$zero,$in0,12      # >>32
+       vcipherlast     $key,$key,$rcon
+
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in0,$in0,$tmp
+
+        vsldoi         $stage,$zero,$in1,8
+       vspltw          $tmp,$in0,3
+       vxor            $tmp,$tmp,$in1
+       vsldoi          $in1,$zero,$in1,12      # >>32
+        vadduwm        $rcon,$rcon,$rcon
+       vxor            $in1,$in1,$tmp
+       vxor            $in0,$in0,$key
+       vxor            $in1,$in1,$key
+        vsldoi         $stage,$stage,$in0,8
+
+       vperm           $key,$in1,$in1,$mask    # rotate-n-splat
+       vsldoi          $tmp,$zero,$in0,12      # >>32
+        vperm          $outtail,$stage,$stage,$outperm # rotate
+        vsel           $stage,$outhead,$outtail,$outmask
+        vmr            $outhead,$outtail
+       vcipherlast     $key,$key,$rcon
+        stvx           $stage,0,$out
+        addi           $out,$out,16
+
+        vsldoi         $stage,$in0,$in1,8
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+        vperm          $outtail,$stage,$stage,$outperm # rotate
+        vsel           $stage,$outhead,$outtail,$outmask
+        vmr            $outhead,$outtail
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in0,$in0,$tmp
+        stvx           $stage,0,$out
+        addi           $out,$out,16
+
+       vspltw          $tmp,$in0,3
+       vxor            $tmp,$tmp,$in1
+       vsldoi          $in1,$zero,$in1,12      # >>32
+        vadduwm        $rcon,$rcon,$rcon
+       vxor            $in1,$in1,$tmp
+       vxor            $in0,$in0,$key
+       vxor            $in1,$in1,$key
+        vperm          $outtail,$in0,$in0,$outperm     # rotate
+        vsel           $stage,$outhead,$outtail,$outmask
+        vmr            $outhead,$outtail
+        stvx           $stage,0,$out
+        addi           $inp,$out,15            # 15 is not a typo
+        addi           $out,$out,16
+       bdnz            Loop192
+
+       li              $rounds,12
+       addi            $out,$out,0x20
+       b               Ldone
+
+.align 4
+L256:
+       lvx             $tmp,0,$inp
+       li              $cnt,7
+       li              $rounds,14
+        vperm          $outtail,$in0,$in0,$outperm     # rotate
+        vsel           $stage,$outhead,$outtail,$outmask
+        vmr            $outhead,$outtail
+        stvx           $stage,0,$out
+        addi           $out,$out,16
+       vperm           $in1,$in1,$tmp,$key     # align [and byte swap in LE]
+       mtctr           $cnt
+
+Loop256:
+       vperm           $key,$in1,$in1,$mask    # rotate-n-splat
+       vsldoi          $tmp,$zero,$in0,12      # >>32
+        vperm          $outtail,$in1,$in1,$outperm     # rotate
+        vsel           $stage,$outhead,$outtail,$outmask
+        vmr            $outhead,$outtail
+       vcipherlast     $key,$key,$rcon
+        stvx           $stage,0,$out
+        addi           $out,$out,16
+
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in0,$in0,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in0,$in0,$tmp
+        vadduwm        $rcon,$rcon,$rcon
+       vxor            $in0,$in0,$key
+        vperm          $outtail,$in0,$in0,$outperm     # rotate
+        vsel           $stage,$outhead,$outtail,$outmask
+        vmr            $outhead,$outtail
+        stvx           $stage,0,$out
+        addi           $inp,$out,15            # 15 is not a typo
+        addi           $out,$out,16
+       bdz             Ldone
+
+       vspltw          $key,$in0,3             # just splat
+       vsldoi          $tmp,$zero,$in1,12      # >>32
+       vsbox           $key,$key
+
+       vxor            $in1,$in1,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in1,$in1,$tmp
+       vsldoi          $tmp,$zero,$tmp,12      # >>32
+       vxor            $in1,$in1,$tmp
+
+       vxor            $in1,$in1,$key
+       b               Loop256
+
+.align 4
+Ldone:
+       lvx             $in1,0,$inp             # redundant in aligned case
+       vsel            $in1,$outhead,$in1,$outmask
+       stvx            $in1,0,$inp
+       li              $ptr,0
+       mtspr           256,$vrsave
+       stw             $rounds,0($out)
+
+Lenc_key_abort:
+       mr              r3,$ptr
+       blr
+       .long           0
+       .byte           0,12,0x14,1,0,0,3,0
+       .long           0
+.size  .${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key
+
+.globl .${prefix}_set_decrypt_key
+       $STU            $sp,-$FRAME($sp)
+       mflr            r10
+       $PUSH           r10,$FRAME+$LRSAVE($sp)
+       bl              Lset_encrypt_key
+       mtlr            r10
+
+       cmpwi           r3,0
+       bne-            Ldec_key_abort
+
+       slwi            $cnt,$rounds,4
+       subi            $inp,$out,240           # first round key
+       srwi            $rounds,$rounds,1
+       add             $out,$inp,$cnt          # last round key
+       mtctr           $rounds
+
+Ldeckey:
+       lwz             r0, 0($inp)
+       lwz             r6, 4($inp)
+       lwz             r7, 8($inp)
+       lwz             r8, 12($inp)
+       addi            $inp,$inp,16
+       lwz             r9, 0($out)
+       lwz             r10,4($out)
+       lwz             r11,8($out)
+       lwz             r12,12($out)
+       stw             r0, 0($out)
+       stw             r6, 4($out)
+       stw             r7, 8($out)
+       stw             r8, 12($out)
+       subi            $out,$out,16
+       stw             r9, -16($inp)
+       stw             r10,-12($inp)
+       stw             r11,-8($inp)
+       stw             r12,-4($inp)
+       bdnz            Ldeckey
+
+       xor             r3,r3,r3                # return value
+Ldec_key_abort:
+       addi            $sp,$sp,$FRAME
+       blr
+       .long           0
+       .byte           0,12,4,1,0x80,0,3,0
+       .long           0
+.size  .${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key
+___
+}}}
+#########################################################################
+{{{    # Single block en- and decrypt procedures                       #
+sub gen_block () {
+my $dir = shift;
+my $n   = $dir eq "de" ? "n" : "";
+my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7));
+
+$code.=<<___;
+.globl .${prefix}_${dir}crypt
+       lwz             $rounds,240($key)
+       lis             r0,0xfc00
+       mfspr           $vrsave,256
+       li              $idx,15                 # 15 is not a typo
+       mtspr           256,r0
+
+       lvx             v0,0,$inp
+       neg             r11,$out
+       lvx             v1,$idx,$inp
+       lvsl            v2,0,$inp               # inpperm
+       le?vspltisb     v4,0x0f
+       ?lvsl           v3,0,r11                # outperm
+       le?vxor         v2,v2,v4
+       li              $idx,16
+       vperm           v0,v0,v1,v2             # align [and byte swap in LE]
+       lvx             v1,0,$key
+       ?lvsl           v5,0,$key               # keyperm
+       srwi            $rounds,$rounds,1
+       lvx             v2,$idx,$key
+       addi            $idx,$idx,16
+       subi            $rounds,$rounds,1
+       ?vperm          v1,v1,v2,v5             # align round key
+
+       vxor            v0,v0,v1
+       lvx             v1,$idx,$key
+       addi            $idx,$idx,16
+       mtctr           $rounds
+
+Loop_${dir}c:
+       ?vperm          v2,v2,v1,v5
+       v${n}cipher     v0,v0,v2
+       lvx             v2,$idx,$key
+       addi            $idx,$idx,16
+       ?vperm          v1,v1,v2,v5
+       v${n}cipher     v0,v0,v1
+       lvx             v1,$idx,$key
+       addi            $idx,$idx,16
+       bdnz            Loop_${dir}c
+
+       ?vperm          v2,v2,v1,v5
+       v${n}cipher     v0,v0,v2
+       lvx             v2,$idx,$key
+       ?vperm          v1,v1,v2,v5
+       v${n}cipherlast v0,v0,v1
+
+       vspltisb        v2,-1
+       vxor            v1,v1,v1
+       li              $idx,15                 # 15 is not a typo
+       ?vperm          v2,v1,v2,v3             # outmask
+       le?vxor         v3,v3,v4
+       lvx             v1,0,$out               # outhead
+       vperm           v0,v0,v0,v3             # rotate [and byte swap in LE]
+       vsel            v1,v1,v0,v2
+       lvx             v4,$idx,$out
+       stvx            v1,0,$out
+       vsel            v0,v0,v4,v2
+       stvx            v0,$idx,$out
+
+       mtspr           256,$vrsave
+       blr
+       .long           0
+       .byte           0,12,0x14,0,0,0,3,0
+       .long           0
+.size  .${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt
+___
+}
+&gen_block("en");
+&gen_block("de");
+}}}
+
+my $consts=1;
+foreach(split("\n",$code)) {
+        s/\`([^\`]*)\`/eval($1)/geo;
+
+       # constants table endian-specific conversion
+       if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) {
+           my $conv=$3;
+           my @bytes=();
+
+           # convert to endian-agnostic format
+           if ($1 eq "long") {
+             foreach (split(/,\s*/,$2)) {
+               my $l = /^0/?oct:int;
+               push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff;
+             }
+           } else {
+               @bytes = map(/^0/?oct:int,split(/,\s*/,$2));
+           }
+
+           # little-endian conversion
+           if ($flavour =~ /le$/o) {
+               SWITCH: for($conv)  {
+                   /\?inv/ && do   { @bytes=map($_^0xf,@bytes); last; };
+                   /\?rev/ && do   { @bytes=reverse(@bytes);    last; };
+               }
+           }
+
+           #emit
+           print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n";
+           next;
+       }
+       $consts=0 if (m/Lconsts:/o);    # end of table
+
+       # instructions prefixed with '?' are endian-specific and need
+       # to be adjusted accordingly...
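+       # (e.g. under a little-endian flavour "?lvsr" is emitted as
+       # "lvsl" and the two source registers of a "?vperm" are swapped)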
+       if ($flavour =~ /le$/o) {       # little-endian
+           s/le\?//o           or
+           s/be\?/#be#/o       or
+           s/\?lvsr/lvsl/o     or
+           s/\?lvsl/lvsr/o     or
+           s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
+           s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
+           s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
+       } else {                        # big-endian
+           s/le\?/#le#/o       or
+           s/be\?//o           or
+           s/\?([a-z]+)/$1/o;
+       }
+
+        print $_,"\n";
+}
+
+close STDOUT;
diff --git a/arch/powerpc/crypto/ghashp8-ppc.pl b/arch/powerpc/crypto/ghashp8-ppc.pl
new file mode 100644 (file)
index 0000000..b56603b
--- /dev/null
@@ -0,0 +1,370 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# This code is taken from the OpenSSL project but the author (Andy Polyakov)
+# has relicensed it under the GPLv2. Therefore this program is free software;
+# you can redistribute it and/or modify it under the terms of the GNU General
+# Public License version 2 as published by the Free Software Foundation.
+#
+# The original headers, including the original license headers, are
+# included below for completeness.
+
+# ====================================================================
+# Written by Andy Polyakov <[email protected]> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see https://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# GHASH for PowerISA v2.07.
+#
+# July 2014
+#
+# Accurate performance measurements are problematic, because it's
+# always a virtualized setup with a possibly throttled processor.
+# Relative comparison is therefore more informative. This initial
+# version is ~2.1x slower than hardware-assisted AES-128-CTR and ~12x
+# faster than "4-bit" integer-only compiler-generated 64-bit code.
+# "Initial version" means that there is room for further improvement.
+
+$flavour=shift;
+$output =shift;
+
+if ($flavour =~ /64/) {
+       $SIZE_T=8;
+       $LRSAVE=2*$SIZE_T;
+       $STU="stdu";
+       $POP="ld";
+       $PUSH="std";
+} elsif ($flavour =~ /32/) {
+       $SIZE_T=4;
+       $LRSAVE=$SIZE_T;
+       $STU="stwu";
+       $POP="lwz";
+       $PUSH="stw";
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output" || die "can't call $xlate: $!";
+
+my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6));   # argument block
+
+my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3));
+my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12));
+my ($Xl1,$Xm1,$Xh1,$IN1,$H2,$H2h,$H2l)=map("v$_",(13..19));
+my $vrsave="r12";
+my ($t4,$t5,$t6) = ($Hl,$H,$Hh);
+
+$code=<<___;
+.machine       "any"
+
+.text
+
+.globl .gcm_init_p8
+       lis             r0,0xfff0
+       li              r8,0x10
+       mfspr           $vrsave,256
+       li              r9,0x20
+       mtspr           256,r0
+       li              r10,0x30
+       lvx_u           $H,0,r4                 # load H
+       le?xor          r7,r7,r7
+       le?addi         r7,r7,0x8               # need a vperm starting at byte 8
+       le?lvsr         5,0,r7
+       le?vspltisb     6,0x0f
+       le?vxor         5,5,6                   # set a big-endian mask
+       le?vperm        $H,$H,$H,5
+
+       vspltisb        $xC2,-16                # 0xf0
+       vspltisb        $t0,1                   # one
+       vaddubm         $xC2,$xC2,$xC2          # 0xe0
+       vxor            $zero,$zero,$zero
+       vor             $xC2,$xC2,$t0           # 0xe1
+       vsldoi          $xC2,$xC2,$zero,15      # 0xe1...
+       vsldoi          $t1,$zero,$t0,1         # ...1
+       vaddubm         $xC2,$xC2,$xC2          # 0xc2...
+       vspltisb        $t2,7
+       vor             $xC2,$xC2,$t1           # 0xc2....01
+       vspltb          $t1,$H,0                # most significant byte
+       vsl             $H,$H,$t0               # H<<=1
+       vsrab           $t1,$t1,$t2             # broadcast carry bit
+       vand            $t1,$t1,$xC2
+       vxor            $H,$H,$t1               # twisted H
+
+       vsldoi          $H,$H,$H,8              # twist even more ...
+       vsldoi          $xC2,$zero,$xC2,8       # 0xc2.0
+       vsldoi          $Hl,$zero,$H,8          # ... and split
+       vsldoi          $Hh,$H,$zero,8
+
+       stvx_u          $xC2,0,r3               # save pre-computed table
+       stvx_u          $Hl,r8,r3
+       stvx_u          $H, r9,r3
+       stvx_u          $Hh,r10,r3
+
+       mtspr           256,$vrsave
+       blr
+       .long           0
+       .byte           0,12,0x14,0,0,0,2,0
+       .long           0
+.size  .gcm_init_p8,.-.gcm_init_p8
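+
+# The table written above is: 0x00 the 0xc2... reduction constant,
+# 0x10/0x20/0x30 Hl/H/Hh, where H is the "twisted" hash key (shifted
+# left one bit with conditional reduction, 64-bit halves swapped)
+# that the vpmsumd multiply-reduce sequences below expect.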
+
+.globl .gcm_init_htable
+       lis             r0,0xfff0
+       li              r8,0x10
+       mfspr           $vrsave,256
+       li              r9,0x20
+       mtspr           256,r0
+       li              r10,0x30
+       lvx_u           $H,0,r4                 # load H
+
+       vspltisb        $xC2,-16                # 0xf0
+       vspltisb        $t0,1                   # one
+       vaddubm         $xC2,$xC2,$xC2          # 0xe0
+       vxor            $zero,$zero,$zero
+       vor             $xC2,$xC2,$t0           # 0xe1
+       vsldoi          $xC2,$xC2,$zero,15      # 0xe1...
+       vsldoi          $t1,$zero,$t0,1         # ...1
+       vaddubm         $xC2,$xC2,$xC2          # 0xc2...
+       vspltisb        $t2,7
+       vor             $xC2,$xC2,$t1           # 0xc2....01
+       vspltb          $t1,$H,0                # most significant byte
+       vsl             $H,$H,$t0               # H<<=1
+       vsrab           $t1,$t1,$t2             # broadcast carry bit
+       vand            $t1,$t1,$xC2
+       vxor            $IN,$H,$t1              # twisted H
+
+       vsldoi          $H,$IN,$IN,8            # twist even more ...
+       vsldoi          $xC2,$zero,$xC2,8       # 0xc2.0
+       vsldoi          $Hl,$zero,$H,8          # ... and split
+       vsldoi          $Hh,$H,$zero,8
+
+       stvx_u          $xC2,0,r3               # save pre-computed table
+       stvx_u          $Hl,r8,r3
+       li              r8,0x40
+       stvx_u          $H, r9,r3
+       li              r9,0x50
+       stvx_u          $Hh,r10,r3
+       li              r10,0x60
+
+       vpmsumd         $Xl,$IN,$Hl             # H.lo·H.lo
+       vpmsumd         $Xm,$IN,$H              # H.hi·H.lo+H.lo·H.hi
+       vpmsumd         $Xh,$IN,$Hh             # H.hi·H.hi
+
+       vpmsumd         $t2,$Xl,$xC2            # 1st reduction phase
+
+       vsldoi          $t0,$Xm,$zero,8
+       vsldoi          $t1,$zero,$Xm,8
+       vxor            $Xl,$Xl,$t0
+       vxor            $Xh,$Xh,$t1
+
+       vsldoi          $Xl,$Xl,$Xl,8
+       vxor            $Xl,$Xl,$t2
+
+       vsldoi          $t1,$Xl,$Xl,8           # 2nd reduction phase
+       vpmsumd         $Xl,$Xl,$xC2
+       vxor            $t1,$t1,$Xh
+       vxor            $IN1,$Xl,$t1
+
+       vsldoi          $H2,$IN1,$IN1,8
+       vsldoi          $H2l,$zero,$H2,8
+       vsldoi          $H2h,$H2,$zero,8
+
+       stvx_u          $H2l,r8,r3              # save H^2
+       li              r8,0x70
+       stvx_u          $H2,r9,r3
+       li              r9,0x80
+       stvx_u          $H2h,r10,r3
+       li              r10,0x90
+
+       vpmsumd         $Xl,$IN,$H2l            # H.lo·H^2.lo
+        vpmsumd        $Xl1,$IN1,$H2l          # H^2.lo·H^2.lo
+       vpmsumd         $Xm,$IN,$H2             # H.hi·H^2.lo+H.lo·H^2.hi
+        vpmsumd        $Xm1,$IN1,$H2           # H^2.hi·H^2.lo+H^2.lo·H^2.hi
+       vpmsumd         $Xh,$IN,$H2h            # H.hi·H^2.hi
+        vpmsumd        $Xh1,$IN1,$H2h          # H^2.hi·H^2.hi
+
+       vpmsumd         $t2,$Xl,$xC2            # 1st reduction phase
+        vpmsumd        $t6,$Xl1,$xC2           # 1st reduction phase
+
+       vsldoi          $t0,$Xm,$zero,8
+       vsldoi          $t1,$zero,$Xm,8
+        vsldoi         $t4,$Xm1,$zero,8
+        vsldoi         $t5,$zero,$Xm1,8
+       vxor            $Xl,$Xl,$t0
+       vxor            $Xh,$Xh,$t1
+        vxor           $Xl1,$Xl1,$t4
+        vxor           $Xh1,$Xh1,$t5
+
+       vsldoi          $Xl,$Xl,$Xl,8
+        vsldoi         $Xl1,$Xl1,$Xl1,8
+       vxor            $Xl,$Xl,$t2
+        vxor           $Xl1,$Xl1,$t6
+
+       vsldoi          $t1,$Xl,$Xl,8           # 2nd reduction phase
+        vsldoi         $t5,$Xl1,$Xl1,8         # 2nd reduction phase
+       vpmsumd         $Xl,$Xl,$xC2
+        vpmsumd        $Xl1,$Xl1,$xC2
+       vxor            $t1,$t1,$Xh
+        vxor           $t5,$t5,$Xh1
+       vxor            $Xl,$Xl,$t1
+        vxor           $Xl1,$Xl1,$t5
+
+       vsldoi          $H,$Xl,$Xl,8
+        vsldoi         $H2,$Xl1,$Xl1,8
+       vsldoi          $Hl,$zero,$H,8
+       vsldoi          $Hh,$H,$zero,8
+        vsldoi         $H2l,$zero,$H2,8
+        vsldoi         $H2h,$H2,$zero,8
+
+       stvx_u          $Hl,r8,r3               # save H^3
+       li              r8,0xa0
+       stvx_u          $H,r9,r3
+       li              r9,0xb0
+       stvx_u          $Hh,r10,r3
+       li              r10,0xc0
+        stvx_u         $H2l,r8,r3              # save H^4
+        stvx_u         $H2,r9,r3
+        stvx_u         $H2h,r10,r3
+
+       mtspr           256,$vrsave
+       blr
+       .long           0
+       .byte           0,12,0x14,0,0,0,2,0
+       .long           0
+.size  .gcm_init_htable,.-.gcm_init_htable
+
+.globl .gcm_gmult_p8
+       lis             r0,0xfff8
+       li              r8,0x10
+       mfspr           $vrsave,256
+       li              r9,0x20
+       mtspr           256,r0
+       li              r10,0x30
+       lvx_u           $IN,0,$Xip              # load Xi
+
+       lvx_u           $Hl,r8,$Htbl            # load pre-computed table
+        le?lvsl        $lemask,r0,r0
+       lvx_u           $H, r9,$Htbl
+        le?vspltisb    $t0,0x07
+       lvx_u           $Hh,r10,$Htbl
+        le?vxor        $lemask,$lemask,$t0
+       lvx_u           $xC2,0,$Htbl
+        le?vperm       $IN,$IN,$IN,$lemask
+       vxor            $zero,$zero,$zero
+
+       vpmsumd         $Xl,$IN,$Hl             # H.lo·Xi.lo
+       vpmsumd         $Xm,$IN,$H              # H.hi·Xi.lo+H.lo·Xi.hi
+       vpmsumd         $Xh,$IN,$Hh             # H.hi·Xi.hi
+
+       vpmsumd         $t2,$Xl,$xC2            # 1st phase
+
+       vsldoi          $t0,$Xm,$zero,8
+       vsldoi          $t1,$zero,$Xm,8
+       vxor            $Xl,$Xl,$t0
+       vxor            $Xh,$Xh,$t1
+
+       vsldoi          $Xl,$Xl,$Xl,8
+       vxor            $Xl,$Xl,$t2
+
+       vsldoi          $t1,$Xl,$Xl,8           # 2nd phase
+       vpmsumd         $Xl,$Xl,$xC2
+       vxor            $t1,$t1,$Xh
+       vxor            $Xl,$Xl,$t1
+
+       le?vperm        $Xl,$Xl,$Xl,$lemask
+       stvx_u          $Xl,0,$Xip              # write out Xi
+
+       mtspr           256,$vrsave
+       blr
+       .long           0
+       .byte           0,12,0x14,0,0,0,2,0
+       .long           0
+.size  .gcm_gmult_p8,.-.gcm_gmult_p8
+
+.globl .gcm_ghash_p8
+       lis             r0,0xfff8
+       li              r8,0x10
+       mfspr           $vrsave,256
+       li              r9,0x20
+       mtspr           256,r0
+       li              r10,0x30
+       lvx_u           $Xl,0,$Xip              # load Xi
+
+       lvx_u           $Hl,r8,$Htbl            # load pre-computed table
+        le?lvsl        $lemask,r0,r0
+       lvx_u           $H, r9,$Htbl
+        le?vspltisb    $t0,0x07
+       lvx_u           $Hh,r10,$Htbl
+        le?vxor        $lemask,$lemask,$t0
+       lvx_u           $xC2,0,$Htbl
+        le?vperm       $Xl,$Xl,$Xl,$lemask
+       vxor            $zero,$zero,$zero
+
+       lvx_u           $IN,0,$inp
+       addi            $inp,$inp,16
+       subi            $len,$len,16
+        le?vperm       $IN,$IN,$IN,$lemask
+       vxor            $IN,$IN,$Xl
+       b               Loop
+
+.align 5
+Loop:
+        subic          $len,$len,16
+       vpmsumd         $Xl,$IN,$Hl             # H.lo·Xi.lo
+        subfe.         r0,r0,r0                # borrow?-1:0
+       vpmsumd         $Xm,$IN,$H              # H.hi·Xi.lo+H.lo·Xi.hi
+        and            r0,r0,$len
+       vpmsumd         $Xh,$IN,$Hh             # H.hi·Xi.hi
+        add            $inp,$inp,r0
+
+       vpmsumd         $t2,$Xl,$xC2            # 1st phase
+
+       vsldoi          $t0,$Xm,$zero,8
+       vsldoi          $t1,$zero,$Xm,8
+       vxor            $Xl,$Xl,$t0
+       vxor            $Xh,$Xh,$t1
+
+       vsldoi          $Xl,$Xl,$Xl,8
+       vxor            $Xl,$Xl,$t2
+        lvx_u          $IN,0,$inp
+        addi           $inp,$inp,16
+
+       vsldoi          $t1,$Xl,$Xl,8           # 2nd phase
+       vpmsumd         $Xl,$Xl,$xC2
+        le?vperm       $IN,$IN,$IN,$lemask
+       vxor            $t1,$t1,$Xh
+       vxor            $IN,$IN,$t1
+       vxor            $IN,$IN,$Xl
+       beq             Loop                    # did $len-=16 borrow?
+
+       vxor            $Xl,$Xl,$t1
+       le?vperm        $Xl,$Xl,$Xl,$lemask
+       stvx_u          $Xl,0,$Xip              # write out Xi
+
+       mtspr           256,$vrsave
+       blr
+       .long           0
+       .byte           0,12,0x14,0,0,0,4,0
+       .long           0
+.size  .gcm_ghash_p8,.-.gcm_ghash_p8
+
+.asciz  "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+.align  2
+___
+
+foreach (split("\n",$code)) {
+       if ($flavour =~ /le$/o) {       # little-endian
+           s/le\?//o           or
+           s/be\?/#be#/o;
+       } else {
+           s/le\?/#le#/o       or
+           s/be\?//o;
+       }
+       print $_,"\n";
+}
+
+close STDOUT; # enforce flush
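
A note on the algebra behind the "1st/2nd reduction phase" comments in the routines above (this is a reading of the CRYPTOGAMS comments, not text from the patch): GHASH multiplies in the field GF(2^128) defined by

    g(x) = x^{128} + x^7 + x^2 + x + 1, \qquad x^{128} \equiv x^7 + x^2 + x + 1 \pmod{g(x)}

The code keeps operands bit-reflected, which is why the reduction constant is assembled as 0xe1 (the bit-reversal of 0x87, the low byte of g) and then doubled into the 0xc2....01 pattern the comments track; the "twisted H" is H shifted left one bit with the carry folded back through that constant. The three vpmsumd calls per block compute the low, cross, and high carry-less partial products, and the two vpmsumd-by-$xC2 phases fold the resulting 256-bit value back into 128 bits, 64 bits at a time.
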
diff --git a/arch/powerpc/crypto/ppc-xlate.pl b/arch/powerpc/crypto/ppc-xlate.pl
new file mode 100644 (file)
index 0000000..23cca70
--- /dev/null
+++ b/arch/powerpc/crypto/ppc-xlate.pl
@@ -0,0 +1,229 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# PowerPC assembler distiller by <appro>.
+
+my $flavour = shift;
+my $output = shift;
+open STDOUT,">$output" || die "can't open $output: $!";
+
+my %GLOBALS;
+my $dotinlocallabels=($flavour=~/linux/)?1:0;
+
+################################################################
+# directives which need special treatment on different platforms
+################################################################
+my $globl = sub {
+    my $junk = shift;
+    my $name = shift;
+    my $global = \$GLOBALS{$name};
+    my $ret;
+
+    $name =~ s|^[\.\_]||;
+
+    SWITCH: for ($flavour) {
+       /aix/           && do { $name = ".$name";
+                               last;
+                             };
+       /osx/           && do { $name = "_$name";
+                               last;
+                             };
+       /linux/
+                       && do { $ret = "_GLOBAL($name)";
+                               last;
+                             };
+    }
+
+    $ret = ".globl     $name\nalign 5\n$name:" if (!$ret);
+    $$global = $name;
+    $ret;
+};
+my $text = sub {
+    my $ret = ($flavour =~ /aix/) ? ".csect\t.text[PR],7" : ".text";
+    $ret = ".abiversion        2\n".$ret       if ($flavour =~ /linux.*64le/);
+    $ret;
+};
+my $machine = sub {
+    my $junk = shift;
+    my $arch = shift;
+    if ($flavour =~ /osx/)
+    {  $arch =~ s/\"//g;
+       $arch = ($flavour=~/64/) ? "ppc970-64" : "ppc970" if ($arch eq "any");
+    }
+    ".machine  $arch";
+};
+my $size = sub {
+    if ($flavour =~ /linux/)
+    {  shift;
+       my $name = shift; $name =~ s|^[\.\_]||;
+       my $ret  = ".size       $name,.-".($flavour=~/64$/?".":"").$name;
+       $ret .= "\n.size        .$name,.-.$name" if ($flavour=~/64$/);
+       $ret;
+    }
+    else
+    {  "";     }
+};
+my $asciz = sub {
+    shift;
+    my $line = join(",",@_);
+    if ($line =~ /^"(.*)"$/)
+    {  ".byte  " . join(",",unpack("C*",$1),0) . "\n.align     2";     }
+    else
+    {  "";     }
+};
+my $quad = sub {
+    shift;
+    my @ret;
+    my ($hi,$lo);
+    for (@_) {
+       if (/^0x([0-9a-f]*?)([0-9a-f]{1,8})$/io)
+       {  $hi=$1?"0x$1":"0"; $lo="0x$2";  }
+       elsif (/^([0-9]+)$/o)
+       {  $hi=$1>>32; $lo=$1&0xffffffff;  } # error-prone with 32-bit perl
+       else
+       {  $hi=undef; $lo=$_; }
+
+       if (defined($hi))
+       {  push(@ret,$flavour=~/le$/o?".long\t$lo,$hi":".long\t$hi,$lo");  }
+       else
+       {  push(@ret,".quad     $lo");  }
+    }
+    join("\n",@ret);
+};
+
+################################################################
+# simplified mnemonics not handled by at least one assembler
+################################################################
+my $cmplw = sub {
+    my $f = shift;
+    my $cr = 0; $cr = shift if ($#_>1);
+    # Some out-of-date 32-bit GNU assembler just can't handle cmplw...
+    ($flavour =~ /linux.*32/) ?
+       "       .long   ".sprintf "0x%x",31<<26|$cr<<23|$_[0]<<16|$_[1]<<11|64 :
+       "       cmplw   ".join(',',$cr,@_);
+};
+my $bdnz = sub {
+    my $f = shift;
+    my $bo = $f=~/[\+\-]/ ? 16+9 : 16; # optional "to be taken" hint
+    "  bc      $bo,0,".shift;
+} if ($flavour!~/linux/);
+my $bltlr = sub {
+    my $f = shift;
+    my $bo = $f=~/\-/ ? 12+2 : 12;     # optional "not to be taken" hint
+    ($flavour =~ /linux/) ?            # GNU as doesn't allow most recent hints
+       "       .long   ".sprintf "0x%x",19<<26|$bo<<21|16<<1 :
+       "       bclr    $bo,0";
+};
+my $bnelr = sub {
+    my $f = shift;
+    my $bo = $f=~/\-/ ? 4+2 : 4;       # optional "not to be taken" hint
+    ($flavour =~ /linux/) ?            # GNU as doesn't allow most recent hints
+       "       .long   ".sprintf "0x%x",19<<26|$bo<<21|2<<16|16<<1 :
+       "       bclr    $bo,2";
+};
+my $beqlr = sub {
+    my $f = shift;
+    my $bo = $f=~/-/ ? 12+2 : 12;      # optional "not to be taken" hint
+    ($flavour =~ /linux/) ?            # GNU as doesn't allow most recent hints
+       "       .long   ".sprintf "0x%X",19<<26|$bo<<21|2<<16|16<<1 :
+       "       bclr    $bo,2";
+};
+# GNU assembler can't handle extrdi rA,rS,16,48, or when sum of last two
+# arguments is 64, with "operand out of range" error.
+my $extrdi = sub {
+    my ($f,$ra,$rs,$n,$b) = @_;
+    $b = ($b+$n)&63; $n = 64-$n;
+    "  rldicl  $ra,$rs,$b,$n";
+};
+my $vmr = sub {
+    my ($f,$vx,$vy) = @_;
+    "  vor     $vx,$vy,$vy";
+};
+
+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /linux-ppc64le/);
+my $mtspr = sub {
+    my ($f,$idx,$ra) = @_;
+    if ($idx == 256 && $no_vrsave) {
+       "       or      $ra,$ra,$ra";
+    } else {
+       "       mtspr   $idx,$ra";
+    }
+};
+my $mfspr = sub {
+    my ($f,$rd,$idx) = @_;
+    if ($idx == 256 && $no_vrsave) {
+       "       li      $rd,-1";
+    } else {
+       "       mfspr   $rd,$idx";
+    }
+};
+
+# PowerISA 2.06 stuff
+sub vsxmem_op {
+    my ($f, $vrt, $ra, $rb, $op) = @_;
+    "  .long   ".sprintf "0x%X",(31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1);
+}
+# made-up unaligned memory reference AltiVec/VMX instructions
+my $lvx_u      = sub { vsxmem_op(@_, 844); };  # lxvd2x
+my $stvx_u     = sub { vsxmem_op(@_, 972); };  # stxvd2x
+my $lvdx_u     = sub { vsxmem_op(@_, 588); };  # lxsdx
+my $stvdx_u    = sub { vsxmem_op(@_, 716); };  # stxsdx
+my $lvx_4w     = sub { vsxmem_op(@_, 780); };  # lxvw4x
+my $stvx_4w    = sub { vsxmem_op(@_, 908); };  # stxvw4x
+
+# PowerISA 2.07 stuff
+sub vcrypto_op {
+    my ($f, $vrt, $vra, $vrb, $op) = @_;
+    "  .long   ".sprintf "0x%X",(4<<26)|($vrt<<21)|($vra<<16)|($vrb<<11)|$op;
+}
+my $vcipher    = sub { vcrypto_op(@_, 1288); };
+my $vcipherlast        = sub { vcrypto_op(@_, 1289); };
+my $vncipher   = sub { vcrypto_op(@_, 1352); };
+my $vncipherlast= sub { vcrypto_op(@_, 1353); };
+my $vsbox      = sub { vcrypto_op(@_, 0, 1480); };
+my $vshasigmad = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1730); };
+my $vshasigmaw = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1666); };
+my $vpmsumb    = sub { vcrypto_op(@_, 1032); };
+my $vpmsumd    = sub { vcrypto_op(@_, 1224); };
+my $vpmsubh    = sub { vcrypto_op(@_, 1096); };
+my $vpmsumw    = sub { vcrypto_op(@_, 1160); };
+my $vaddudm    = sub { vcrypto_op(@_, 192);  };
+my $vadduqm    = sub { vcrypto_op(@_, 256);  };
+
+my $mtsle      = sub {
+    my ($f, $arg) = @_;
+    "  .long   ".sprintf "0x%X",(31<<26)|($arg<<21)|(147*2);
+};
+
+print "#include <asm/ppc_asm.h>\n" if $flavour =~ /linux/;
+
+while($line=<>) {
+
+    $line =~ s|[#!;].*$||;     # get rid of asm-style comments...
+    $line =~ s|/\*.*\*/||;     # ... and C-style comments...
+    $line =~ s|^\s+||;         # ... and skip white spaces in beginning...
+    $line =~ s|\s+$||;         # ... and at the end
+
+    {
+       $line =~ s|\b\.L(\w+)|L$1|g;    # common denominator for Locallabel
+       $line =~ s|\bL(\w+)|\.L$1|g     if ($dotinlocallabels);
+    }
+
+    {
+       $line =~ s|^\s*(\.?)(\w+)([\.\+\-]?)\s*||;
+       my $c = $1; $c = "\t" if ($c eq "");
+       my $mnemonic = $2;
+       my $f = $3;
+       my $opcode = eval("\$$mnemonic");
+       $line =~ s/\b(c?[rf]|v|vs)([0-9]+)\b/$2/g if ($c ne "." and $flavour !~ /osx/);
+       if (ref($opcode) eq 'CODE') { $line = &$opcode($f,split(',',$line)); }
+       elsif ($mnemonic)           { $line = $c.$mnemonic.$f."\t".$line; }
+    }
+
+    print $line if ($line);
+    print "\n";
+}
+
+close STDOUT;
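
The distiller above is essentially a table of small rewrite rules. As an illustration, here is a standalone Perl sketch (not code from the patch) that re-implements three of those rules and shows what they emit:

#!/usr/bin/env perl
# Standalone sketch exercising three ppc-xlate.pl rules shown above.
use strict;
use warnings;

# vsxmem_op(): VSX memory ops are emitted as raw .long words, which keeps
# the output assemblable even where the assembler lacks VSX mnemonics.
sub vsxmem_op {
    my ($vrt, $ra, $rb, $op) = @_;
    sprintf "0x%X", (31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1);
}
# "lvx_u v0,0,r4" is lxvd2x vs32,0,r4: XO=844, low bit TX=1 selects VSRs 32-63
print "lvx_u  v0,0,r4    -> .long ", vsxmem_op(0, 0, 4, 844), "\n"; # 0x7C002699

# extrdi is rewritten as rldicl because some GNU as versions reject extrdi
# whose last two operands sum to 64 ("operand out of range").
sub extrdi {
    my ($ra, $rs, $n, $b) = @_;
    $b = ($b + $n) & 63;
    $n = 64 - $n;
    "rldicl $ra,$rs,$b,$n";
}
print "extrdi r3,r4,16,48 -> ", extrdi("r3", "r4", 16, 48), "\n"; # rldicl r3,r4,0,48

# vrsave handling: on linux-ppc64le SPR 256 is left alone entirely, so
# mtspr 256 becomes the no-op "or rA,rA,rA" and mfspr 256 becomes "li rD,-1".
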
diff --git a/arch/powerpc/include/asm/cpufeature.h b/arch/powerpc/include/asm/cpufeature.h
index f6f790a90367faf2cb9295bc74385dbdf5364d85..2dcc66225e7f3176f3e840d95ae60cfdf205e7a7 100644 (file)
@@ -22,6 +22,7 @@
  */
 
 #define PPC_MODULE_FEATURE_VEC_CRYPTO                  (32 + ilog2(PPC_FEATURE2_VEC_CRYPTO))
+#define PPC_MODULE_FEATURE_P10                         (32 + ilog2(PPC_FEATURE2_ARCH_3_1))
 
 #define cpu_feature(x)         (x)
 
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
index cdf3215ec272ced263a234efab1dc754d240debd..ad7f4c89162568b0b1089312bf4a4d675311b111 100644 (file)
@@ -201,8 +201,8 @@ SYM_FUNC_START(crypto_aegis128_aesni_init)
        movdqa KEY, STATE4
 
        /* load the constants: */
-       movdqa .Laegis128_const_0, STATE2
-       movdqa .Laegis128_const_1, STATE1
+       movdqa .Laegis128_const_0(%rip), STATE2
+       movdqa .Laegis128_const_1(%rip), STATE1
        pxor STATE2, STATE3
        pxor STATE1, STATE4
 
@@ -682,7 +682,7 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
        punpcklbw T0, T0
        punpcklbw T0, T0
        punpcklbw T0, T0
-       movdqa .Laegis128_counter, T1
+       movdqa .Laegis128_counter(%rip), T1
        pcmpgtb T1, T0
        pand T0, MSG
 
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 837c1e0aa0217783896dfd45b66f761e103cf8a6..3ac7487ecad2d3f0248ce95a2f8688e75b20895c 100644 (file)
@@ -288,53 +288,53 @@ ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
        # Encrypt/Decrypt first few blocks
 
        and     $(3<<4), %r12
-       jz      _initial_num_blocks_is_0_\@
+       jz      .L_initial_num_blocks_is_0_\@
        cmp     $(2<<4), %r12
-       jb      _initial_num_blocks_is_1_\@
-       je      _initial_num_blocks_is_2_\@
-_initial_num_blocks_is_3_\@:
+       jb      .L_initial_num_blocks_is_1_\@
+       je      .L_initial_num_blocks_is_2_\@
+.L_initial_num_blocks_is_3_\@:
        INITIAL_BLOCKS_ENC_DEC  %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
 %xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, \operation
        sub     $48, %r13
-       jmp     _initial_blocks_\@
-_initial_num_blocks_is_2_\@:
+       jmp     .L_initial_blocks_\@
+.L_initial_num_blocks_is_2_\@:
        INITIAL_BLOCKS_ENC_DEC  %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
 %xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, \operation
        sub     $32, %r13
-       jmp     _initial_blocks_\@
-_initial_num_blocks_is_1_\@:
+       jmp     .L_initial_blocks_\@
+.L_initial_num_blocks_is_1_\@:
        INITIAL_BLOCKS_ENC_DEC  %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
 %xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, \operation
        sub     $16, %r13
-       jmp     _initial_blocks_\@
-_initial_num_blocks_is_0_\@:
+       jmp     .L_initial_blocks_\@
+.L_initial_num_blocks_is_0_\@:
        INITIAL_BLOCKS_ENC_DEC  %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
 %xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, \operation
-_initial_blocks_\@:
+.L_initial_blocks_\@:
 
        # Main loop - Encrypt/Decrypt remaining blocks
 
        test    %r13, %r13
-       je      _zero_cipher_left_\@
+       je      .L_zero_cipher_left_\@
        sub     $64, %r13
-       je      _four_cipher_left_\@
-_crypt_by_4_\@:
+       je      .L_four_cipher_left_\@
+.L_crypt_by_4_\@:
        GHASH_4_ENCRYPT_4_PARALLEL_\operation   %xmm9, %xmm10, %xmm11, %xmm12, \
        %xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, \
        %xmm7, %xmm8, enc
        add     $64, %r11
        sub     $64, %r13
-       jne     _crypt_by_4_\@
-_four_cipher_left_\@:
+       jne     .L_crypt_by_4_\@
+.L_four_cipher_left_\@:
        GHASH_LAST_4    %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
 %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
-_zero_cipher_left_\@:
+.L_zero_cipher_left_\@:
        movdqu %xmm8, AadHash(%arg2)
        movdqu %xmm0, CurCount(%arg2)
 
        mov     %arg5, %r13
        and     $15, %r13                       # %r13 = arg5 (mod 16)
-       je      _multiple_of_16_bytes_\@
+       je      .L_multiple_of_16_bytes_\@
 
        mov %r13, PBlockLen(%arg2)
 
@@ -348,14 +348,14 @@ _zero_cipher_left_\@:
        movdqu %xmm0, PBlockEncKey(%arg2)
 
        cmp     $16, %arg5
-       jge _large_enough_update_\@
+       jge     .L_large_enough_update_\@
 
        lea (%arg4,%r11,1), %r10
        mov %r13, %r12
        READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
-       jmp _data_read_\@
+       jmp     .L_data_read_\@
 
-_large_enough_update_\@:
+.L_large_enough_update_\@:
        sub     $16, %r11
        add     %r13, %r11
 
@@ -374,7 +374,7 @@ _large_enough_update_\@:
        # shift right 16-r13 bytes
        pshufb  %xmm2, %xmm1
 
-_data_read_\@:
+.L_data_read_\@:
        lea ALL_F+16(%rip), %r12
        sub %r13, %r12
 
@@ -409,19 +409,19 @@ _data_read_\@:
        # Output %r13 bytes
        movq %xmm0, %rax
        cmp $8, %r13
-       jle _less_than_8_bytes_left_\@
+       jle .L_less_than_8_bytes_left_\@
        mov %rax, (%arg3 , %r11, 1)
        add $8, %r11
        psrldq $8, %xmm0
        movq %xmm0, %rax
        sub $8, %r13
-_less_than_8_bytes_left_\@:
+.L_less_than_8_bytes_left_\@:
        mov %al,  (%arg3, %r11, 1)
        add $1, %r11
        shr $8, %rax
        sub $1, %r13
-       jne _less_than_8_bytes_left_\@
-_multiple_of_16_bytes_\@:
+       jne .L_less_than_8_bytes_left_\@
+.L_multiple_of_16_bytes_\@:
 .endm
 
 # GCM_COMPLETE Finishes update of tag of last partial block
@@ -434,11 +434,11 @@ _multiple_of_16_bytes_\@:
        mov PBlockLen(%arg2), %r12
 
        test %r12, %r12
-       je _partial_done\@
+       je .L_partial_done\@
 
        GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
 
-_partial_done\@:
+.L_partial_done\@:
        mov AadLen(%arg2), %r12  # %r13 = aadLen (number of bytes)
        shl     $3, %r12                  # convert into number of bits
        movd    %r12d, %xmm15             # len(A) in %xmm15
@@ -457,44 +457,44 @@ _partial_done\@:
        movdqu OrigIV(%arg2), %xmm0       # %xmm0 = Y0
        ENCRYPT_SINGLE_BLOCK    %xmm0,  %xmm1     # E(K, Y0)
        pxor    %xmm8, %xmm0
-_return_T_\@:
+.L_return_T_\@:
        mov     \AUTHTAG, %r10                     # %r10 = authTag
        mov     \AUTHTAGLEN, %r11                    # %r11 = auth_tag_len
        cmp     $16, %r11
-       je      _T_16_\@
+       je      .L_T_16_\@
        cmp     $8, %r11
-       jl      _T_4_\@
-_T_8_\@:
+       jl      .L_T_4_\@
+.L_T_8_\@:
        movq    %xmm0, %rax
        mov     %rax, (%r10)
        add     $8, %r10
        sub     $8, %r11
        psrldq  $8, %xmm0
        test    %r11, %r11
-       je      _return_T_done_\@
-_T_4_\@:
+       je      .L_return_T_done_\@
+.L_T_4_\@:
        movd    %xmm0, %eax
        mov     %eax, (%r10)
        add     $4, %r10
        sub     $4, %r11
        psrldq  $4, %xmm0
        test    %r11, %r11
-       je      _return_T_done_\@
-_T_123_\@:
+       je      .L_return_T_done_\@
+.L_T_123_\@:
        movd    %xmm0, %eax
        cmp     $2, %r11
-       jl      _T_1_\@
+       jl      .L_T_1_\@
        mov     %ax, (%r10)
        cmp     $2, %r11
-       je      _return_T_done_\@
+       je      .L_return_T_done_\@
        add     $2, %r10
        sar     $16, %eax
-_T_1_\@:
+.L_T_1_\@:
        mov     %al, (%r10)
-       jmp     _return_T_done_\@
-_T_16_\@:
+       jmp     .L_return_T_done_\@
+.L_T_16_\@:
        movdqu  %xmm0, (%r10)
-_return_T_done_\@:
+.L_return_T_done_\@:
 .endm
 
 #ifdef __x86_64__
@@ -563,30 +563,30 @@ _return_T_done_\@:
 # Clobbers %rax, DLEN and XMM1
 .macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst
         cmp $8, \DLEN
-        jl _read_lt8_\@
+        jl .L_read_lt8_\@
         mov (\DPTR), %rax
         movq %rax, \XMMDst
         sub $8, \DLEN
-        jz _done_read_partial_block_\@
+        jz .L_done_read_partial_block_\@
        xor %eax, %eax
-_read_next_byte_\@:
+.L_read_next_byte_\@:
         shl $8, %rax
         mov 7(\DPTR, \DLEN, 1), %al
         dec \DLEN
-        jnz _read_next_byte_\@
+        jnz .L_read_next_byte_\@
         movq %rax, \XMM1
        pslldq $8, \XMM1
         por \XMM1, \XMMDst
-       jmp _done_read_partial_block_\@
-_read_lt8_\@:
+       jmp .L_done_read_partial_block_\@
+.L_read_lt8_\@:
        xor %eax, %eax
-_read_next_byte_lt8_\@:
+.L_read_next_byte_lt8_\@:
         shl $8, %rax
         mov -1(\DPTR, \DLEN, 1), %al
         dec \DLEN
-        jnz _read_next_byte_lt8_\@
+        jnz .L_read_next_byte_lt8_\@
         movq %rax, \XMMDst
-_done_read_partial_block_\@:
+.L_done_read_partial_block_\@:
 .endm
 
 # CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
@@ -600,8 +600,8 @@ _done_read_partial_block_\@:
        pxor       \TMP6, \TMP6
 
        cmp        $16, %r11
-       jl         _get_AAD_rest\@
-_get_AAD_blocks\@:
+       jl         .L_get_AAD_rest\@
+.L_get_AAD_blocks\@:
        movdqu     (%r10), \TMP7
        pshufb     %xmm14, \TMP7 # byte-reflect the AAD data
        pxor       \TMP7, \TMP6
@@ -609,14 +609,14 @@ _get_AAD_blocks\@:
        add        $16, %r10
        sub        $16, %r11
        cmp        $16, %r11
-       jge        _get_AAD_blocks\@
+       jge        .L_get_AAD_blocks\@
 
        movdqu     \TMP6, \TMP7
 
        /* read the last <16B of AAD */
-_get_AAD_rest\@:
+.L_get_AAD_rest\@:
        test       %r11, %r11
-       je         _get_AAD_done\@
+       je         .L_get_AAD_done\@
 
        READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
        pshufb     %xmm14, \TMP7 # byte-reflect the AAD data
@@ -624,7 +624,7 @@ _get_AAD_rest\@:
        GHASH_MUL  \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
        movdqu \TMP7, \TMP6
 
-_get_AAD_done\@:
+.L_get_AAD_done\@:
        movdqu \TMP6, AadHash(%arg2)
 .endm
 
@@ -637,21 +637,21 @@ _get_AAD_done\@:
        AAD_HASH operation
        mov     PBlockLen(%arg2), %r13
        test    %r13, %r13
-       je      _partial_block_done_\@  # Leave Macro if no partial blocks
+       je      .L_partial_block_done_\@        # Leave Macro if no partial blocks
        # Read in input data without over reading
        cmp     $16, \PLAIN_CYPH_LEN
-       jl      _fewer_than_16_bytes_\@
+       jl      .L_fewer_than_16_bytes_\@
        movups  (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm
-       jmp     _data_read_\@
+       jmp     .L_data_read_\@
 
-_fewer_than_16_bytes_\@:
+.L_fewer_than_16_bytes_\@:
        lea     (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
        mov     \PLAIN_CYPH_LEN, %r12
        READ_PARTIAL_BLOCK %r10 %r12 %xmm0 %xmm1
 
        mov PBlockLen(%arg2), %r13
 
-_data_read_\@:                         # Finished reading in data
+.L_data_read_\@:                               # Finished reading in data
 
        movdqu  PBlockEncKey(%arg2), %xmm9
        movdqu  HashKey(%arg2), %xmm13
@@ -674,9 +674,9 @@ _data_read_\@:                              # Finished reading in data
        sub     $16, %r10
        # Determine if if partial block is not being filled and
        # shift mask accordingly
-       jge     _no_extra_mask_1_\@
+       jge     .L_no_extra_mask_1_\@
        sub     %r10, %r12
-_no_extra_mask_1_\@:
+.L_no_extra_mask_1_\@:
 
        movdqu  ALL_F-SHIFT_MASK(%r12), %xmm1
        # get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -689,17 +689,17 @@ _no_extra_mask_1_\@:
        pxor    %xmm3, \AAD_HASH
 
        test    %r10, %r10
-       jl      _partial_incomplete_1_\@
+       jl      .L_partial_incomplete_1_\@
 
        # GHASH computation for the last <16 Byte block
        GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
        xor     %eax, %eax
 
        mov     %rax, PBlockLen(%arg2)
-       jmp     _dec_done_\@
-_partial_incomplete_1_\@:
+       jmp     .L_dec_done_\@
+.L_partial_incomplete_1_\@:
        add     \PLAIN_CYPH_LEN, PBlockLen(%arg2)
-_dec_done_\@:
+.L_dec_done_\@:
        movdqu  \AAD_HASH, AadHash(%arg2)
 .else
        pxor    %xmm1, %xmm9                    # Plaintext XOR E(K, Yn)
@@ -710,9 +710,9 @@ _dec_done_\@:
        sub     $16, %r10
        # Determine if if partial block is not being filled and
        # shift mask accordingly
-       jge     _no_extra_mask_2_\@
+       jge     .L_no_extra_mask_2_\@
        sub     %r10, %r12
-_no_extra_mask_2_\@:
+.L_no_extra_mask_2_\@:
 
        movdqu  ALL_F-SHIFT_MASK(%r12), %xmm1
        # get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -724,17 +724,17 @@ _no_extra_mask_2_\@:
        pxor    %xmm9, \AAD_HASH
 
        test    %r10, %r10
-       jl      _partial_incomplete_2_\@
+       jl      .L_partial_incomplete_2_\@
 
        # GHASH computation for the last <16 Byte block
        GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
        xor     %eax, %eax
 
        mov     %rax, PBlockLen(%arg2)
-       jmp     _encode_done_\@
-_partial_incomplete_2_\@:
+       jmp     .L_encode_done_\@
+.L_partial_incomplete_2_\@:
        add     \PLAIN_CYPH_LEN, PBlockLen(%arg2)
-_encode_done_\@:
+.L_encode_done_\@:
        movdqu  \AAD_HASH, AadHash(%arg2)
 
        movdqa  SHUF_MASK(%rip), %xmm10
@@ -744,32 +744,32 @@ _encode_done_\@:
 .endif
        # output encrypted Bytes
        test    %r10, %r10
-       jl      _partial_fill_\@
+       jl      .L_partial_fill_\@
        mov     %r13, %r12
        mov     $16, %r13
        # Set r13 to be the number of bytes to write out
        sub     %r12, %r13
-       jmp     _count_set_\@
-_partial_fill_\@:
+       jmp     .L_count_set_\@
+.L_partial_fill_\@:
        mov     \PLAIN_CYPH_LEN, %r13
-_count_set_\@:
+.L_count_set_\@:
        movdqa  %xmm9, %xmm0
        movq    %xmm0, %rax
        cmp     $8, %r13
-       jle     _less_than_8_bytes_left_\@
+       jle     .L_less_than_8_bytes_left_\@
 
        mov     %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
        add     $8, \DATA_OFFSET
        psrldq  $8, %xmm0
        movq    %xmm0, %rax
        sub     $8, %r13
-_less_than_8_bytes_left_\@:
+.L_less_than_8_bytes_left_\@:
        movb    %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
        add     $1, \DATA_OFFSET
        shr     $8, %rax
        sub     $1, %r13
-       jne     _less_than_8_bytes_left_\@
-_partial_block_done_\@:
+       jne     .L_less_than_8_bytes_left_\@
+.L_partial_block_done_\@:
 .endm # PARTIAL_BLOCK
 
 /*
@@ -813,14 +813,14 @@ _partial_block_done_\@:
        shr     $2,%eax                         # 128->4, 192->6, 256->8
        add     $5,%eax                       # 128->9, 192->11, 256->13
 
-aes_loop_initial_\@:
+.Laes_loop_initial_\@:
        MOVADQ  (%r10),\TMP1
 .irpc  index, \i_seq
        aesenc  \TMP1, %xmm\index
 .endr
        add     $16,%r10
        sub     $1,%eax
-       jnz     aes_loop_initial_\@
+       jnz     .Laes_loop_initial_\@
 
        MOVADQ  (%r10), \TMP1
 .irpc index, \i_seq
@@ -861,7 +861,7 @@ aes_loop_initial_\@:
        GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
 .endif
        cmp        $64, %r13
-       jl      _initial_blocks_done\@
+       jl      .L_initial_blocks_done\@
        # no need for precomputed values
 /*
 *
@@ -908,18 +908,18 @@ aes_loop_initial_\@:
        mov        keysize,%eax
        shr        $2,%eax                      # 128->4, 192->6, 256->8
        sub        $4,%eax                      # 128->0, 192->2, 256->4
-       jz         aes_loop_pre_done\@
+       jz         .Laes_loop_pre_done\@
 
-aes_loop_pre_\@:
+.Laes_loop_pre_\@:
        MOVADQ     (%r10),\TMP2
 .irpc  index, 1234
        aesenc     \TMP2, %xmm\index
 .endr
        add        $16,%r10
        sub        $1,%eax
-       jnz        aes_loop_pre_\@
+       jnz        .Laes_loop_pre_\@
 
-aes_loop_pre_done\@:
+.Laes_loop_pre_done\@:
        MOVADQ     (%r10), \TMP2
        aesenclast \TMP2, \XMM1
        aesenclast \TMP2, \XMM2
@@ -963,7 +963,7 @@ aes_loop_pre_done\@:
        pshufb %xmm14, \XMM3 # perform a 16 byte swap
        pshufb %xmm14, \XMM4 # perform a 16 byte swap
 
-_initial_blocks_done\@:
+.L_initial_blocks_done\@:
 
 .endm
 
@@ -1095,18 +1095,18 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        mov       keysize,%eax
        shr       $2,%eax                       # 128->4, 192->6, 256->8
        sub       $4,%eax                       # 128->0, 192->2, 256->4
-       jz        aes_loop_par_enc_done\@
+       jz        .Laes_loop_par_enc_done\@
 
-aes_loop_par_enc\@:
+.Laes_loop_par_enc\@:
        MOVADQ    (%r10),\TMP3
 .irpc  index, 1234
        aesenc    \TMP3, %xmm\index
 .endr
        add       $16,%r10
        sub       $1,%eax
-       jnz       aes_loop_par_enc\@
+       jnz       .Laes_loop_par_enc\@
 
-aes_loop_par_enc_done\@:
+.Laes_loop_par_enc_done\@:
        MOVADQ    (%r10), \TMP3
        aesenclast \TMP3, \XMM1           # Round 10
        aesenclast \TMP3, \XMM2
@@ -1303,18 +1303,18 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        mov       keysize,%eax
        shr       $2,%eax                       # 128->4, 192->6, 256->8
        sub       $4,%eax                       # 128->0, 192->2, 256->4
-       jz        aes_loop_par_dec_done\@
+       jz        .Laes_loop_par_dec_done\@
 
-aes_loop_par_dec\@:
+.Laes_loop_par_dec\@:
        MOVADQ    (%r10),\TMP3
 .irpc  index, 1234
        aesenc    \TMP3, %xmm\index
 .endr
        add       $16,%r10
        sub       $1,%eax
-       jnz       aes_loop_par_dec\@
+       jnz       .Laes_loop_par_dec\@
 
-aes_loop_par_dec_done\@:
+.Laes_loop_par_dec_done\@:
        MOVADQ    (%r10), \TMP3
        aesenclast \TMP3, \XMM1           # last round
        aesenclast \TMP3, \XMM2
@@ -2717,7 +2717,7 @@ SYM_FUNC_END(aesni_cts_cbc_dec)
  *     BSWAP_MASK == endian swapping mask
  */
 SYM_FUNC_START_LOCAL(_aesni_inc_init)
-       movaps .Lbswap_mask, BSWAP_MASK
+       movaps .Lbswap_mask(%rip), BSWAP_MASK
        movaps IV, CTR
        pshufb BSWAP_MASK, CTR
        mov $1, TCTR_LOW
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 0852ab573fd306acfcbeff5cb9d0128bc0502a34..46cddd78857bd9eb2782d62d36853ece8fb21cd6 100644 (file)
@@ -154,30 +154,6 @@ SHIFT_MASK:      .octa     0x0f0e0d0c0b0a09080706050403020100
 ALL_F:           .octa     0xffffffffffffffffffffffffffffffff
                  .octa     0x00000000000000000000000000000000
 
-.section .rodata
-.align 16
-.type aad_shift_arr, @object
-.size aad_shift_arr, 272
-aad_shift_arr:
-        .octa     0xffffffffffffffffffffffffffffffff
-        .octa     0xffffffffffffffffffffffffffffff0C
-        .octa     0xffffffffffffffffffffffffffff0D0C
-        .octa     0xffffffffffffffffffffffffff0E0D0C
-        .octa     0xffffffffffffffffffffffff0F0E0D0C
-        .octa     0xffffffffffffffffffffff0C0B0A0908
-        .octa     0xffffffffffffffffffff0D0C0B0A0908
-        .octa     0xffffffffffffffffff0E0D0C0B0A0908
-        .octa     0xffffffffffffffff0F0E0D0C0B0A0908
-        .octa     0xffffffffffffff0C0B0A090807060504
-        .octa     0xffffffffffff0D0C0B0A090807060504
-        .octa     0xffffffffff0E0D0C0B0A090807060504
-        .octa     0xffffffff0F0E0D0C0B0A090807060504
-        .octa     0xffffff0C0B0A09080706050403020100
-        .octa     0xffff0D0C0B0A09080706050403020100
-        .octa     0xff0E0D0C0B0A09080706050403020100
-        .octa     0x0F0E0D0C0B0A09080706050403020100
-
-
 .text
 
 
@@ -302,68 +278,68 @@ VARIABLE_OFFSET = 16*8
         mov     %r13, %r12
         shr     $4, %r12
         and     $7, %r12
-        jz      _initial_num_blocks_is_0\@
+        jz      .L_initial_num_blocks_is_0\@
 
         cmp     $7, %r12
-        je      _initial_num_blocks_is_7\@
+        je      .L_initial_num_blocks_is_7\@
         cmp     $6, %r12
-        je      _initial_num_blocks_is_6\@
+        je      .L_initial_num_blocks_is_6\@
         cmp     $5, %r12
-        je      _initial_num_blocks_is_5\@
+        je      .L_initial_num_blocks_is_5\@
         cmp     $4, %r12
-        je      _initial_num_blocks_is_4\@
+        je      .L_initial_num_blocks_is_4\@
         cmp     $3, %r12
-        je      _initial_num_blocks_is_3\@
+        je      .L_initial_num_blocks_is_3\@
         cmp     $2, %r12
-        je      _initial_num_blocks_is_2\@
+        je      .L_initial_num_blocks_is_2\@
 
-        jmp     _initial_num_blocks_is_1\@
+        jmp     .L_initial_num_blocks_is_1\@
 
-_initial_num_blocks_is_7\@:
+.L_initial_num_blocks_is_7\@:
         \INITIAL_BLOCKS  \REP, 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
         sub     $16*7, %r13
-        jmp     _initial_blocks_encrypted\@
+        jmp     .L_initial_blocks_encrypted\@
 
-_initial_num_blocks_is_6\@:
+.L_initial_num_blocks_is_6\@:
         \INITIAL_BLOCKS  \REP, 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
         sub     $16*6, %r13
-        jmp     _initial_blocks_encrypted\@
+        jmp     .L_initial_blocks_encrypted\@
 
-_initial_num_blocks_is_5\@:
+.L_initial_num_blocks_is_5\@:
         \INITIAL_BLOCKS  \REP, 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
         sub     $16*5, %r13
-        jmp     _initial_blocks_encrypted\@
+        jmp     .L_initial_blocks_encrypted\@
 
-_initial_num_blocks_is_4\@:
+.L_initial_num_blocks_is_4\@:
         \INITIAL_BLOCKS  \REP, 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
         sub     $16*4, %r13
-        jmp     _initial_blocks_encrypted\@
+        jmp     .L_initial_blocks_encrypted\@
 
-_initial_num_blocks_is_3\@:
+.L_initial_num_blocks_is_3\@:
         \INITIAL_BLOCKS  \REP, 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
         sub     $16*3, %r13
-        jmp     _initial_blocks_encrypted\@
+        jmp     .L_initial_blocks_encrypted\@
 
-_initial_num_blocks_is_2\@:
+.L_initial_num_blocks_is_2\@:
         \INITIAL_BLOCKS  \REP, 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
         sub     $16*2, %r13
-        jmp     _initial_blocks_encrypted\@
+        jmp     .L_initial_blocks_encrypted\@
 
-_initial_num_blocks_is_1\@:
+.L_initial_num_blocks_is_1\@:
         \INITIAL_BLOCKS  \REP, 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
         sub     $16*1, %r13
-        jmp     _initial_blocks_encrypted\@
+        jmp     .L_initial_blocks_encrypted\@
 
-_initial_num_blocks_is_0\@:
+.L_initial_num_blocks_is_0\@:
         \INITIAL_BLOCKS  \REP, 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
 
 
-_initial_blocks_encrypted\@:
+.L_initial_blocks_encrypted\@:
         test    %r13, %r13
-        je      _zero_cipher_left\@
+        je      .L_zero_cipher_left\@
 
         sub     $128, %r13
-        je      _eight_cipher_left\@
+        je      .L_eight_cipher_left\@
 
 
 
@@ -373,9 +349,9 @@ _initial_blocks_encrypted\@:
         vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
 
 
-_encrypt_by_8_new\@:
+.L_encrypt_by_8_new\@:
         cmp     $(255-8), %r15d
-        jg      _encrypt_by_8\@
+        jg      .L_encrypt_by_8\@
 
 
 
@@ -383,30 +359,30 @@ _encrypt_by_8_new\@:
         \GHASH_8_ENCRYPT_8_PARALLEL      \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC
         add     $128, %r11
         sub     $128, %r13
-        jne     _encrypt_by_8_new\@
+        jne     .L_encrypt_by_8_new\@
 
         vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
-        jmp     _eight_cipher_left\@
+        jmp     .L_eight_cipher_left\@
 
-_encrypt_by_8\@:
+.L_encrypt_by_8\@:
         vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
         add     $8, %r15b
         \GHASH_8_ENCRYPT_8_PARALLEL      \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC
         vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
         add     $128, %r11
         sub     $128, %r13
-        jne     _encrypt_by_8_new\@
+        jne     .L_encrypt_by_8_new\@
 
         vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
 
 
 
 
-_eight_cipher_left\@:
+.L_eight_cipher_left\@:
         \GHASH_LAST_8    %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8
 
 
-_zero_cipher_left\@:
+.L_zero_cipher_left\@:
         vmovdqu %xmm14, AadHash(arg2)
         vmovdqu %xmm9, CurCount(arg2)
 
@@ -414,7 +390,7 @@ _zero_cipher_left\@:
         mov     arg5, %r13
         and     $15, %r13                            # r13 = (arg5 mod 16)
 
-        je      _multiple_of_16_bytes\@
+        je      .L_multiple_of_16_bytes\@
 
         # handle the last <16 Byte block separately
 
@@ -428,7 +404,7 @@ _zero_cipher_left\@:
         vmovdqu %xmm9, PBlockEncKey(arg2)
 
         cmp $16, arg5
-        jge _large_enough_update\@
+        jge .L_large_enough_update\@
 
         lea (arg4,%r11,1), %r10
         mov %r13, %r12
@@ -440,9 +416,9 @@ _zero_cipher_left\@:
                                                     # able to shift 16-r13 bytes (r13 is the
        # number of bytes in plaintext mod 16)
 
-        jmp _final_ghash_mul\@
+        jmp .L_final_ghash_mul\@
 
-_large_enough_update\@:
+.L_large_enough_update\@:
         sub $16, %r11
         add %r13, %r11
 
@@ -461,7 +437,7 @@ _large_enough_update\@:
         # shift right 16-r13 bytes
         vpshufb  %xmm2, %xmm1, %xmm1
 
-_final_ghash_mul\@:
+.L_final_ghash_mul\@:
         .if  \ENC_DEC ==  DEC
         vmovdqa %xmm1, %xmm2
         vpxor   %xmm1, %xmm9, %xmm9                  # Plaintext XOR E(K, Yn)
@@ -490,7 +466,7 @@ _final_ghash_mul\@:
         # output r13 Bytes
         vmovq   %xmm9, %rax
         cmp     $8, %r13
-        jle     _less_than_8_bytes_left\@
+        jle     .L_less_than_8_bytes_left\@
 
         mov     %rax, (arg3 , %r11)
         add     $8, %r11
@@ -498,15 +474,15 @@ _final_ghash_mul\@:
         vmovq   %xmm9, %rax
         sub     $8, %r13
 
-_less_than_8_bytes_left\@:
+.L_less_than_8_bytes_left\@:
         movb    %al, (arg3 , %r11)
         add     $1, %r11
         shr     $8, %rax
         sub     $1, %r13
-        jne     _less_than_8_bytes_left\@
+        jne     .L_less_than_8_bytes_left\@
         #############################
 
-_multiple_of_16_bytes\@:
+.L_multiple_of_16_bytes\@:
 .endm
 
 
@@ -519,12 +495,12 @@ _multiple_of_16_bytes\@:
 
         mov PBlockLen(arg2), %r12
         test %r12, %r12
-        je _partial_done\@
+        je .L_partial_done\@
 
        #GHASH computation for the last <16 Byte block
         \GHASH_MUL       %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
 
-_partial_done\@:
+.L_partial_done\@:
         mov AadLen(arg2), %r12                          # r12 = aadLen (number of bytes)
         shl     $3, %r12                             # convert into number of bits
         vmovd   %r12d, %xmm15                        # len(A) in xmm15
@@ -547,49 +523,49 @@ _partial_done\@:
 
 
 
-_return_T\@:
+.L_return_T\@:
         mov     \AUTH_TAG, %r10              # r10 = authTag
         mov     \AUTH_TAG_LEN, %r11              # r11 = auth_tag_len
 
         cmp     $16, %r11
-        je      _T_16\@
+        je      .L_T_16\@
 
         cmp     $8, %r11
-        jl      _T_4\@
+        jl      .L_T_4\@
 
-_T_8\@:
+.L_T_8\@:
         vmovq   %xmm9, %rax
         mov     %rax, (%r10)
         add     $8, %r10
         sub     $8, %r11
         vpsrldq $8, %xmm9, %xmm9
         test    %r11, %r11
-        je     _return_T_done\@
-_T_4\@:
+        je     .L_return_T_done\@
+.L_T_4\@:
         vmovd   %xmm9, %eax
         mov     %eax, (%r10)
         add     $4, %r10
         sub     $4, %r11
         vpsrldq     $4, %xmm9, %xmm9
         test    %r11, %r11
-        je     _return_T_done\@
-_T_123\@:
+        je     .L_return_T_done\@
+.L_T_123\@:
         vmovd     %xmm9, %eax
         cmp     $2, %r11
-        jl     _T_1\@
+        jl     .L_T_1\@
         mov     %ax, (%r10)
         cmp     $2, %r11
-        je     _return_T_done\@
+        je     .L_return_T_done\@
         add     $2, %r10
         sar     $16, %eax
-_T_1\@:
+.L_T_1\@:
         mov     %al, (%r10)
-        jmp     _return_T_done\@
+        jmp     .L_return_T_done\@
 
-_T_16\@:
+.L_T_16\@:
         vmovdqu %xmm9, (%r10)
 
-_return_T_done\@:
+.L_return_T_done\@:
 .endm
 
 .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
@@ -603,8 +579,8 @@ _return_T_done\@:
        vpxor   \T8, \T8, \T8
        vpxor   \T7, \T7, \T7
        cmp     $16, %r11
-       jl      _get_AAD_rest8\@
-_get_AAD_blocks\@:
+       jl      .L_get_AAD_rest8\@
+.L_get_AAD_blocks\@:
        vmovdqu (%r10), \T7
        vpshufb SHUF_MASK(%rip), \T7, \T7
        vpxor   \T7, \T8, \T8
@@ -613,29 +589,29 @@ _get_AAD_blocks\@:
        sub     $16, %r12
        sub     $16, %r11
        cmp     $16, %r11
-       jge     _get_AAD_blocks\@
+       jge     .L_get_AAD_blocks\@
        vmovdqu \T8, \T7
        test    %r11, %r11
-       je      _get_AAD_done\@
+       je      .L_get_AAD_done\@
 
        vpxor   \T7, \T7, \T7
 
        /* read the last <16B of AAD. since we have at least 4B of
        data right after the AAD (the ICV, and maybe some CT), we can
        read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\@:
+.L_get_AAD_rest8\@:
        cmp     $4, %r11
-       jle     _get_AAD_rest4\@
+       jle     .L_get_AAD_rest4\@
        movq    (%r10), \T1
        add     $8, %r10
        sub     $8, %r11
        vpslldq $8, \T1, \T1
        vpsrldq $8, \T7, \T7
        vpxor   \T1, \T7, \T7
-       jmp     _get_AAD_rest8\@
-_get_AAD_rest4\@:
+       jmp     .L_get_AAD_rest8\@
+.L_get_AAD_rest4\@:
        test    %r11, %r11
-       jle      _get_AAD_rest0\@
+       jle     .L_get_AAD_rest0\@
        mov     (%r10), %eax
        movq    %rax, \T1
        add     $4, %r10
@@ -643,20 +619,22 @@ _get_AAD_rest4\@:
        vpslldq $12, \T1, \T1
        vpsrldq $4, \T7, \T7
        vpxor   \T1, \T7, \T7
-_get_AAD_rest0\@:
+.L_get_AAD_rest0\@:
        /* finalize: shift out the extra bytes we read, and align
        left. since pslldq can only shift by an immediate, we use
-       vpshufb and an array of shuffle masks */
-       movq    %r12, %r11
-       salq    $4, %r11
-       vmovdqu  aad_shift_arr(%r11), \T1
-       vpshufb \T1, \T7, \T7
-_get_AAD_rest_final\@:
+       vpshufb and a pair of shuffle masks */
+       leaq    ALL_F(%rip), %r11
+       subq    %r12, %r11
+       vmovdqu 16(%r11), \T1
+       andq    $~3, %r11
+       vpshufb (%r11), \T7, \T7
+       vpand   \T1, \T7, \T7
+.L_get_AAD_rest_final\@:
        vpshufb SHUF_MASK(%rip), \T7, \T7
        vpxor   \T8, \T7, \T7
        \GHASH_MUL       \T7, \T2, \T1, \T3, \T4, \T5, \T6
 
-_get_AAD_done\@:
+.L_get_AAD_done\@:
         vmovdqu \T7, AadHash(arg2)
 .endm
 
@@ -707,28 +685,28 @@ _get_AAD_done\@:
         vpxor \XMMDst, \XMMDst, \XMMDst
 
         cmp $8, \DLEN
-        jl _read_lt8_\@
+        jl .L_read_lt8_\@
         mov (\DPTR), %rax
         vpinsrq $0, %rax, \XMMDst, \XMMDst
         sub $8, \DLEN
-        jz _done_read_partial_block_\@
+        jz .L_done_read_partial_block_\@
         xor %eax, %eax
-_read_next_byte_\@:
+.L_read_next_byte_\@:
         shl $8, %rax
         mov 7(\DPTR, \DLEN, 1), %al
         dec \DLEN
-        jnz _read_next_byte_\@
+        jnz .L_read_next_byte_\@
         vpinsrq $1, %rax, \XMMDst, \XMMDst
-        jmp _done_read_partial_block_\@
-_read_lt8_\@:
+        jmp .L_done_read_partial_block_\@
+.L_read_lt8_\@:
         xor %eax, %eax
-_read_next_byte_lt8_\@:
+.L_read_next_byte_lt8_\@:
         shl $8, %rax
         mov -1(\DPTR, \DLEN, 1), %al
         dec \DLEN
-        jnz _read_next_byte_lt8_\@
+        jnz .L_read_next_byte_lt8_\@
         vpinsrq $0, %rax, \XMMDst, \XMMDst
-_done_read_partial_block_\@:
+.L_done_read_partial_block_\@:
 .endm
 
 # PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks
@@ -740,21 +718,21 @@ _done_read_partial_block_\@:
         AAD_HASH ENC_DEC
         mov    PBlockLen(arg2), %r13
         test   %r13, %r13
-        je     _partial_block_done_\@  # Leave Macro if no partial blocks
+        je     .L_partial_block_done_\@        # Leave Macro if no partial blocks
         # Read in input data without over reading
         cmp    $16, \PLAIN_CYPH_LEN
-        jl     _fewer_than_16_bytes_\@
+        jl     .L_fewer_than_16_bytes_\@
         vmovdqu        (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm
-        jmp    _data_read_\@
+        jmp    .L_data_read_\@
 
-_fewer_than_16_bytes_\@:
+.L_fewer_than_16_bytes_\@:
         lea    (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
         mov    \PLAIN_CYPH_LEN, %r12
         READ_PARTIAL_BLOCK %r10 %r12 %xmm1
 
         mov PBlockLen(arg2), %r13
 
-_data_read_\@:                         # Finished reading in data
+.L_data_read_\@:                               # Finished reading in data
 
         vmovdqu        PBlockEncKey(arg2), %xmm9
         vmovdqu        HashKey(arg2), %xmm13
@@ -777,9 +755,9 @@ _data_read_\@:                              # Finished reading in data
         sub    $16, %r10
         # Determine if if partial block is not being filled and
         # shift mask accordingly
-        jge    _no_extra_mask_1_\@
+        jge    .L_no_extra_mask_1_\@
         sub    %r10, %r12
-_no_extra_mask_1_\@:
+.L_no_extra_mask_1_\@:
 
         vmovdqu        ALL_F-SHIFT_MASK(%r12), %xmm1
         # get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -792,17 +770,17 @@ _no_extra_mask_1_\@:
         vpxor  %xmm3, \AAD_HASH, \AAD_HASH
 
         test   %r10, %r10
-        jl     _partial_incomplete_1_\@
+        jl     .L_partial_incomplete_1_\@
 
         # GHASH computation for the last <16 Byte block
         \GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
         xor    %eax,%eax
 
         mov    %rax, PBlockLen(arg2)
-        jmp    _dec_done_\@
-_partial_incomplete_1_\@:
+        jmp    .L_dec_done_\@
+.L_partial_incomplete_1_\@:
         add    \PLAIN_CYPH_LEN, PBlockLen(arg2)
-_dec_done_\@:
+.L_dec_done_\@:
         vmovdqu        \AAD_HASH, AadHash(arg2)
 .else
         vpxor  %xmm1, %xmm9, %xmm9                     # Plaintext XOR E(K, Yn)
@@ -813,9 +791,9 @@ _dec_done_\@:
         sub    $16, %r10
         # Determine if if partial block is not being filled and
         # shift mask accordingly
-        jge    _no_extra_mask_2_\@
+        jge    .L_no_extra_mask_2_\@
         sub    %r10, %r12
-_no_extra_mask_2_\@:
+.L_no_extra_mask_2_\@:
 
         vmovdqu        ALL_F-SHIFT_MASK(%r12), %xmm1
         # get the appropriate mask to mask out bottom r13 bytes of xmm9
@@ -827,17 +805,17 @@ _no_extra_mask_2_\@:
         vpxor  %xmm9, \AAD_HASH, \AAD_HASH
 
         test   %r10, %r10
-        jl     _partial_incomplete_2_\@
+        jl     .L_partial_incomplete_2_\@
 
         # GHASH computation for the last <16 Byte block
         \GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
         xor    %eax,%eax
 
         mov    %rax, PBlockLen(arg2)
-        jmp    _encode_done_\@
-_partial_incomplete_2_\@:
+        jmp    .L_encode_done_\@
+.L_partial_incomplete_2_\@:
         add    \PLAIN_CYPH_LEN, PBlockLen(arg2)
-_encode_done_\@:
+.L_encode_done_\@:
         vmovdqu        \AAD_HASH, AadHash(arg2)
 
         vmovdqa        SHUF_MASK(%rip), %xmm10
@@ -847,32 +825,32 @@ _encode_done_\@:
 .endif
         # output encrypted Bytes
         test   %r10, %r10
-        jl     _partial_fill_\@
+        jl     .L_partial_fill_\@
         mov    %r13, %r12
         mov    $16, %r13
         # Set r13 to be the number of bytes to write out
         sub    %r12, %r13
-        jmp    _count_set_\@
-_partial_fill_\@:
+        jmp    .L_count_set_\@
+.L_partial_fill_\@:
         mov    \PLAIN_CYPH_LEN, %r13
-_count_set_\@:
+.L_count_set_\@:
         vmovdqa        %xmm9, %xmm0
         vmovq  %xmm0, %rax
         cmp    $8, %r13
-        jle    _less_than_8_bytes_left_\@
+        jle    .L_less_than_8_bytes_left_\@
 
         mov    %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
         add    $8, \DATA_OFFSET
         psrldq $8, %xmm0
         vmovq  %xmm0, %rax
         sub    $8, %r13
-_less_than_8_bytes_left_\@:
+.L_less_than_8_bytes_left_\@:
         movb   %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
         add    $1, \DATA_OFFSET
         shr    $8, %rax
         sub    $1, %r13
-        jne    _less_than_8_bytes_left_\@
-_partial_block_done_\@:
+        jne    .L_less_than_8_bytes_left_\@
+.L_partial_block_done_\@:
 .endm # PARTIAL_BLOCK
 
 ###############################################################################
@@ -1073,7 +1051,7 @@ _partial_block_done_\@:
         vmovdqa  \XMM8, \T3
 
         cmp     $128, %r13
-        jl      _initial_blocks_done\@                  # no need for precomputed constants
+        jl      .L_initial_blocks_done\@                  # no need for precomputed constants
 
 ###############################################################################
 # Haskey_i_k holds XORed values of the low and high parts of the Haskey_i
@@ -1215,7 +1193,7 @@ _partial_block_done_\@:
 
 ###############################################################################
 
-_initial_blocks_done\@:
+.L_initial_blocks_done\@:
 
 .endm
 
@@ -2023,7 +2001,7 @@ SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
         vmovdqa  \XMM8, \T3
 
         cmp     $128, %r13
-        jl      _initial_blocks_done\@                  # no need for precomputed constants
+        jl      .L_initial_blocks_done\@                  # no need for precomputed constants
 
 ###############################################################################
 # Haskey_i_k holds XORed values of the low and high parts of the Haskey_i
@@ -2167,7 +2145,7 @@ SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
 
 ###############################################################################
 
-_initial_blocks_done\@:
+.L_initial_blocks_done\@:
 
 
 .endm
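
The removal of aad_shift_arr above works because the existing SHIFT_MASK / ALL_F data already forms a 48-byte sliding window: 16 identity-shuffle bytes 00..0f, 16 bytes of 0xff, then 16 bytes of 0x00. Any byte-shift control and any keep-low-n-bytes mask is a 16-byte slice of that window, so the old 272-byte per-length table is redundant. A Perl sketch of the window trick (a model of the idea, not a lane-accurate trace of the new code path, which additionally rounds its shuffle offset down to a 4-byte boundary):

#!/usr/bin/env perl
use strict;
use warnings;

# The 48-byte window formed by SHIFT_MASK, ALL_F and the trailing zero octa:
my $win = join('', map { chr } 0 .. 15)    # 00..0f: identity pshufb control
        . ("\xff" x 16)                    # ALL_F
        . ("\x00" x 16);                   # zero octa

# 16 bytes at offset 32-n: n bytes of 0xff then zeros, i.e. a mask that
# keeps the low n bytes of a block ("lea ALL_F+16; sub n" in the asm).
sub keep_low_mask { my $n = shift; substr($win, 32 - $n, 16) }

# 16 bytes at offset 16-n: a pshufb control that shifts a register right
# by 16-n bytes (0xff lanes produce zero output bytes).
sub shift_control { my $n = shift; substr($win, 16 - $n, 16) }

printf "mask(5): %s\n", unpack("H*", keep_low_mask(5));
printf "shuf(5): %s\n", unpack("H*", shift_control(5));
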
diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S
index 9243f6289d34bfbfbc3ca22ba5d1199b80c00147..7c1abc513f34621eff4d0ee72db6a6c3dbb291f5 100644 (file)
@@ -80,7 +80,7 @@
        transpose_4x4(c0, c1, c2, c3, a0, a1);          \
        transpose_4x4(d0, d1, d2, d3, a0, a1);          \
                                                        \
-       vmovdqu .Lshufb_16x16b, a0;                     \
+       vmovdqu .Lshufb_16x16b(%rip), a0;               \
        vmovdqu st1, a1;                                \
        vpshufb a0, a2, a2;                             \
        vpshufb a0, a3, a3;                             \
        transpose_4x4(c0, c1, c2, c3, a0, a1);          \
        transpose_4x4(d0, d1, d2, d3, a0, a1);          \
                                                        \
-       vmovdqu .Lshufb_16x16b, a0;                     \
+       vmovdqu .Lshufb_16x16b(%rip), a0;               \
        vmovdqu st1, a1;                                \
        vpshufb a0, a2, a2;                             \
        vpshufb a0, a3, a3;                             \
                            x4, x5, x6, x7,             \
                            t0, t1, t2, t3,             \
                            t4, t5, t6, t7)             \
-       vmovdqa .Ltf_s2_bitmatrix, t0;                  \
-       vmovdqa .Ltf_inv_bitmatrix, t1;                 \
-       vmovdqa .Ltf_id_bitmatrix, t2;                  \
-       vmovdqa .Ltf_aff_bitmatrix, t3;                 \
-       vmovdqa .Ltf_x2_bitmatrix, t4;                  \
+       vmovdqa .Ltf_s2_bitmatrix(%rip), t0;            \
+       vmovdqa .Ltf_inv_bitmatrix(%rip), t1;           \
+       vmovdqa .Ltf_id_bitmatrix(%rip), t2;            \
+       vmovdqa .Ltf_aff_bitmatrix(%rip), t3;           \
+       vmovdqa .Ltf_x2_bitmatrix(%rip), t4;            \
        vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1;   \
        vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5;   \
        vgf2p8affineqb $(tf_inv_const), t1, x2, x2;     \
                       x4, x5, x6, x7,                  \
                       t0, t1, t2, t3,                  \
                       t4, t5, t6, t7)                  \
-       vmovdqa .Linv_shift_row, t0;                    \
-       vmovdqa .Lshift_row, t1;                        \
-       vbroadcastss .L0f0f0f0f, t6;                    \
-       vmovdqa .Ltf_lo__inv_aff__and__s2, t2;          \
-       vmovdqa .Ltf_hi__inv_aff__and__s2, t3;          \
-       vmovdqa .Ltf_lo__x2__and__fwd_aff, t4;          \
-       vmovdqa .Ltf_hi__x2__and__fwd_aff, t5;          \
+       vmovdqa .Linv_shift_row(%rip), t0;              \
+       vmovdqa .Lshift_row(%rip), t1;                  \
+       vbroadcastss .L0f0f0f0f(%rip), t6;              \
+       vmovdqa .Ltf_lo__inv_aff__and__s2(%rip), t2;    \
+       vmovdqa .Ltf_hi__inv_aff__and__s2(%rip), t3;    \
+       vmovdqa .Ltf_lo__x2__and__fwd_aff(%rip), t4;    \
+       vmovdqa .Ltf_hi__x2__and__fwd_aff(%rip), t5;    \
                                                        \
        vaesenclast t7, x0, x0;                         \
        vaesenclast t7, x4, x4;                         \
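
The aria changes in this file and the two that follow are mechanical: every memory operand that names a constant (.Lfoo) gains an explicit (%rip) base, so the resulting code needs no absolute relocations and can be built position-independent for a PIE kernel. Purely to illustrate the shape of the rewrite (this is not the script the authors used, and a real conversion needs each site reviewed by hand):

#!/usr/bin/env perl
use strict;
use warnings;

# Toy rewrite: turn a bare ".Lconst" memory operand (label followed by a
# comma) into ".Lconst(%rip)". Branch targets have no trailing comma and
# are left alone, as are lines that already carry a (%rip) base.
while (my $line = <DATA>) {
    $line =~ s/(\.L\w+)(?=\s*,)/$1(%rip)/ unless $line =~ /\(%rip\)/;
    print $line;
}
__DATA__
	vmovdqa .Ltf_s2_bitmatrix, t0;
	vbroadcastss .L0f0f0f0f, t6;
	vmovdqa .Ltf_inv_bitmatrix(%rip), t1;
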
diff --git a/arch/x86/crypto/aria-aesni-avx2-asm_64.S b/arch/x86/crypto/aria-aesni-avx2-asm_64.S
index 82a14b4ad920f7927cea8b4a013595f7b9c249d2..c60fa2980630379b6e4d095d20f155ce204fff52 100644 (file)
@@ -96,7 +96,7 @@
        transpose_4x4(c0, c1, c2, c3, a0, a1);          \
        transpose_4x4(d0, d1, d2, d3, a0, a1);          \
                                                        \
-       vbroadcasti128 .Lshufb_16x16b, a0;              \
+       vbroadcasti128 .Lshufb_16x16b(%rip), a0;        \
        vmovdqu st1, a1;                                \
        vpshufb a0, a2, a2;                             \
        vpshufb a0, a3, a3;                             \
        transpose_4x4(c0, c1, c2, c3, a0, a1);          \
        transpose_4x4(d0, d1, d2, d3, a0, a1);          \
                                                        \
-       vbroadcasti128 .Lshufb_16x16b, a0;              \
+       vbroadcasti128 .Lshufb_16x16b(%rip), a0;        \
        vmovdqu st1, a1;                                \
        vpshufb a0, a2, a2;                             \
        vpshufb a0, a3, a3;                             \
                            x4, x5, x6, x7,             \
                            t0, t1, t2, t3,             \
                            t4, t5, t6, t7)             \
-       vpbroadcastq .Ltf_s2_bitmatrix, t0;             \
-       vpbroadcastq .Ltf_inv_bitmatrix, t1;            \
-       vpbroadcastq .Ltf_id_bitmatrix, t2;             \
-       vpbroadcastq .Ltf_aff_bitmatrix, t3;            \
-       vpbroadcastq .Ltf_x2_bitmatrix, t4;             \
+       vpbroadcastq .Ltf_s2_bitmatrix(%rip), t0;       \
+       vpbroadcastq .Ltf_inv_bitmatrix(%rip), t1;      \
+       vpbroadcastq .Ltf_id_bitmatrix(%rip), t2;       \
+       vpbroadcastq .Ltf_aff_bitmatrix(%rip), t3;      \
+       vpbroadcastq .Ltf_x2_bitmatrix(%rip), t4;       \
        vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1;   \
        vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5;   \
        vgf2p8affineqb $(tf_inv_const), t1, x2, x2;     \
                       t4, t5, t6, t7)                  \
        vpxor t7, t7, t7;                               \
        vpxor t6, t6, t6;                               \
-       vbroadcasti128 .Linv_shift_row, t0;             \
-       vbroadcasti128 .Lshift_row, t1;                 \
-       vbroadcasti128 .Ltf_lo__inv_aff__and__s2, t2;   \
-       vbroadcasti128 .Ltf_hi__inv_aff__and__s2, t3;   \
-       vbroadcasti128 .Ltf_lo__x2__and__fwd_aff, t4;   \
-       vbroadcasti128 .Ltf_hi__x2__and__fwd_aff, t5;   \
+       vbroadcasti128 .Linv_shift_row(%rip), t0;       \
+       vbroadcasti128 .Lshift_row(%rip), t1;           \
+       vbroadcasti128 .Ltf_lo__inv_aff__and__s2(%rip), t2; \
+       vbroadcasti128 .Ltf_hi__inv_aff__and__s2(%rip), t3; \
+       vbroadcasti128 .Ltf_lo__x2__and__fwd_aff(%rip), t4; \
+       vbroadcasti128 .Ltf_hi__x2__and__fwd_aff(%rip), t5; \
                                                        \
        vextracti128 $1, x0, t6##_x;                    \
        vaesenclast t7##_x, x0##_x, x0##_x;             \
        vaesdeclast t7##_x, t6##_x, t6##_x;             \
        vinserti128 $1, t6##_x, x6, x6;                 \
                                                        \
-       vpbroadcastd .L0f0f0f0f, t6;                    \
+       vpbroadcastd .L0f0f0f0f(%rip), t6;              \
                                                        \
        /* AES inverse shift rows */                    \
        vpshufb t0, x0, x0;                             \
index 3193f0701450665587f90dcb352d0e50695a4d88..860887e5d02ed6ef58b954611ea77a0a6f5661d9 100644 (file)
@@ -80,7 +80,7 @@
        transpose_4x4(c0, c1, c2, c3, a0, a1);          \
        transpose_4x4(d0, d1, d2, d3, a0, a1);          \
                                                        \
-       vbroadcasti64x2 .Lshufb_16x16b, a0;             \
+       vbroadcasti64x2 .Lshufb_16x16b(%rip), a0;       \
        vmovdqu64 st1, a1;                              \
        vpshufb a0, a2, a2;                             \
        vpshufb a0, a3, a3;                             \
        transpose_4x4(c0, c1, c2, c3, a0, a1);          \
        transpose_4x4(d0, d1, d2, d3, a0, a1);          \
                                                        \
-       vbroadcasti64x2 .Lshufb_16x16b, a0;             \
+       vbroadcasti64x2 .Lshufb_16x16b(%rip), a0;       \
        vmovdqu64 st1, a1;                              \
        vpshufb a0, a2, a2;                             \
        vpshufb a0, a3, a3;                             \
                            x4, x5, x6, x7,             \
                            t0, t1, t2, t3,             \
                            t4, t5, t6, t7)             \
-       vpbroadcastq .Ltf_s2_bitmatrix, t0;             \
-       vpbroadcastq .Ltf_inv_bitmatrix, t1;            \
-       vpbroadcastq .Ltf_id_bitmatrix, t2;             \
-       vpbroadcastq .Ltf_aff_bitmatrix, t3;            \
-       vpbroadcastq .Ltf_x2_bitmatrix, t4;             \
+       vpbroadcastq .Ltf_s2_bitmatrix(%rip), t0;       \
+       vpbroadcastq .Ltf_inv_bitmatrix(%rip), t1;      \
+       vpbroadcastq .Ltf_id_bitmatrix(%rip), t2;       \
+       vpbroadcastq .Ltf_aff_bitmatrix(%rip), t3;      \
+       vpbroadcastq .Ltf_x2_bitmatrix(%rip), t4;       \
        vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1;   \
        vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5;   \
        vgf2p8affineqb $(tf_inv_const), t1, x2, x2;     \
                             y4, y5, y6, y7,            \
                             t0, t1, t2, t3,            \
                             t4, t5, t6, t7)            \
-       vpbroadcastq .Ltf_s2_bitmatrix, t0;             \
-       vpbroadcastq .Ltf_inv_bitmatrix, t1;            \
-       vpbroadcastq .Ltf_id_bitmatrix, t2;             \
-       vpbroadcastq .Ltf_aff_bitmatrix, t3;            \
-       vpbroadcastq .Ltf_x2_bitmatrix, t4;             \
+       vpbroadcastq .Ltf_s2_bitmatrix(%rip), t0;       \
+       vpbroadcastq .Ltf_inv_bitmatrix(%rip), t1;      \
+       vpbroadcastq .Ltf_id_bitmatrix(%rip), t2;       \
+       vpbroadcastq .Ltf_aff_bitmatrix(%rip), t3;      \
+       vpbroadcastq .Ltf_x2_bitmatrix(%rip), t4;       \
        vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1;   \
        vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5;   \
        vgf2p8affineqb $(tf_inv_const), t1, x2, x2;     \
index 4a30618281ec2e9e85af99f0d0e1ad6ec3dcaa72..646477a13e110fed04c960428226d8a53ca41442 100644 (file)
        /* \
         * S-function with AES subbytes \
         */ \
-       vmovdqa .Linv_shift_row, t4; \
-       vbroadcastss .L0f0f0f0f, t7; \
-       vmovdqa .Lpre_tf_lo_s1, t0; \
-       vmovdqa .Lpre_tf_hi_s1, t1; \
+       vmovdqa .Linv_shift_row(%rip), t4; \
+       vbroadcastss .L0f0f0f0f(%rip), t7; \
+       vmovdqa .Lpre_tf_lo_s1(%rip), t0; \
+       vmovdqa .Lpre_tf_hi_s1(%rip), t1; \
        \
        /* AES inverse shift rows */ \
        vpshufb t4, x0, x0; \
@@ -68,8 +68,8 @@
        vpshufb t4, x6, x6; \
        \
        /* prefilter sboxes 1, 2 and 3 */ \
-       vmovdqa .Lpre_tf_lo_s4, t2; \
-       vmovdqa .Lpre_tf_hi_s4, t3; \
+       vmovdqa .Lpre_tf_lo_s4(%rip), t2; \
+       vmovdqa .Lpre_tf_hi_s4(%rip), t3; \
        filter_8bit(x0, t0, t1, t7, t6); \
        filter_8bit(x7, t0, t1, t7, t6); \
        filter_8bit(x1, t0, t1, t7, t6); \
@@ -83,8 +83,8 @@
        filter_8bit(x6, t2, t3, t7, t6); \
        \
        /* AES subbytes + AES shift rows */ \
-       vmovdqa .Lpost_tf_lo_s1, t0; \
-       vmovdqa .Lpost_tf_hi_s1, t1; \
+       vmovdqa .Lpost_tf_lo_s1(%rip), t0; \
+       vmovdqa .Lpost_tf_hi_s1(%rip), t1; \
        vaesenclast t4, x0, x0; \
        vaesenclast t4, x7, x7; \
        vaesenclast t4, x1, x1; \
        vaesenclast t4, x6, x6; \
        \
        /* postfilter sboxes 1 and 4 */ \
-       vmovdqa .Lpost_tf_lo_s3, t2; \
-       vmovdqa .Lpost_tf_hi_s3, t3; \
+       vmovdqa .Lpost_tf_lo_s3(%rip), t2; \
+       vmovdqa .Lpost_tf_hi_s3(%rip), t3; \
        filter_8bit(x0, t0, t1, t7, t6); \
        filter_8bit(x7, t0, t1, t7, t6); \
        filter_8bit(x3, t0, t1, t7, t6); \
        filter_8bit(x6, t0, t1, t7, t6); \
        \
        /* postfilter sbox 3 */ \
-       vmovdqa .Lpost_tf_lo_s2, t4; \
-       vmovdqa .Lpost_tf_hi_s2, t5; \
+       vmovdqa .Lpost_tf_lo_s2(%rip), t4; \
+       vmovdqa .Lpost_tf_hi_s2(%rip), t5; \
        filter_8bit(x2, t2, t3, t7, t6); \
        filter_8bit(x5, t2, t3, t7, t6); \
        \
@@ -443,7 +443,7 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
        transpose_4x4(c0, c1, c2, c3, a0, a1); \
        transpose_4x4(d0, d1, d2, d3, a0, a1); \
        \
-       vmovdqu .Lshufb_16x16b, a0; \
+       vmovdqu .Lshufb_16x16b(%rip), a0; \
        vmovdqu st1, a1; \
        vpshufb a0, a2, a2; \
        vpshufb a0, a3, a3; \
@@ -482,7 +482,7 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 #define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
                     y6, y7, rio, key) \
        vmovq key, x0; \
-       vpshufb .Lpack_bswap, x0, x0; \
+       vpshufb .Lpack_bswap(%rip), x0, x0; \
        \
        vpxor 0 * 16(rio), x0, y7; \
        vpxor 1 * 16(rio), x0, y6; \
@@ -533,7 +533,7 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
        vmovdqu x0, stack_tmp0; \
        \
        vmovq key, x0; \
-       vpshufb .Lpack_bswap, x0, x0; \
+       vpshufb .Lpack_bswap(%rip), x0, x0; \
        \
        vpxor x0, y7, y7; \
        vpxor x0, y6, y6; \
index deaf62aa73a6b09cb002cb99562438206f43ef43..a0eb94e53b1bb12d50bb5ecc5ab5edffb665625f 100644 (file)
        /* \
         * S-function with AES subbytes \
         */ \
-       vbroadcasti128 .Linv_shift_row, t4; \
-       vpbroadcastd .L0f0f0f0f, t7; \
-       vbroadcasti128 .Lpre_tf_lo_s1, t5; \
-       vbroadcasti128 .Lpre_tf_hi_s1, t6; \
-       vbroadcasti128 .Lpre_tf_lo_s4, t2; \
-       vbroadcasti128 .Lpre_tf_hi_s4, t3; \
+       vbroadcasti128 .Linv_shift_row(%rip), t4; \
+       vpbroadcastd .L0f0f0f0f(%rip), t7; \
+       vbroadcasti128 .Lpre_tf_lo_s1(%rip), t5; \
+       vbroadcasti128 .Lpre_tf_hi_s1(%rip), t6; \
+       vbroadcasti128 .Lpre_tf_lo_s4(%rip), t2; \
+       vbroadcasti128 .Lpre_tf_hi_s4(%rip), t3; \
        \
        /* AES inverse shift rows */ \
        vpshufb t4, x0, x0; \
        vinserti128 $1, t2##_x, x6, x6; \
        vextracti128 $1, x1, t3##_x; \
        vextracti128 $1, x4, t2##_x; \
-       vbroadcasti128 .Lpost_tf_lo_s1, t0; \
-       vbroadcasti128 .Lpost_tf_hi_s1, t1; \
+       vbroadcasti128 .Lpost_tf_lo_s1(%rip), t0; \
+       vbroadcasti128 .Lpost_tf_hi_s1(%rip), t1; \
        vaesenclast t4##_x, x2##_x, x2##_x; \
        vaesenclast t4##_x, t6##_x, t6##_x; \
        vinserti128 $1, t6##_x, x2, x2; \
        vinserti128 $1, t2##_x, x4, x4; \
        \
        /* postfilter sboxes 1 and 4 */ \
-       vbroadcasti128 .Lpost_tf_lo_s3, t2; \
-       vbroadcasti128 .Lpost_tf_hi_s3, t3; \
+       vbroadcasti128 .Lpost_tf_lo_s3(%rip), t2; \
+       vbroadcasti128 .Lpost_tf_hi_s3(%rip), t3; \
        filter_8bit(x0, t0, t1, t7, t6); \
        filter_8bit(x7, t0, t1, t7, t6); \
        filter_8bit(x3, t0, t1, t7, t6); \
        filter_8bit(x6, t0, t1, t7, t6); \
        \
        /* postfilter sbox 3 */ \
-       vbroadcasti128 .Lpost_tf_lo_s2, t4; \
-       vbroadcasti128 .Lpost_tf_hi_s2, t5; \
+       vbroadcasti128 .Lpost_tf_lo_s2(%rip), t4; \
+       vbroadcasti128 .Lpost_tf_hi_s2(%rip), t5; \
        filter_8bit(x2, t2, t3, t7, t6); \
        filter_8bit(x5, t2, t3, t7, t6); \
        \
@@ -475,7 +475,7 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
        transpose_4x4(c0, c1, c2, c3, a0, a1); \
        transpose_4x4(d0, d1, d2, d3, a0, a1); \
        \
-       vbroadcasti128 .Lshufb_16x16b, a0; \
+       vbroadcasti128 .Lshufb_16x16b(%rip), a0; \
        vmovdqu st1, a1; \
        vpshufb a0, a2, a2; \
        vpshufb a0, a3, a3; \
@@ -514,7 +514,7 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 #define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
                     y6, y7, rio, key) \
        vpbroadcastq key, x0; \
-       vpshufb .Lpack_bswap, x0, x0; \
+       vpshufb .Lpack_bswap(%rip), x0, x0; \
        \
        vpxor 0 * 32(rio), x0, y7; \
        vpxor 1 * 32(rio), x0, y6; \
@@ -565,7 +565,7 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
        vmovdqu x0, stack_tmp0; \
        \
        vpbroadcastq key, x0; \
-       vpshufb .Lpack_bswap, x0, x0; \
+       vpshufb .Lpack_bswap(%rip), x0, x0; \
        \
        vpxor x0, y7, y7; \
        vpxor x0, y6, y6; \
index 347c059f59403d3c081d09d676fbe5920d8cff09..816b6bb8bded7bb489351dc2263c6a4f72b561a6 100644 (file)
 #define RXORbl %r9b
 
 #define xor2ror16(T0, T1, tmp1, tmp2, ab, dst) \
+       leaq T0(%rip),                  tmp1; \
        movzbl ab ## bl,                tmp2 ## d; \
+       xorq (tmp1, tmp2, 8),           dst; \
+       leaq T1(%rip),                  tmp2; \
        movzbl ab ## bh,                tmp1 ## d; \
        rorq $16,                       ab; \
-       xorq T0(, tmp2, 8),             dst; \
-       xorq T1(, tmp1, 8),             dst;
+       xorq (tmp2, tmp1, 8),           dst;
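The reworked xor2ror16 above shows why some sites need more than an appended suffix: x86-64 has no RIP-relative addressing mode that also takes an index register, so T0(%rip, tmp2, 8) is not encodable. The table base must first be materialized with leaq and then indexed off that register. Note how the macro interleaves the leaq with the movzbl extractions so tmp1 and tmp2 swap between index and base duty, avoiding a third temporary. A sketch of the transformation, with illustrative registers and a hypothetical table s8:

        xorq    s8(, %rax, 8), %rdx     # before: absolute base plus index;
                                        # no %rip-relative equivalent exists
        leaq    s8(%rip), %r11          # after: take the table address first,
        xorq    (%r11, %rax, 8), %rdx   # then index off the register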
 
 /**********************************************************************
   1-way camellia
index 0326a01503c3a554bf89482ed158eb7c8719c9ce..b4e460a87f18ddaac4ebb9cee25c86875cb87e7c 100644 (file)
 
 #define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
        movzbl          src ## bh,     RID1d;    \
+       leaq            s1(%rip),      RID2;     \
+       movl            (RID2,RID1,4), dst ## d; \
        movzbl          src ## bl,     RID2d;    \
+       leaq            s2(%rip),      RID1;     \
+       op1             (RID1,RID2,4), dst ## d; \
        shrq $16,       src;                     \
-       movl            s1(, RID1, 4), dst ## d; \
-       op1             s2(, RID2, 4), dst ## d; \
        movzbl          src ## bh,     RID1d;    \
+       leaq            s3(%rip),      RID2;     \
+       op2             (RID2,RID1,4), dst ## d; \
        movzbl          src ## bl,     RID2d;    \
        interleave_op(il_reg);                   \
-       op2             s3(, RID1, 4), dst ## d; \
-       op3             s4(, RID2, 4), dst ## d;
+       leaq            s4(%rip),      RID1;     \
+       op3             (RID1,RID2,4), dst ## d;
 
 #define dummy(d) /* do nothing */
 
        subround(l ## 3, r ## 3, l ## 4, r ## 4, f);
 
 #define enc_preload_rkr() \
-       vbroadcastss    .L16_mask,                RKR;      \
+       vbroadcastss    .L16_mask(%rip),          RKR;      \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor           kr(CTX),                  RKR, RKR;
 
 #define dec_preload_rkr() \
-       vbroadcastss    .L16_mask,                RKR;      \
+       vbroadcastss    .L16_mask(%rip),          RKR;      \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor           kr(CTX),                  RKR, RKR; \
-       vpshufb         .Lbswap128_mask,          RKR, RKR;
+       vpshufb         .Lbswap128_mask(%rip),    RKR, RKR;
 
 #define transpose_2x4(x0, x1, t0, t1) \
        vpunpckldq              x1, x0, t0; \
@@ -235,9 +239,9 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
 
        movq %rdi, CTX;
 
-       vmovdqa .Lbswap_mask, RKM;
-       vmovd .Lfirst_mask, R1ST;
-       vmovd .L32_mask, R32;
+       vmovdqa .Lbswap_mask(%rip), RKM;
+       vmovd .Lfirst_mask(%rip), R1ST;
+       vmovd .L32_mask(%rip), R32;
        enc_preload_rkr();
 
        inpack_blocks(RL1, RR1, RTMP, RX, RKM);
@@ -271,7 +275,7 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
        popq %rbx;
        popq %r15;
 
-       vmovdqa .Lbswap_mask, RKM;
+       vmovdqa .Lbswap_mask(%rip), RKM;
 
        outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
@@ -308,9 +312,9 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
 
        movq %rdi, CTX;
 
-       vmovdqa .Lbswap_mask, RKM;
-       vmovd .Lfirst_mask, R1ST;
-       vmovd .L32_mask, R32;
+       vmovdqa .Lbswap_mask(%rip), RKM;
+       vmovd .Lfirst_mask(%rip), R1ST;
+       vmovd .L32_mask(%rip), R32;
        dec_preload_rkr();
 
        inpack_blocks(RL1, RR1, RTMP, RX, RKM);
@@ -341,7 +345,7 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
        round(RL, RR, 1, 2);
        round(RR, RL, 0, 1);
 
-       vmovdqa .Lbswap_mask, RKM;
+       vmovdqa .Lbswap_mask(%rip), RKM;
        popq %rbx;
        popq %r15;
 
@@ -504,8 +508,8 @@ SYM_FUNC_START(cast5_ctr_16way)
 
        vpcmpeqd RKR, RKR, RKR;
        vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
-       vmovdqa .Lbswap_iv_mask, R1ST;
-       vmovdqa .Lbswap128_mask, RKM;
+       vmovdqa .Lbswap_iv_mask(%rip), R1ST;
+       vmovdqa .Lbswap128_mask(%rip), RKM;
 
        /* load IV and byteswap */
        vmovq (%rcx), RX;
index 82b716fd5dbac65a3c5c1637a9cd75ac604135ab..9e86d460b4092826eecc1bf748ac45b18fa83bdb 100644 (file)
 
 #define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
        movzbl          src ## bh,     RID1d;    \
+       leaq            s1(%rip),      RID2;     \
+       movl            (RID2,RID1,4), dst ## d; \
        movzbl          src ## bl,     RID2d;    \
+       leaq            s2(%rip),      RID1;     \
+       op1             (RID1,RID2,4), dst ## d; \
        shrq $16,       src;                     \
-       movl            s1(, RID1, 4), dst ## d; \
-       op1             s2(, RID2, 4), dst ## d; \
        movzbl          src ## bh,     RID1d;    \
+       leaq            s3(%rip),      RID2;     \
+       op2             (RID2,RID1,4), dst ## d; \
        movzbl          src ## bl,     RID2d;    \
        interleave_op(il_reg);                   \
-       op2             s3(, RID1, 4), dst ## d; \
-       op3             s4(, RID2, 4), dst ## d;
+       leaq            s4(%rip),      RID1;     \
+       op3             (RID1,RID2,4), dst ## d;
 
 #define dummy(d) /* do nothing */
 
        qop(RD, RC, 1);
 
 #define shuffle(mask) \
-       vpshufb         mask,            RKR, RKR;
+       vpshufb         mask(%rip),            RKR, RKR;
 
 #define preload_rkr(n, do_mask, mask) \
-       vbroadcastss    .L16_mask,                RKR;      \
+       vbroadcastss    .L16_mask(%rip),          RKR;      \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor           (kr+n*16)(CTX),           RKR, RKR; \
        do_mask(mask);
@@ -258,9 +262,9 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
 
        movq %rdi, CTX;
 
-       vmovdqa .Lbswap_mask, RKM;
-       vmovd .Lfirst_mask, R1ST;
-       vmovd .L32_mask, R32;
+       vmovdqa .Lbswap_mask(%rip), RKM;
+       vmovd .Lfirst_mask(%rip), R1ST;
+       vmovd .L32_mask(%rip), R32;
 
        inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
@@ -284,7 +288,7 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
        popq %rbx;
        popq %r15;
 
-       vmovdqa .Lbswap_mask, RKM;
+       vmovdqa .Lbswap_mask(%rip), RKM;
 
        outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
@@ -306,9 +310,9 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
 
        movq %rdi, CTX;
 
-       vmovdqa .Lbswap_mask, RKM;
-       vmovd .Lfirst_mask, R1ST;
-       vmovd .L32_mask, R32;
+       vmovdqa .Lbswap_mask(%rip), RKM;
+       vmovd .Lfirst_mask(%rip), R1ST;
+       vmovd .L32_mask(%rip), R32;
 
        inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
@@ -332,7 +336,7 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
        popq %rbx;
        popq %r15;
 
-       vmovdqa .Lbswap_mask, RKM;
+       vmovdqa .Lbswap_mask(%rip), RKM;
        outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
 
index ca53e96996ac2143b068204ff5cd55dc6974ccc2..5d31137e2c7dfca0b438c32054e58d16e7add2ba 100644 (file)
@@ -90,7 +90,7 @@ SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligne
        sub     $0x40, LEN
        add     $0x40, BUF
        cmp     $0x40, LEN
-       jb      less_64
+       jb      .Lless_64
 
 #ifdef __x86_64__
        movdqa .Lconstant_R2R1(%rip), CONSTANT
@@ -98,7 +98,7 @@ SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligne
        movdqa .Lconstant_R2R1, CONSTANT
 #endif
 
-loop_64:/*  64 bytes Full cache line folding */
+.Lloop_64:/*  64 bytes Full cache line folding */
        prefetchnta    0x40(BUF)
        movdqa  %xmm1, %xmm5
        movdqa  %xmm2, %xmm6
@@ -139,8 +139,8 @@ loop_64:/*  64 bytes Full cache line folding */
        sub     $0x40, LEN
        add     $0x40, BUF
        cmp     $0x40, LEN
-       jge     loop_64
-less_64:/*  Folding cache line into 128bit */
+       jge     .Lloop_64
+.Lless_64:/*  Folding cache line into 128bit */
 #ifdef __x86_64__
        movdqa  .Lconstant_R4R3(%rip), CONSTANT
 #else
@@ -167,8 +167,8 @@ less_64:/*  Folding cache line into 128bit */
        pxor    %xmm4, %xmm1
 
        cmp     $0x10, LEN
-       jb      fold_64
-loop_16:/* Folding rest buffer into 128bit */
+       jb      .Lfold_64
+.Lloop_16:/* Folding rest buffer into 128bit */
        movdqa  %xmm1, %xmm5
        pclmulqdq $0x00, CONSTANT, %xmm1
        pclmulqdq $0x11, CONSTANT, %xmm5
@@ -177,9 +177,9 @@ loop_16:/* Folding rest buffer into 128bit */
        sub     $0x10, LEN
        add     $0x10, BUF
        cmp     $0x10, LEN
-       jge     loop_16
+       jge     .Lloop_16
 
-fold_64:
+.Lfold_64:
        /* perform the last 64 bit fold, also adds 32 zeroes
         * to the input stream */
        pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
index ec35915f0901a08765a9937084d3d2b3a793687a..81ce0f4db555ce03f7b09f78ffc6d0719298066a 100644 (file)
 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
 
 .macro LABEL prefix n
-\prefix\n\():
+.L\prefix\n\():
 .endm
 
 .macro JMPTBL_ENTRY i
-.quad crc_\i
+.quad .Lcrc_\i
 .endm
 
 .macro JNC_LESS_THAN j
-       jnc less_than_\j
+       jnc .Lless_than_\j
 .endm
 
 # Define threshold where buffers are considered "small" and routed to more
@@ -108,30 +108,30 @@ SYM_FUNC_START(crc_pcl)
        neg     %bufp
        and     $7, %bufp               # calculate the unalignment amount of
                                        # the address
-       je      proc_block              # Skip if aligned
+       je      .Lproc_block            # Skip if aligned
 
        ## If len is less than 8 and we're unaligned, we need to jump
        ## to special code to avoid reading beyond the end of the buffer
        cmp     $8, len
-       jae     do_align
+       jae     .Ldo_align
        # less_than_8 expects length in upper 3 bits of len_dw
        # less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
        shl     $32-3+1, len_dw
-       jmp     less_than_8_post_shl1
+       jmp     .Lless_than_8_post_shl1
 
-do_align:
+.Ldo_align:
        #### Calculate CRC of unaligned bytes of the buffer (if any)
        movq    (bufptmp), tmp          # load a quadword from the buffer
        add     %bufp, bufptmp          # align buffer pointer for quadword
                                        # processing
        sub     %bufp, len              # update buffer length
-align_loop:
+.Lalign_loop:
        crc32b  %bl, crc_init_dw        # compute crc32 of 1-byte
        shr     $8, tmp                 # get next byte
        dec     %bufp
-       jne     align_loop
+       jne     .Lalign_loop
 
-proc_block:
+.Lproc_block:
 
        ################################################################
        ## 2) PROCESS  BLOCKS:
@@ -141,11 +141,11 @@ proc_block:
        movq    len, tmp                # save num bytes in tmp
 
        cmpq    $128*24, len
-       jae     full_block
+       jae     .Lfull_block
 
-continue_block:
+.Lcontinue_block:
        cmpq    $SMALL_SIZE, len
-       jb      small
+       jb      .Lsmall
 
        ## len < 128*24
        movq    $2731, %rax             # 2731 = ceil(2^16 / 24)
@@ -168,13 +168,14 @@ continue_block:
        xor     crc2, crc2
 
        ## branch into array
-       mov     jump_table(,%rax,8), %bufp
+       leaq    jump_table(%rip), %bufp
+       mov     (%bufp,%rax,8), %bufp
        JMP_NOSPEC bufp
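The jump-table dispatch combines both idioms: take the table's address RIP-relative, load the entry with a plain base+index access, then branch through JMP_NOSPEC, the kernel's retpoline-aware indirect-jump macro (from asm/nospec-branch.h). With illustrative registers:

        leaq    jump_table(%rip), %r9   # PIE-safe table address
        movq    (%r9, %rax, 8), %r9     # fetch the branch target pointer
        JMP_NOSPEC r9                   # speculation-hardened indirect jump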
 
        ################################################################
        ## 2a) PROCESS FULL BLOCKS:
        ################################################################
-full_block:
+.Lfull_block:
        movl    $128,%eax
        lea     128*8*2(block_0), block_1
        lea     128*8*3(block_0), block_2
@@ -189,7 +190,6 @@ full_block:
        ## 3) CRC Array:
        ################################################################
 
-crc_array:
        i=128
 .rept 128-1
 .altmacro
@@ -242,28 +242,28 @@ LABEL crc_ 0
        ENDBR
        mov     tmp, len
        cmp     $128*24, tmp
-       jae     full_block
+       jae     .Lfull_block
        cmp     $24, tmp
-       jae     continue_block
+       jae     .Lcontinue_block
 
-less_than_24:
+.Lless_than_24:
        shl     $32-4, len_dw                   # less_than_16 expects length
                                                # in upper 4 bits of len_dw
-       jnc     less_than_16
+       jnc     .Lless_than_16
        crc32q  (bufptmp), crc_init
        crc32q  8(bufptmp), crc_init
-       jz      do_return
+       jz      .Ldo_return
        add     $16, bufptmp
        # len is less than 8 if we got here
        # less_than_8 expects length in upper 3 bits of len_dw
        # less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
        shl     $2, len_dw
-       jmp     less_than_8_post_shl1
+       jmp     .Lless_than_8_post_shl1
 
        #######################################################################
        ## 6) LESS THAN 256-bytes REMAIN AT THIS POINT (8-bits of len are full)
        #######################################################################
-small:
+.Lsmall:
        shl $32-8, len_dw               # Prepare len_dw for less_than_256
        j=256
 .rept 5                                        # j = {256, 128, 64, 32, 16}
@@ -279,32 +279,32 @@ LABEL less_than_ %j                       # less_than_j: Length should be in
        crc32q  i(bufptmp), crc_init    # Compute crc32 of 8-byte data
        i=i+8
 .endr
-       jz      do_return               # Return if remaining length is zero
+       jz      .Ldo_return             # Return if remaining length is zero
        add     $j, bufptmp             # Advance buf
 .endr
 
-less_than_8:                           # Length should be stored in
+.Lless_than_8:                         # Length should be stored in
                                        # upper 3 bits of len_dw
        shl     $1, len_dw
-less_than_8_post_shl1:
-       jnc     less_than_4
+.Lless_than_8_post_shl1:
+       jnc     .Lless_than_4
        crc32l  (bufptmp), crc_init_dw  # CRC of 4 bytes
-       jz      do_return               # return if remaining data is zero
+       jz      .Ldo_return             # return if remaining data is zero
        add     $4, bufptmp
-less_than_4:                           # Length should be stored in
+.Lless_than_4:                         # Length should be stored in
                                        # upper 2 bits of len_dw
        shl     $1, len_dw
-       jnc     less_than_2
+       jnc     .Lless_than_2
        crc32w  (bufptmp), crc_init_dw  # CRC of 2 bytes
-       jz      do_return               # return if remaining data is zero
+       jz      .Ldo_return             # return if remaining data is zero
        add     $2, bufptmp
-less_than_2:                           # Length should be stored in the MSB
+.Lless_than_2:                         # Length should be stored in the MSB
                                        # of len_dw
        shl     $1, len_dw
-       jnc     less_than_1
+       jnc     .Lless_than_1
        crc32b  (bufptmp), crc_init_dw  # CRC of 1 byte
-less_than_1:                           # Length should be zero
-do_return:
+.Lless_than_1:                         # Length should be zero
+.Ldo_return:
        movq    crc_init, %rax
        popq    %rsi
        popq    %rdi
index f4c760f4cade6d7b77ad1406a83c2158f0ea53a0..cf21b998e77cc4ea5338ae89bdddfa1636adbed2 100644 (file)
        movzbl RW0bl, RT2d; \
        movzbl RW0bh, RT3d; \
        shrq $16, RW0; \
-       movq s8(, RT0, 8), RT0; \
-       xorq s6(, RT1, 8), to; \
+       leaq s8(%rip), RW1; \
+       movq (RW1, RT0, 8), RT0; \
+       leaq s6(%rip), RW1; \
+       xorq (RW1, RT1, 8), to; \
        movzbl RW0bl, RL1d; \
        movzbl RW0bh, RT1d; \
        shrl $16, RW0d; \
-       xorq s4(, RT2, 8), RT0; \
-       xorq s2(, RT3, 8), to; \
+       leaq s4(%rip), RW1; \
+       xorq (RW1, RT2, 8), RT0; \
+       leaq s2(%rip), RW1; \
+       xorq (RW1, RT3, 8), to; \
        movzbl RW0bl, RT2d; \
        movzbl RW0bh, RT3d; \
-       xorq s7(, RL1, 8), RT0; \
-       xorq s5(, RT1, 8), to; \
-       xorq s3(, RT2, 8), RT0; \
+       leaq s7(%rip), RW1; \
+       xorq (RW1, RL1, 8), RT0; \
+       leaq s5(%rip), RW1; \
+       xorq (RW1, RT1, 8), to; \
+       leaq s3(%rip), RW1; \
+       xorq (RW1, RT2, 8), RT0; \
        load_next_key(n, RW0); \
        xorq RT0, to; \
-       xorq s1(, RT3, 8), to; \
+       leaq s1(%rip), RW1; \
+       xorq (RW1, RT3, 8), to; \
 
 #define load_next_key(n, RWx) \
        movq (((n) + 1) * 8)(CTX), RWx;
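Each DES3 s-box lookup above pays for its own leaq because no scratch register appears to survive across the eight table accesses of a round. Where one does survive, the base load can be hoisted out of the loop and the per-lookup cost disappears; a sketch of that alternative (not what this patch does; s8 and the registers are illustrative):

        leaq    s8(%rip), %r12          # hoisted: one leaq for all rounds
.Lround:
        movzbl  %al, %ecx               # next table index
        xorq    (%r12, %rcx, 8), %rdx   # lookup without a fresh leaq
        shrq    $8, %rax                # consume the byte; ZF set when done
        jnz     .Lround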
@@ -355,65 +363,89 @@ SYM_FUNC_END(des3_ede_x86_64_crypt_blk)
        movzbl RW0bl, RT3d; \
        movzbl RW0bh, RT1d; \
        shrq $16, RW0; \
-       xorq s8(, RT3, 8), to##0; \
-       xorq s6(, RT1, 8), to##0; \
+       leaq s8(%rip), RT2; \
+       xorq (RT2, RT3, 8), to##0; \
+       leaq s6(%rip), RT2; \
+       xorq (RT2, RT1, 8), to##0; \
        movzbl RW0bl, RT3d; \
        movzbl RW0bh, RT1d; \
        shrq $16, RW0; \
-       xorq s4(, RT3, 8), to##0; \
-       xorq s2(, RT1, 8), to##0; \
+       leaq s4(%rip), RT2; \
+       xorq (RT2, RT3, 8), to##0; \
+       leaq s2(%rip), RT2; \
+       xorq (RT2, RT1, 8), to##0; \
        movzbl RW0bl, RT3d; \
        movzbl RW0bh, RT1d; \
        shrl $16, RW0d; \
-       xorq s7(, RT3, 8), to##0; \
-       xorq s5(, RT1, 8), to##0; \
+       leaq s7(%rip), RT2; \
+       xorq (RT2, RT3, 8), to##0; \
+       leaq s5(%rip), RT2; \
+       xorq (RT2, RT1, 8), to##0; \
        movzbl RW0bl, RT3d; \
        movzbl RW0bh, RT1d; \
        load_next_key(n, RW0); \
-       xorq s3(, RT3, 8), to##0; \
-       xorq s1(, RT1, 8), to##0; \
+       leaq s3(%rip), RT2; \
+       xorq (RT2, RT3, 8), to##0; \
+       leaq s1(%rip), RT2; \
+       xorq (RT2, RT1, 8), to##0; \
                xorq from##1, RW1; \
                movzbl RW1bl, RT3d; \
                movzbl RW1bh, RT1d; \
                shrq $16, RW1; \
-               xorq s8(, RT3, 8), to##1; \
-               xorq s6(, RT1, 8), to##1; \
+               leaq s8(%rip), RT2; \
+               xorq (RT2, RT3, 8), to##1; \
+               leaq s6(%rip), RT2; \
+               xorq (RT2, RT1, 8), to##1; \
                movzbl RW1bl, RT3d; \
                movzbl RW1bh, RT1d; \
                shrq $16, RW1; \
-               xorq s4(, RT3, 8), to##1; \
-               xorq s2(, RT1, 8), to##1; \
+               leaq s4(%rip), RT2; \
+               xorq (RT2, RT3, 8), to##1; \
+               leaq s2(%rip), RT2; \
+               xorq (RT2, RT1, 8), to##1; \
                movzbl RW1bl, RT3d; \
                movzbl RW1bh, RT1d; \
                shrl $16, RW1d; \
-               xorq s7(, RT3, 8), to##1; \
-               xorq s5(, RT1, 8), to##1; \
+               leaq s7(%rip), RT2; \
+               xorq (RT2, RT3, 8), to##1; \
+               leaq s5(%rip), RT2; \
+               xorq (RT2, RT1, 8), to##1; \
                movzbl RW1bl, RT3d; \
                movzbl RW1bh, RT1d; \
                do_movq(RW0, RW1); \
-               xorq s3(, RT3, 8), to##1; \
-               xorq s1(, RT1, 8), to##1; \
+               leaq s3(%rip), RT2; \
+               xorq (RT2, RT3, 8), to##1; \
+               leaq s1(%rip), RT2; \
+               xorq (RT2, RT1, 8), to##1; \
                        xorq from##2, RW2; \
                        movzbl RW2bl, RT3d; \
                        movzbl RW2bh, RT1d; \
                        shrq $16, RW2; \
-                       xorq s8(, RT3, 8), to##2; \
-                       xorq s6(, RT1, 8), to##2; \
+                       leaq s8(%rip), RT2; \
+                       xorq (RT2, RT3, 8), to##2; \
+                       leaq s6(%rip), RT2; \
+                       xorq (RT2, RT1, 8), to##2; \
                        movzbl RW2bl, RT3d; \
                        movzbl RW2bh, RT1d; \
                        shrq $16, RW2; \
-                       xorq s4(, RT3, 8), to##2; \
-                       xorq s2(, RT1, 8), to##2; \
+                       leaq s4(%rip), RT2; \
+                       xorq (RT2, RT3, 8), to##2; \
+                       leaq s2(%rip), RT2; \
+                       xorq (RT2, RT1, 8), to##2; \
                        movzbl RW2bl, RT3d; \
                        movzbl RW2bh, RT1d; \
                        shrl $16, RW2d; \
-                       xorq s7(, RT3, 8), to##2; \
-                       xorq s5(, RT1, 8), to##2; \
+                       leaq s7(%rip), RT2; \
+                       xorq (RT2, RT3, 8), to##2; \
+                       leaq s5(%rip), RT2; \
+                       xorq (RT2, RT1, 8), to##2; \
                        movzbl RW2bl, RT3d; \
                        movzbl RW2bh, RT1d; \
                        do_movq(RW0, RW2); \
-                       xorq s3(, RT3, 8), to##2; \
-                       xorq s1(, RT1, 8), to##2;
+                       leaq s3(%rip), RT2; \
+                       xorq (RT2, RT3, 8), to##2; \
+                       leaq s1(%rip), RT2; \
+                       xorq (RT2, RT1, 8), to##2;
 
 #define __movq(src, dst) \
        movq src, dst;
index 257ed9446f3ee1a9096584ca234977e7d50c190b..99cb983ded9e369f0dc31c5be37bd897f6756de4 100644 (file)
@@ -93,7 +93,7 @@ SYM_FUNC_START(clmul_ghash_mul)
        FRAME_BEGIN
        movups (%rdi), DATA
        movups (%rsi), SHASH
-       movaps .Lbswap_mask, BSWAP
+       movaps .Lbswap_mask(%rip), BSWAP
        pshufb BSWAP, DATA
        call __clmul_gf128mul_ble
        pshufb BSWAP, DATA
@@ -110,7 +110,7 @@ SYM_FUNC_START(clmul_ghash_update)
        FRAME_BEGIN
        cmp $16, %rdx
        jb .Lupdate_just_ret    # check length
-       movaps .Lbswap_mask, BSWAP
+       movaps .Lbswap_mask(%rip), BSWAP
        movups (%rdi), DATA
        movups (%rcx), SHASH
        pshufb BSWAP, DATA
index a96b2fd26dab4bb53d8642456cd630811489d547..4b49bdc9526583d6ea1b51a5b9bf5fb8206cf872 100644 (file)
        xchg    WK_BUF, PRECALC_BUF
 
        .align 32
-_loop:
+.L_loop:
        /*
         * code loops through more than one block
         * we use K_BASE value as a signal of a last block,
         * it is set below by: cmovae BUFFER_PTR, K_BASE
         */
        test BLOCKS_CTR, BLOCKS_CTR
-       jnz _begin
+       jnz .L_begin
        .align 32
-       jmp     _end
+       jmp     .L_end
        .align 32
-_begin:
+.L_begin:
 
        /*
         * Do first block
@@ -508,9 +508,6 @@ _begin:
                .set j, j+2
        .endr
 
-       jmp _loop0
-_loop0:
-
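The deleted jmp _loop0 / _loop0: pair, like the _loop1 through _loop3 pairs further down, was a jump to the immediately following instruction, a pure no-op apparently left over from an earlier code layout, so the .L conversion simply drops it:

        jmp     _loop0          # branches to the very next instruction;
_loop0:                         # deleting both lines changes nothing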
        /*
         * rounds:
         * 10,12,14,16,18
@@ -545,7 +542,7 @@ _loop0:
        UPDATE_HASH     16(HASH_PTR), E
 
        test    BLOCKS_CTR, BLOCKS_CTR
-       jz      _loop
+       jz      .L_loop
 
        mov     TB, B
 
@@ -562,8 +559,6 @@ _loop0:
                .set j, j+2
        .endr
 
-       jmp     _loop1
-_loop1:
        /*
         * rounds
         * 20+80,22+80,24+80,26+80,28+80
@@ -574,9 +569,6 @@ _loop1:
                .set j, j+2
        .endr
 
-       jmp     _loop2
-_loop2:
-
        /*
         * rounds
         * 40+80,42+80,44+80,46+80,48+80
@@ -592,9 +584,6 @@ _loop2:
        /* Move to the next block only if needed*/
        ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
 
-       jmp     _loop3
-_loop3:
-
        /*
         * rounds
         * 60+80,62+80,64+80,66+80,68+80
@@ -623,10 +612,10 @@ _loop3:
 
        xchg    WK_BUF, PRECALC_BUF
 
-       jmp     _loop
+       jmp     .L_loop
 
        .align 32
-       _end:
+.L_end:
 
 .endm
 /*
index 5555b5d5215a449eff7624f1505829036b917139..53de72bdd851ec8da4cd46430b0969139b512a8c 100644 (file)
@@ -360,7 +360,7 @@ SYM_TYPED_FUNC_START(sha256_transform_avx)
        and     $~15, %rsp              # align stack pointer
 
        shl     $6, NUM_BLKS            # convert to bytes
-       jz      done_hash
+       jz      .Ldone_hash
        add     INP, NUM_BLKS           # pointer to end of data
        mov     NUM_BLKS, _INP_END(%rsp)
 
@@ -377,7 +377,7 @@ SYM_TYPED_FUNC_START(sha256_transform_avx)
        vmovdqa  PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
        vmovdqa  _SHUF_00BA(%rip), SHUF_00BA
        vmovdqa  _SHUF_DC00(%rip), SHUF_DC00
-loop0:
+.Lloop0:
        lea     K256(%rip), TBL
 
        ## byte swap first 16 dwords
@@ -391,7 +391,7 @@ loop0:
        ## schedule 48 input dwords, by doing 3 rounds of 16 each
        mov     $3, SRND
 .align 16
-loop1:
+.Lloop1:
        vpaddd  (TBL), X0, XFER
        vmovdqa XFER, _XFER(%rsp)
        FOUR_ROUNDS_AND_SCHED
@@ -410,10 +410,10 @@ loop1:
        FOUR_ROUNDS_AND_SCHED
 
        sub     $1, SRND
-       jne     loop1
+       jne     .Lloop1
 
        mov     $2, SRND
-loop2:
+.Lloop2:
        vpaddd  (TBL), X0, XFER
        vmovdqa XFER, _XFER(%rsp)
        DO_ROUND        0
@@ -433,7 +433,7 @@ loop2:
        vmovdqa X3, X1
 
        sub     $1, SRND
-       jne     loop2
+       jne     .Lloop2
 
        addm    (4*0)(CTX),a
        addm    (4*1)(CTX),b
@@ -447,9 +447,9 @@ loop2:
        mov     _INP(%rsp), INP
        add     $64, INP
        cmp     _INP_END(%rsp), INP
-       jne     loop0
+       jne     .Lloop0
 
-done_hash:
+.Ldone_hash:
 
        mov     %rbp, %rsp
        popq    %rbp
index 3eada94168526665bbe79920886d70ace9938db0..9918212faf914ffc61c5801809294ade209f03d4 100644 (file)
@@ -538,12 +538,12 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
        and     $-32, %rsp      # align rsp to 32 byte boundary
 
        shl     $6, NUM_BLKS    # convert to bytes
-       jz      done_hash
+       jz      .Ldone_hash
        lea     -64(INP, NUM_BLKS), NUM_BLKS # pointer to last block
        mov     NUM_BLKS, _INP_END(%rsp)
 
        cmp     NUM_BLKS, INP
-       je      only_one_block
+       je      .Lonly_one_block
 
        ## load initial digest
        mov     (CTX), a
@@ -561,7 +561,7 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
 
        mov     CTX, _CTX(%rsp)
 
-loop0:
+.Lloop0:
        ## Load first 16 dwords from two blocks
        VMOVDQ  0*32(INP),XTMP0
        VMOVDQ  1*32(INP),XTMP1
@@ -580,7 +580,7 @@ loop0:
        vperm2i128      $0x20, XTMP3, XTMP1, X2
        vperm2i128      $0x31, XTMP3, XTMP1, X3
 
-last_block_enter:
+.Llast_block_enter:
        add     $64, INP
        mov     INP, _INP(%rsp)
 
@@ -588,34 +588,40 @@ last_block_enter:
        xor     SRND, SRND
 
 .align 16
-loop1:
-       vpaddd  K256+0*32(SRND), X0, XFER
+.Lloop1:
+       leaq    K256+0*32(%rip), INP            ## reuse INP as scratch reg
+       vpaddd  (INP, SRND), X0, XFER
        vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
        FOUR_ROUNDS_AND_SCHED   _XFER + 0*32
 
-       vpaddd  K256+1*32(SRND), X0, XFER
+       leaq    K256+1*32(%rip), INP
+       vpaddd  (INP, SRND), X0, XFER
        vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
        FOUR_ROUNDS_AND_SCHED   _XFER + 1*32
 
-       vpaddd  K256+2*32(SRND), X0, XFER
+       leaq    K256+2*32(%rip), INP
+       vpaddd  (INP, SRND), X0, XFER
        vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
        FOUR_ROUNDS_AND_SCHED   _XFER + 2*32
 
-       vpaddd  K256+3*32(SRND), X0, XFER
+       leaq    K256+3*32(%rip), INP
+       vpaddd  (INP, SRND), X0, XFER
        vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
        FOUR_ROUNDS_AND_SCHED   _XFER + 3*32
 
        add     $4*32, SRND
        cmp     $3*4*32, SRND
-       jb      loop1
+       jb      .Lloop1
 
-loop2:
+.Lloop2:
        ## Do last 16 rounds with no scheduling
-       vpaddd  K256+0*32(SRND), X0, XFER
+       leaq    K256+0*32(%rip), INP
+       vpaddd  (INP, SRND), X0, XFER
        vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
        DO_4ROUNDS      _XFER + 0*32
 
-       vpaddd  K256+1*32(SRND), X1, XFER
+       leaq    K256+1*32(%rip), INP
+       vpaddd  (INP, SRND), X1, XFER
        vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
        DO_4ROUNDS      _XFER + 1*32
        add     $2*32, SRND
@@ -624,7 +630,7 @@ loop2:
        vmovdqa X3, X1
 
        cmp     $4*4*32, SRND
-       jb      loop2
+       jb      .Lloop2
 
        mov     _CTX(%rsp), CTX
        mov     _INP(%rsp), INP
@@ -639,17 +645,17 @@ loop2:
        addm    (4*7)(CTX),h
 
        cmp     _INP_END(%rsp), INP
-       ja      done_hash
+       ja      .Ldone_hash
 
        #### Do second block using previously scheduled results
        xor     SRND, SRND
 .align 16
-loop3:
+.Lloop3:
        DO_4ROUNDS       _XFER + 0*32 + 16
        DO_4ROUNDS       _XFER + 1*32 + 16
        add     $2*32, SRND
        cmp     $4*4*32, SRND
-       jb      loop3
+       jb      .Lloop3
 
        mov     _CTX(%rsp), CTX
        mov     _INP(%rsp), INP
@@ -665,10 +671,10 @@ loop3:
        addm    (4*7)(CTX),h
 
        cmp     _INP_END(%rsp), INP
-       jb      loop0
-       ja      done_hash
+       jb      .Lloop0
+       ja      .Ldone_hash
 
-do_last_block:
+.Ldo_last_block:
        VMOVDQ  0*16(INP),XWORD0
        VMOVDQ  1*16(INP),XWORD1
        VMOVDQ  2*16(INP),XWORD2
@@ -679,9 +685,9 @@ do_last_block:
        vpshufb X_BYTE_FLIP_MASK, XWORD2, XWORD2
        vpshufb X_BYTE_FLIP_MASK, XWORD3, XWORD3
 
-       jmp     last_block_enter
+       jmp     .Llast_block_enter
 
-only_one_block:
+.Lonly_one_block:
 
        ## load initial digest
        mov     (4*0)(CTX),a
@@ -698,9 +704,9 @@ only_one_block:
        vmovdqa _SHUF_DC00(%rip), SHUF_DC00
 
        mov     CTX, _CTX(%rsp)
-       jmp     do_last_block
+       jmp     .Ldo_last_block
 
-done_hash:
+.Ldone_hash:
 
        mov     %rbp, %rsp
        pop     %rbp
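The .Lloop1/.Lloop2 bodies above show the awkward case for PIE: K256+1*32(SRND) indexes the round-constant table by a register, and a RIP-relative form with an index does not exist. The base is therefore rebuilt each time with leaq, reusing INP as scratch, which is safe because INP has already been spilled to _INP(%rsp) at that point. A sketch with illustrative registers:

        leaq    K256+1*32(%rip), %rsi       # INP's register, dead here, as scratch
        vpaddd  (%rsi, %rcx), %ymm0, %ymm9  # X0 plus K256[SRND], PIE-safe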
index 959288eecc6898917164bdbe9d2ba5896ad8cebf..93264ee4454325b9cf3d7713437b20c281e9c370 100644 (file)
@@ -369,7 +369,7 @@ SYM_TYPED_FUNC_START(sha256_transform_ssse3)
        and     $~15, %rsp
 
        shl     $6, NUM_BLKS             # convert to bytes
-       jz      done_hash
+       jz      .Ldone_hash
        add     INP, NUM_BLKS
        mov     NUM_BLKS, _INP_END(%rsp) # pointer to end of data
 
@@ -387,7 +387,7 @@ SYM_TYPED_FUNC_START(sha256_transform_ssse3)
        movdqa  _SHUF_00BA(%rip), SHUF_00BA
        movdqa  _SHUF_DC00(%rip), SHUF_DC00
 
-loop0:
+.Lloop0:
        lea     K256(%rip), TBL
 
        ## byte swap first 16 dwords
@@ -401,7 +401,7 @@ loop0:
        ## schedule 48 input dwords, by doing 3 rounds of 16 each
        mov     $3, SRND
 .align 16
-loop1:
+.Lloop1:
        movdqa  (TBL), XFER
        paddd   X0, XFER
        movdqa  XFER, _XFER(%rsp)
@@ -424,10 +424,10 @@ loop1:
        FOUR_ROUNDS_AND_SCHED
 
        sub     $1, SRND
-       jne     loop1
+       jne     .Lloop1
 
        mov     $2, SRND
-loop2:
+.Lloop2:
        paddd   (TBL), X0
        movdqa  X0, _XFER(%rsp)
        DO_ROUND        0
@@ -446,7 +446,7 @@ loop2:
        movdqa  X3, X1
 
        sub     $1, SRND
-       jne     loop2
+       jne     .Lloop2
 
        addm    (4*0)(CTX),a
        addm    (4*1)(CTX),b
@@ -460,9 +460,9 @@ loop2:
        mov     _INP(%rsp), INP
        add     $64, INP
        cmp     _INP_END(%rsp), INP
-       jne     loop0
+       jne     .Lloop0
 
-done_hash:
+.Ldone_hash:
 
        mov     %rbp, %rsp
        popq    %rbp
index b0984f19fdb408e952773793fa0244f2a50ff8c9..d902b8ea0721846750b0fdfddc82ae5c231e9098 100644 (file)
@@ -276,7 +276,7 @@ frame_size = frame_WK + WK_SIZE
 ########################################################################
 SYM_TYPED_FUNC_START(sha512_transform_avx)
        test msglen, msglen
-       je nowork
+       je .Lnowork
 
        # Save GPRs
        push    %rbx
@@ -291,7 +291,7 @@ SYM_TYPED_FUNC_START(sha512_transform_avx)
        sub     $frame_size, %rsp
        and     $~(0x20 - 1), %rsp
 
-updateblock:
+.Lupdateblock:
 
        # Load state variables
        mov     DIGEST(0), a_64
@@ -348,7 +348,7 @@ updateblock:
        # Advance to next message block
        add     $16*8, msg
        dec     msglen
-       jnz     updateblock
+       jnz     .Lupdateblock
 
        # Restore Stack Pointer
        mov     %rbp, %rsp
@@ -361,7 +361,7 @@ updateblock:
        pop     %r12
        pop     %rbx
 
-nowork:
+.Lnowork:
        RET
 SYM_FUNC_END(sha512_transform_avx)
 
index b1ca99055ef994f5301a53b2e1ccfc7754e7f03e..f08496cd68708fdea7cc72cf53bcc8f8e638649a 100644 (file)
@@ -581,7 +581,7 @@ SYM_TYPED_FUNC_START(sha512_transform_rorx)
        and     $~(0x20 - 1), %rsp
 
        shl     $7, NUM_BLKS    # convert to bytes
-       jz      done_hash
+       jz      .Ldone_hash
        add     INP, NUM_BLKS   # pointer to end of data
        mov     NUM_BLKS, frame_INPEND(%rsp)
 
@@ -600,7 +600,7 @@ SYM_TYPED_FUNC_START(sha512_transform_rorx)
 
        vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
 
-loop0:
+.Lloop0:
        lea     K512(%rip), TBL
 
        ## byte swap first 16 dwords
@@ -615,7 +615,7 @@ loop0:
        movq    $4, frame_SRND(%rsp)
 
 .align 16
-loop1:
+.Lloop1:
        vpaddq  (TBL), Y_0, XFER
        vmovdqa XFER, frame_XFER(%rsp)
        FOUR_ROUNDS_AND_SCHED
@@ -634,10 +634,10 @@ loop1:
        FOUR_ROUNDS_AND_SCHED
 
        subq    $1, frame_SRND(%rsp)
-       jne     loop1
+       jne     .Lloop1
 
        movq    $2, frame_SRND(%rsp)
-loop2:
+.Lloop2:
        vpaddq  (TBL), Y_0, XFER
        vmovdqa XFER, frame_XFER(%rsp)
        DO_4ROUNDS
@@ -650,7 +650,7 @@ loop2:
        vmovdqa Y_3, Y_1
 
        subq    $1, frame_SRND(%rsp)
-       jne     loop2
+       jne     .Lloop2
 
        mov     frame_CTX(%rsp), CTX2
        addm    8*0(CTX2), a
@@ -665,9 +665,9 @@ loop2:
        mov     frame_INP(%rsp), INP
        add     $128, INP
        cmp     frame_INPEND(%rsp), INP
-       jne     loop0
+       jne     .Lloop0
 
-done_hash:
+.Ldone_hash:
 
        # Restore Stack Pointer
        mov     %rbp, %rsp
index c06afb5270e5f5789ba0d45c948a5f68ac4235f3..65be30156816265452389a476256009c96724db0 100644 (file)
@@ -278,7 +278,7 @@ frame_size = frame_WK + WK_SIZE
 SYM_TYPED_FUNC_START(sha512_transform_ssse3)
 
        test msglen, msglen
-       je nowork
+       je .Lnowork
 
        # Save GPRs
        push    %rbx
@@ -293,7 +293,7 @@ SYM_TYPED_FUNC_START(sha512_transform_ssse3)
        sub     $frame_size, %rsp
        and     $~(0x20 - 1), %rsp
 
-updateblock:
+.Lupdateblock:
 
 # Load state variables
        mov     DIGEST(0), a_64
@@ -350,7 +350,7 @@ updateblock:
        # Advance to next message block
        add     $16*8, msg
        dec     msglen
-       jnz     updateblock
+       jnz     .Lupdateblock
 
        # Restore Stack Pointer
        mov     %rbp, %rsp
@@ -363,7 +363,7 @@ updateblock:
        pop     %r12
        pop     %rbx
 
-nowork:
+.Lnowork:
        RET
 SYM_FUNC_END(sha512_transform_ssse3)
 
index 52398d49bc2f54f50f7dd0e9a461d16843b703a0..69ae5e1b31207f1d6ac115fe7b594ace375930c2 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/kvm_host.h>
 #include <linux/kernel.h>
 #include <linux/highmem.h>
+#include <linux/psp.h>
 #include <linux/psp-sev.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
index c32c72048a1c981ddfad225d7d7df35aed79425c..82a290df2822a0c5239af964e73bb5cbada3b79c 100644 (file)
@@ -6,25 +6,35 @@
  * Authors: Weigang Li <[email protected]>
  *          Giovanni Cabiddu <[email protected]>
  */
+
+#include <crypto/internal/acompress.h>
+#include <linux/cryptouser.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
 #include <net/netlink.h>
-#include <crypto/internal/acompress.h>
-#include <crypto/internal/scompress.h>
-#include "internal.h"
+
+#include "compress.h"
+
+struct crypto_scomp;
 
 static const struct crypto_type crypto_acomp_type;
 
-#ifdef CONFIG_NET
-static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+{
+       return container_of(alg, struct acomp_alg, calg.base);
+}
+
+static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+{
+       return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+}
+
+static int __maybe_unused crypto_acomp_report(
+       struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_acomp racomp;
 
@@ -34,12 +44,6 @@ static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
 
        return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
 }
-#else
-static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       return -ENOSYS;
-}
-#endif
 
 static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
@@ -89,13 +93,44 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
        return extsize;
 }
 
+static inline int __crypto_acomp_report_stat(struct sk_buff *skb,
+                                            struct crypto_alg *alg)
+{
+       struct comp_alg_common *calg = __crypto_comp_alg_common(alg);
+       struct crypto_istat_compress *istat = comp_get_stat(calg);
+       struct crypto_stat_compress racomp;
+
+       memset(&racomp, 0, sizeof(racomp));
+
+       strscpy(racomp.type, "acomp", sizeof(racomp.type));
+       racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt);
+       racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen);
+       racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt);
+       racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen);
+       racomp.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
+}
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
+{
+       return __crypto_acomp_report_stat(skb, alg);
+}
+#endif
+
 static const struct crypto_type crypto_acomp_type = {
        .extsize = crypto_acomp_extsize,
        .init_tfm = crypto_acomp_init_tfm,
 #ifdef CONFIG_PROC_FS
        .show = crypto_acomp_show,
 #endif
+#ifdef CONFIG_CRYPTO_USER
        .report = crypto_acomp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_acomp_report_stat,
+#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
        .type = CRYPTO_ALG_TYPE_ACOMPRESS,
@@ -147,12 +182,24 @@ void acomp_request_free(struct acomp_req *req)
 }
 EXPORT_SYMBOL_GPL(acomp_request_free);
 
-int crypto_register_acomp(struct acomp_alg *alg)
+void comp_prepare_alg(struct comp_alg_common *alg)
 {
+       struct crypto_istat_compress *istat = comp_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
-       base->cra_type = &crypto_acomp_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
+}
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+       struct crypto_alg *base = &alg->calg.base;
+
+       comp_prepare_alg(&alg->calg);
+
+       base->cra_type = &crypto_acomp_type;
        base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
 
        return crypto_register_alg(base);
index 16991095270d2f6777a875d1417b07d92506b039..ffc48a7dfb349bac46d4ca5831ea0f1900fdebd1 100644 (file)
@@ -8,17 +8,27 @@
  */
 
 #include <crypto/internal/aead.h>
+#include <linux/cryptouser.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
-#include <linux/cryptouser.h>
+#include <linux/string.h>
 #include <net/netlink.h>
 
 #include "internal.h"
 
+static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
                            unsigned int keylen)
 {
@@ -80,39 +90,62 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 }
 EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
 
+static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&istat->err_cnt);
+
+       return err;
+}
+
 int crypto_aead_encrypt(struct aead_request *req)
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct crypto_alg *alg = aead->base.__crt_alg;
-       unsigned int cryptlen = req->cryptlen;
+       struct aead_alg *alg = crypto_aead_alg(aead);
+       struct crypto_istat_aead *istat;
        int ret;
 
-       crypto_stats_get(alg);
+       istat = aead_get_stat(alg);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               atomic64_inc(&istat->encrypt_cnt);
+               atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+       }
+
        if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else
-               ret = crypto_aead_alg(aead)->encrypt(req);
-       crypto_stats_aead_encrypt(cryptlen, alg, ret);
-       return ret;
+               ret = alg->encrypt(req);
+
+       return crypto_aead_errstat(istat, ret);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
 
 int crypto_aead_decrypt(struct aead_request *req)
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct crypto_alg *alg = aead->base.__crt_alg;
-       unsigned int cryptlen = req->cryptlen;
+       struct aead_alg *alg = crypto_aead_alg(aead);
+       struct crypto_istat_aead *istat;
        int ret;
 
-       crypto_stats_get(alg);
+       istat = aead_get_stat(alg);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               atomic64_inc(&istat->decrypt_cnt);
+               atomic64_add(req->cryptlen, &istat->decrypt_tlen);
+       }
+
        if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else if (req->cryptlen < crypto_aead_authsize(aead))
                ret = -EINVAL;
        else
-               ret = crypto_aead_alg(aead)->decrypt(req);
-       crypto_stats_aead_decrypt(cryptlen, alg, ret);
-       return ret;
+               ret = alg->decrypt(req);
+
+       return crypto_aead_errstat(istat, ret);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
 
@@ -142,8 +175,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
        return 0;
 }
 
-#ifdef CONFIG_NET
-static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_aead_report(
+       struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_aead raead;
        struct aead_alg *aead = container_of(alg, struct aead_alg, base);
@@ -159,12 +192,6 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
 
        return nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(raead), &raead);
 }
-#else
-static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       return -ENOSYS;
-}
-#endif
 
 static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
@@ -188,6 +215,26 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
        aead->free(aead);
 }
 
+static int __maybe_unused crypto_aead_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct aead_alg *aead = container_of(alg, struct aead_alg, base);
+       struct crypto_istat_aead *istat = aead_get_stat(aead);
+       struct crypto_stat_aead raead;
+
+       memset(&raead, 0, sizeof(raead));
+
+       strscpy(raead.type, "aead", sizeof(raead.type));
+
+       raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+       raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+       raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+       raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+       raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
+}
+
 static const struct crypto_type crypto_aead_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_aead_init_tfm,
@@ -195,7 +242,12 @@ static const struct crypto_type crypto_aead_type = {
 #ifdef CONFIG_PROC_FS
        .show = crypto_aead_show,
 #endif
+#ifdef CONFIG_CRYPTO_USER
        .report = crypto_aead_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_aead_report_stat,
+#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_AEAD,
@@ -219,6 +271,7 @@ EXPORT_SYMBOL_GPL(crypto_alloc_aead);
 
 static int aead_prepare_alg(struct aead_alg *alg)
 {
+       struct crypto_istat_aead *istat = aead_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
        if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
@@ -232,6 +285,9 @@ static int aead_prepare_alg(struct aead_alg *alg)
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
+
        return 0;
 }
 
index ff8c79d975c1dd7d140f62c1d261282f099a9509..b8a607928e72d7ebb8f246017f2bd5b74e90ab81 100644 (file)
@@ -8,19 +8,18 @@
  * Copyright (c) 2008 Loc Ho <[email protected]>
  */
 
-#include <crypto/internal/hash.h>
 #include <crypto/scatterwalk.h>
+#include <linux/cryptouser.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
+#include <linux/string.h>
 #include <net/netlink.h>
 
-#include "internal.h"
+#include "hash.h"
 
 static const struct crypto_type crypto_ahash_type;
 
@@ -296,55 +295,60 @@ static int crypto_ahash_op(struct ahash_request *req,
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
+       int err;
 
        if ((unsigned long)req->result & alignmask)
-               return ahash_op_unaligned(req, op, has_state);
+               err = ahash_op_unaligned(req, op, has_state);
+       else
+               err = op(req);
 
-       return op(req);
+       return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
 }
 
 int crypto_ahash_final(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct crypto_alg *alg = tfm->base.__crt_alg;
-       unsigned int nbytes = req->nbytes;
-       int ret;
+       struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
 
-       crypto_stats_get(alg);
-       ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final, true);
-       crypto_stats_ahash_final(nbytes, ret, alg);
-       return ret;
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&hash_get_stat(alg)->hash_cnt);
+
+       return crypto_ahash_op(req, tfm->final, true);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
 int crypto_ahash_finup(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct crypto_alg *alg = tfm->base.__crt_alg;
-       unsigned int nbytes = req->nbytes;
-       int ret;
+       struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
 
-       crypto_stats_get(alg);
-       ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup, true);
-       crypto_stats_ahash_final(nbytes, ret, alg);
-       return ret;
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_hash *istat = hash_get_stat(alg);
+
+               atomic64_inc(&istat->hash_cnt);
+               atomic64_add(req->nbytes, &istat->hash_tlen);
+       }
+
+       return crypto_ahash_op(req, tfm->finup, true);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
 
 int crypto_ahash_digest(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct crypto_alg *alg = tfm->base.__crt_alg;
-       unsigned int nbytes = req->nbytes;
-       int ret;
+       struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_hash *istat = hash_get_stat(alg);
+
+               atomic64_inc(&istat->hash_cnt);
+               atomic64_add(req->nbytes, &istat->hash_tlen);
+       }
 
-       crypto_stats_get(alg);
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-               ret = -ENOKEY;
-       else
-               ret = crypto_ahash_op(req, tfm->digest, false);
-       crypto_stats_ahash_final(nbytes, ret, alg);
-       return ret;
+               return crypto_hash_errstat(alg, -ENOKEY);
+
+       return crypto_ahash_op(req, tfm->digest, false);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
@@ -465,8 +469,8 @@ static void crypto_ahash_free_instance(struct crypto_instance *inst)
        ahash->free(ahash);
 }
 
-#ifdef CONFIG_NET
-static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_ahash_report(
+       struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_hash rhash;
 
@@ -479,12 +483,6 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
 
        return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
 }
-#else
-static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       return -ENOSYS;
-}
-#endif
 
 static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
@@ -498,6 +496,12 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
                   __crypto_hash_alg_common(alg)->digestsize);
 }
 
+static int __maybe_unused crypto_ahash_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       return crypto_hash_report_stat(skb, alg, "ahash");
+}
+
 static const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
@@ -505,7 +509,12 @@ static const struct crypto_type crypto_ahash_type = {
 #ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
 #endif
+#ifdef CONFIG_CRYPTO_USER
        .report = crypto_ahash_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_ahash_report_stat,
+#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
@@ -534,17 +543,70 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
 }
 EXPORT_SYMBOL_GPL(crypto_has_ahash);
 
+struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
+{
+       struct hash_alg_common *halg = crypto_hash_alg_common(hash);
+       struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+       struct crypto_ahash *nhash;
+       struct ahash_alg *alg;
+       int err;
+
+       if (!crypto_hash_alg_has_setkey(halg)) {
+               tfm = crypto_tfm_get(tfm);
+               if (IS_ERR(tfm))
+                       return ERR_CAST(tfm);
+
+               return hash;
+       }
+
+       nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);
+
+       if (IS_ERR(nhash))
+               return nhash;
+
+       nhash->init = hash->init;
+       nhash->update = hash->update;
+       nhash->final = hash->final;
+       nhash->finup = hash->finup;
+       nhash->digest = hash->digest;
+       nhash->export = hash->export;
+       nhash->import = hash->import;
+       nhash->setkey = hash->setkey;
+       nhash->reqsize = hash->reqsize;
+
+       if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
+               return crypto_clone_shash_ops_async(nhash, hash);
+
+       err = -ENOSYS;
+       alg = crypto_ahash_alg(hash);
+       if (!alg->clone_tfm)
+               goto out_free_nhash;
+
+       err = alg->clone_tfm(nhash, hash);
+       if (err)
+               goto out_free_nhash;
+
+       return nhash;
+
+out_free_nhash:
+       crypto_free_ahash(nhash);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(crypto_clone_ahash);
+
 static int ahash_prepare_alg(struct ahash_alg *alg)
 {
        struct crypto_alg *base = &alg->halg.base;
+       int err;
 
-       if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
-           alg->halg.statesize > HASH_MAX_STATESIZE ||
-           alg->halg.statesize == 0)
+       if (alg->halg.statesize == 0)
                return -EINVAL;
 
+       err = hash_prepare_alg(&alg->halg);
+       if (err)
+               return err;
+
        base->cra_type = &crypto_ahash_type;
-       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
 
        return 0;
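
crypto_clone_ahash() above encodes the cloning contract: unkeyed transforms are shared by taking a reference, keyed ones need a real copy via the algorithm's clone_tfm hook, and crypto_clone_tfm() allocates that copy with GFP_ATOMIC. A hypothetical caller sketch (helper name assumed) of how this is meant to be consumed:

/* Hypothetical helper: obtain a private copy of a possibly-keyed ahash.
 * Unkeyed algorithms just gain a reference; keyed ones are duplicated
 * without the caller ever needing the original key material. */
static struct crypto_ahash *get_hash_copy(struct crypto_ahash *orig)
{
	struct crypto_ahash *copy = crypto_clone_ahash(orig);

	if (IS_ERR(copy))
		return NULL;	/* e.g. -ENOSYS when clone_tfm is missing */

	return copy;		/* released with crypto_free_ahash() */
}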
index ab975a420e1e982c338010db3f1e3651b22cd9d0..186e762b509a6e7e5c0fd073ad03961107573672 100644 (file)
@@ -5,23 +5,20 @@
  * Copyright (c) 2015, Intel Corporation
  * Authors: Tadeusz Struk <[email protected]>
  */
+#include <crypto/internal/akcipher.h>
+#include <linux/cryptouser.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
-#include <linux/compiler.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
 #include <net/netlink.h>
-#include <crypto/akcipher.h>
-#include <crypto/internal/akcipher.h>
+
 #include "internal.h"
 
-#ifdef CONFIG_NET
-static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_akcipher_report(
+       struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_akcipher rakcipher;
 
@@ -32,12 +29,6 @@ static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
        return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
                       sizeof(rakcipher), &rakcipher);
 }
-#else
-static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       return -ENOSYS;
-}
-#endif
 
 static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
@@ -76,6 +67,30 @@ static void crypto_akcipher_free_instance(struct crypto_instance *inst)
        akcipher->free(akcipher);
 }
 
+static int __maybe_unused crypto_akcipher_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg);
+       struct crypto_istat_akcipher *istat;
+       struct crypto_stat_akcipher rakcipher;
+
+       istat = akcipher_get_stat(akcipher);
+
+       memset(&rakcipher, 0, sizeof(rakcipher));
+
+       strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+       rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+       rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+       rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+       rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+       rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt);
+       rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt);
+       rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
+                      sizeof(rakcipher), &rakcipher);
+}
+
 static const struct crypto_type crypto_akcipher_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_akcipher_init_tfm,
@@ -83,7 +98,12 @@ static const struct crypto_type crypto_akcipher_type = {
 #ifdef CONFIG_PROC_FS
        .show = crypto_akcipher_show,
 #endif
+#ifdef CONFIG_CRYPTO_USER
        .report = crypto_akcipher_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_akcipher_report_stat,
+#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_AKCIPHER,
@@ -108,11 +128,15 @@ EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
 
 static void akcipher_prepare_alg(struct akcipher_alg *alg)
 {
+       struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
        base->cra_type = &crypto_akcipher_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
 }
 
 static int akcipher_default_op(struct akcipher_request *req)
index d08f864f08beef69e787473faa03ea67998a05c0..d7eb8f9e988339825ee355a2c1b35e57bdc3e71c 100644 (file)
@@ -339,8 +339,6 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
 
        list_add(&alg->cra_list, &crypto_alg_list);
 
-       crypto_stats_init(alg);
-
        if (larval) {
                /* No cheating! */
                alg->cra_flags &= ~CRYPTO_ALG_TESTED;
@@ -493,7 +491,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
        if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
                return;
 
-       BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
+       if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
+               return;
+
        if (alg->cra_destroy)
                alg->cra_destroy(alg);
 
@@ -1038,219 +1038,6 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
 }
 EXPORT_SYMBOL_GPL(crypto_type_has_alg);
 
-#ifdef CONFIG_CRYPTO_STATS
-void crypto_stats_init(struct crypto_alg *alg)
-{
-       memset(&alg->stats, 0, sizeof(alg->stats));
-}
-EXPORT_SYMBOL_GPL(crypto_stats_init);
-
-void crypto_stats_get(struct crypto_alg *alg)
-{
-       crypto_alg_get(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_get);
-
-void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
-                              int ret)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->stats.aead.err_cnt);
-       } else {
-               atomic64_inc(&alg->stats.aead.encrypt_cnt);
-               atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen);
-       }
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt);
-
-void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg,
-                              int ret)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->stats.aead.err_cnt);
-       } else {
-               atomic64_inc(&alg->stats.aead.decrypt_cnt);
-               atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen);
-       }
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt);
-
-void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret,
-                                  struct crypto_alg *alg)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->stats.akcipher.err_cnt);
-       } else {
-               atomic64_inc(&alg->stats.akcipher.encrypt_cnt);
-               atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen);
-       }
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt);
-
-void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret,
-                                  struct crypto_alg *alg)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->stats.akcipher.err_cnt);
-       } else {
-               atomic64_inc(&alg->stats.akcipher.decrypt_cnt);
-               atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen);
-       }
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt);
-
-void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
-               atomic64_inc(&alg->stats.akcipher.err_cnt);
-       else
-               atomic64_inc(&alg->stats.akcipher.sign_cnt);
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign);
-
-void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
-               atomic64_inc(&alg->stats.akcipher.err_cnt);
-       else
-               atomic64_inc(&alg->stats.akcipher.verify_cnt);
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify);
-
-void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->stats.compress.err_cnt);
-       } else {
-               atomic64_inc(&alg->stats.compress.compress_cnt);
-               atomic64_add(slen, &alg->stats.compress.compress_tlen);
-       }
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_compress);
-
-void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->stats.compress.err_cnt);
-       } else {
-               atomic64_inc(&alg->stats.compress.decompress_cnt);
-               atomic64_add(slen, &alg->stats.compress.decompress_tlen);
-       }
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_decompress);
-
-void crypto_stats_ahash_update(unsigned int nbytes, int ret,
-                              struct crypto_alg *alg)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
-               atomic64_inc(&alg->stats.hash.err_cnt);
-       else
-               atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ahash_update);
-
-void crypto_stats_ahash_final(unsigned int nbytes, int ret,
-                             struct crypto_alg *alg)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->stats.hash.err_cnt);
-       } else {
-               atomic64_inc(&alg->stats.hash.hash_cnt);
-               atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
-       }
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_ahash_final);
-
-void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
-{
-       if (ret)
-               atomic64_inc(&alg->stats.kpp.err_cnt);
-       else
-               atomic64_inc(&alg->stats.kpp.setsecret_cnt);
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret);
-
-void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
-{
-       if (ret)
-               atomic64_inc(&alg->stats.kpp.err_cnt);
-       else
-               atomic64_inc(&alg->stats.kpp.generate_public_key_cnt);
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key);
-
-void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
-{
-       if (ret)
-               atomic64_inc(&alg->stats.kpp.err_cnt);
-       else
-               atomic64_inc(&alg->stats.kpp.compute_shared_secret_cnt);
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret);
-
-void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
-               atomic64_inc(&alg->stats.rng.err_cnt);
-       else
-               atomic64_inc(&alg->stats.rng.seed_cnt);
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_rng_seed);
-
-void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen,
-                              int ret)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->stats.rng.err_cnt);
-       } else {
-               atomic64_inc(&alg->stats.rng.generate_cnt);
-               atomic64_add(dlen, &alg->stats.rng.generate_tlen);
-       }
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_rng_generate);
-
-void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret,
-                                  struct crypto_alg *alg)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->stats.cipher.err_cnt);
-       } else {
-               atomic64_inc(&alg->stats.cipher.encrypt_cnt);
-               atomic64_add(cryptlen, &alg->stats.cipher.encrypt_tlen);
-       }
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_skcipher_encrypt);
-
-void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret,
-                                  struct crypto_alg *alg)
-{
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->stats.cipher.err_cnt);
-       } else {
-               atomic64_inc(&alg->stats.cipher.decrypt_cnt);
-               atomic64_add(cryptlen, &alg->stats.cipher.decrypt_tlen);
-       }
-       crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt);
-#endif
-
 static void __init crypto_start_tests(void)
 {
        if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
index 1d017ec5c63c5260329882e2f2a4bdb557abf3a1..63af72e19fa8c948b2f40570aaaf32eee1b41600 100644 (file)
@@ -235,24 +235,31 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
        struct alg_sock *ask = alg_sk(sk);
        struct hash_ctx *ctx = ask->private;
        struct ahash_request *req = &ctx->req;
-       char state[HASH_MAX_STATESIZE];
+       struct crypto_ahash *tfm;
        struct sock *sk2;
        struct alg_sock *ask2;
        struct hash_ctx *ctx2;
+       char *state;
        bool more;
        int err;
 
+       tfm = crypto_ahash_reqtfm(req);
+       state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
+       err = -ENOMEM;
+       if (!state)
+               goto out;
+
        lock_sock(sk);
        more = ctx->more;
        err = more ? crypto_ahash_export(req, state) : 0;
        release_sock(sk);
 
        if (err)
-               return err;
+               goto out_free_state;
 
        err = af_alg_accept(ask->parent, newsock, kern);
        if (err)
-               return err;
+               goto out_free_state;
 
        sk2 = newsock->sk;
        ask2 = alg_sk(sk2);
@@ -260,7 +267,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
        ctx2->more = more;
 
        if (!more)
-               return err;
+               goto out_free_state;
 
        err = crypto_ahash_import(&ctx2->req, state);
        if (err) {
@@ -268,6 +275,10 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
                sock_put(sk2);
        }
 
+out_free_state:
+       kfree_sensitive(state);
+
+out:
        return err;
 }
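
The export state moves off the stack because crypto_ahash_statesize() is no longer bounded by a global maximum, so a fixed char[] cannot safely back it. A minimal sketch of the resulting allocate/export/import/scrub pattern, under the same assumptions as the code above:

char *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);

if (!state)
	return -ENOMEM;

err = crypto_ahash_export(req, state);		/* snapshot partial state */
if (!err)
	err = crypto_ahash_import(&ctx2->req, state);

kfree_sensitive(state);		/* may hold key-derived data: zero on free */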
 
index e67cc63368ed8bb3a18382068c3beff2deef34c2..d375e8cd770d17db043c61c6601aaa9cf8e1d73d 100644 (file)
@@ -408,6 +408,7 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
                goto out_err;
 
        tfm->__crt_alg = alg;
+       refcount_set(&tfm->refcnt, 1);
 
        err = crypto_init_ops(tfm, type, mask);
        if (err)
@@ -487,26 +488,43 @@ err:
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_base);
 
-void *crypto_create_tfm_node(struct crypto_alg *alg,
-                       const struct crypto_type *frontend,
-                       int node)
+static void *crypto_alloc_tfmmem(struct crypto_alg *alg,
+                                const struct crypto_type *frontend, int node,
+                                gfp_t gfp)
 {
-       char *mem;
-       struct crypto_tfm *tfm = NULL;
+       struct crypto_tfm *tfm;
        unsigned int tfmsize;
        unsigned int total;
-       int err = -ENOMEM;
+       char *mem;
 
        tfmsize = frontend->tfmsize;
        total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
 
-       mem = kzalloc_node(total, GFP_KERNEL, node);
+       mem = kzalloc_node(total, gfp, node);
        if (mem == NULL)
-               goto out_err;
+               return ERR_PTR(-ENOMEM);
 
        tfm = (struct crypto_tfm *)(mem + tfmsize);
        tfm->__crt_alg = alg;
        tfm->node = node;
+       refcount_set(&tfm->refcnt, 1);
+
+       return mem;
+}
+
+void *crypto_create_tfm_node(struct crypto_alg *alg,
+                            const struct crypto_type *frontend,
+                            int node)
+{
+       struct crypto_tfm *tfm;
+       char *mem;
+       int err;
+
+       mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL);
+       if (IS_ERR(mem))
+               goto out;
+
+       tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
 
        err = frontend->init_tfm(tfm);
        if (err)
@@ -523,13 +541,38 @@ out_free_tfm:
        if (err == -EAGAIN)
                crypto_shoot_alg(alg);
        kfree(mem);
-out_err:
        mem = ERR_PTR(err);
 out:
        return mem;
 }
 EXPORT_SYMBOL_GPL(crypto_create_tfm_node);
 
+void *crypto_clone_tfm(const struct crypto_type *frontend,
+                      struct crypto_tfm *otfm)
+{
+       struct crypto_alg *alg = otfm->__crt_alg;
+       struct crypto_tfm *tfm;
+       char *mem;
+
+       mem = ERR_PTR(-ESTALE);
+       if (unlikely(!crypto_mod_get(alg)))
+               goto out;
+
+       mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC);
+       if (IS_ERR(mem)) {
+               crypto_mod_put(alg);
+               goto out;
+       }
+
+       tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
+       tfm->crt_flags = otfm->crt_flags;
+       tfm->exit = otfm->exit;
+
+out:
+       return mem;
+}
+EXPORT_SYMBOL_GPL(crypto_clone_tfm);
+
 struct crypto_alg *crypto_find_alg(const char *alg_name,
                                   const struct crypto_type *frontend,
                                   u32 type, u32 mask)
@@ -619,6 +662,8 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
        if (IS_ERR_OR_NULL(mem))
                return;
 
+       if (!refcount_dec_and_test(&tfm->refcnt))
+               return;
        alg = tfm->__crt_alg;
 
        if (!tfm->exit && alg->cra_exit)
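
With refcount_set(&tfm->refcnt, 1) at allocation and the decrement above, crypto_destroy_tfm() becomes a put operation: the exit callback and kfree run only when the last reference drops. A two-holder sketch (ahash used for illustration, variable names assumed):

/* Sketch: two holders of one unkeyed hash after crypto_clone_ahash(). */
crypto_free_ahash(copy);	/* refcnt 2 -> 1: exit path not run yet */
crypto_free_ahash(orig);	/* refcnt 1 -> 0: exit_tfm + kfree happen */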
index f9cdc5e91664b7cc0aae578ad61ef878cacae742..5e2b2680d7dbfa9aa4e99a4b2cadbf5f668cc74a 100644 (file)
@@ -11,8 +11,8 @@
 #include <linux/async_tx.h>
 #include <linux/gfp.h>
 
-/**
- * pq_scribble_page - space to hold throwaway P or Q buffer for
+/*
+ * struct pq_scribble_page - space to hold throwaway P or Q buffer for
  * synchronous gen_syndrome
  */
 static struct page *pq_scribble_page;
@@ -28,7 +28,7 @@ static struct page *pq_scribble_page;
 
 #define MAX_DISKS 255
 
-/**
+/*
  * do_async_gen_syndrome - asynchronously calculate P and/or Q
  */
 static __async_inline struct dma_async_tx_descriptor *
@@ -100,7 +100,7 @@ do_async_gen_syndrome(struct dma_chan *chan,
        return tx;
 }
 
-/**
+/*
  * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
  */
 static void
@@ -281,7 +281,7 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si
 /**
  * async_syndrome_val - asynchronously validate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
- * @offset: common offset into each block (src and dest) to start transaction
+ * @offsets: common offset into each block (src and dest) to start transaction
  * @disks: number of blocks (including missing P or Q, see below)
  * @len: length of operation in bytes
  * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
index 9256934312d72b31e1475aa7f39d3081a48cf831..ad72057a5e0d72f284957d513cf6f50052c0edea 100644 (file)
@@ -124,7 +124,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 
 
 /**
- * submit_disposition - flags for routing an incoming operation
+ * enum submit_disposition - flags for routing an incoming operation
  * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
  * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
  * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(async_trigger_callback);
 
 /**
  * async_tx_quiesce - ensure tx is complete and freeable upon return
- * @tx - transaction to quiesce
+ * @tx: transaction to quiesce
  */
 void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
 {
diff --git a/crypto/compress.h b/crypto/compress.h
new file mode 100644 (file)
index 0000000..19f6551
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2023 Herbert Xu <[email protected]>
+ */
+#ifndef _LOCAL_CRYPTO_COMPRESS_H
+#define _LOCAL_CRYPTO_COMPRESS_H
+
+#include "internal.h"
+
+struct acomp_req;
+struct comp_alg_common;
+struct sk_buff;
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
+
+int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg);
+
+void comp_prepare_alg(struct comp_alg_common *alg);
+
+#endif /* _LOCAL_CRYPTO_COMPRESS_H */
index 37365ed30b382a7c5abf044ff890a46f86864886..bbcc368b6a5513d486ae0ec36dd1918337c0ecd2 100644 (file)
@@ -427,12 +427,12 @@ err_free_inst:
        return err;
 }
 
-static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
+static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
 {
-       struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-       struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
+       struct ahash_instance *inst = ahash_alg_instance(tfm);
+       struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
-       struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct crypto_shash *hash;
 
        hash = crypto_spawn_shash(spawn);
@@ -440,15 +440,30 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
                return PTR_ERR(hash);
 
        ctx->child = hash;
-       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+       crypto_ahash_set_reqsize(tfm,
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
 }
 
-static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
+static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
+                                struct crypto_ahash *tfm)
 {
-       struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
+       struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct crypto_shash *hash;
+
+       hash = crypto_clone_shash(ctx->child);
+       if (IS_ERR(hash))
+               return PTR_ERR(hash);
+
+       nctx->child = hash;
+       return 0;
+}
+
+static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
+{
+       struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
        crypto_free_shash(ctx->child);
 }
@@ -677,8 +692,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
        inst->alg.halg.statesize = alg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
 
-       inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
-       inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
+       inst->alg.init_tfm = cryptd_hash_init_tfm;
+       inst->alg.clone_tfm = cryptd_hash_clone_tfm;
+       inst->alg.exit_tfm = cryptd_hash_exit_tfm;
 
        inst->alg.init   = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
index 154884bf9275b965d07bfb4547ba68aacc6109b9..d4f3d39b513769733140c3b2f8dff3d742102811 100644 (file)
@@ -6,18 +6,14 @@
  *
  */
 
-#include <linux/crypto.h>
-#include <linux/cryptouser.h>
-#include <linux/sched.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/cryptouser.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
 #include <net/netlink.h>
 #include <net/sock.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/internal/rng.h>
-#include <crypto/akcipher.h>
-#include <crypto/kpp.h>
-#include <crypto/internal/cryptouser.h>
-
-#include "internal.h"
 
 #define null_terminated(x)     (strnlen(x, sizeof(x)) < sizeof(x))
 
@@ -28,23 +24,6 @@ struct crypto_dump_info {
        u16 nlmsg_flags;
 };
 
-static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_stat_aead raead;
-
-       memset(&raead, 0, sizeof(raead));
-
-       strscpy(raead.type, "aead", sizeof(raead.type));
-
-       raead.stat_encrypt_cnt = atomic64_read(&alg->stats.aead.encrypt_cnt);
-       raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen);
-       raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt);
-       raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen);
-       raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt);
-
-       return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
-}
-
 static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_stat_cipher rcipher;
@@ -53,12 +32,6 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
 
        strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
 
-       rcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.cipher.encrypt_cnt);
-       rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen);
-       rcipher.stat_decrypt_cnt =  atomic64_read(&alg->stats.cipher.decrypt_cnt);
-       rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen);
-       rcipher.stat_err_cnt =  atomic64_read(&alg->stats.cipher.err_cnt);
-
        return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
 }
 
@@ -69,112 +42,10 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
        memset(&rcomp, 0, sizeof(rcomp));
 
        strscpy(rcomp.type, "compression", sizeof(rcomp.type));
-       rcomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
-       rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
-       rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
-       rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
-       rcomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
 
        return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
 }
 
-static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_stat_compress racomp;
-
-       memset(&racomp, 0, sizeof(racomp));
-
-       strscpy(racomp.type, "acomp", sizeof(racomp.type));
-       racomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
-       racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
-       racomp.stat_decompress_cnt =  atomic64_read(&alg->stats.compress.decompress_cnt);
-       racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
-       racomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
-
-       return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
-}
-
-static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_stat_akcipher rakcipher;
-
-       memset(&rakcipher, 0, sizeof(rakcipher));
-
-       strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
-       rakcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.akcipher.encrypt_cnt);
-       rakcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.akcipher.encrypt_tlen);
-       rakcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.akcipher.decrypt_cnt);
-       rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen);
-       rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt);
-       rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt);
-       rakcipher.stat_err_cnt = atomic64_read(&alg->stats.akcipher.err_cnt);
-
-       return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
-                      sizeof(rakcipher), &rakcipher);
-}
-
-static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_stat_kpp rkpp;
-
-       memset(&rkpp, 0, sizeof(rkpp));
-
-       strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
-
-       rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt);
-       rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt);
-       rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt);
-       rkpp.stat_err_cnt = atomic64_read(&alg->stats.kpp.err_cnt);
-
-       return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
-}
-
-static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_stat_hash rhash;
-
-       memset(&rhash, 0, sizeof(rhash));
-
-       strscpy(rhash.type, "ahash", sizeof(rhash.type));
-
-       rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
-       rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
-       rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
-
-       return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
-}
-
-static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_stat_hash rhash;
-
-       memset(&rhash, 0, sizeof(rhash));
-
-       strscpy(rhash.type, "shash", sizeof(rhash.type));
-
-       rhash.stat_hash_cnt =  atomic64_read(&alg->stats.hash.hash_cnt);
-       rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
-       rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
-
-       return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
-}
-
-static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_stat_rng rrng;
-
-       memset(&rrng, 0, sizeof(rrng));
-
-       strscpy(rrng.type, "rng", sizeof(rrng.type));
-
-       rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt);
-       rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen);
-       rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt);
-       rrng.stat_err_cnt = atomic64_read(&alg->stats.rng.err_cnt);
-
-       return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
-}
-
 static int crypto_reportstat_one(struct crypto_alg *alg,
                                 struct crypto_user_alg *ualg,
                                 struct sk_buff *skb)
@@ -204,15 +75,13 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
                goto out;
        }
 
-       switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
-       case CRYPTO_ALG_TYPE_AEAD:
-               if (crypto_report_aead(skb, alg))
+       if (alg->cra_type && alg->cra_type->report_stat) {
+               if (alg->cra_type->report_stat(skb, alg))
                        goto nla_put_failure;
-               break;
-       case CRYPTO_ALG_TYPE_SKCIPHER:
-               if (crypto_report_cipher(skb, alg))
-                       goto nla_put_failure;
-               break;
+               goto out;
+       }
+
+       switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
        case CRYPTO_ALG_TYPE_CIPHER:
                if (crypto_report_cipher(skb, alg))
                        goto nla_put_failure;
@@ -221,34 +90,6 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
                if (crypto_report_comp(skb, alg))
                        goto nla_put_failure;
                break;
-       case CRYPTO_ALG_TYPE_ACOMPRESS:
-               if (crypto_report_acomp(skb, alg))
-                       goto nla_put_failure;
-               break;
-       case CRYPTO_ALG_TYPE_SCOMPRESS:
-               if (crypto_report_acomp(skb, alg))
-                       goto nla_put_failure;
-               break;
-       case CRYPTO_ALG_TYPE_AKCIPHER:
-               if (crypto_report_akcipher(skb, alg))
-                       goto nla_put_failure;
-               break;
-       case CRYPTO_ALG_TYPE_KPP:
-               if (crypto_report_kpp(skb, alg))
-                       goto nla_put_failure;
-               break;
-       case CRYPTO_ALG_TYPE_AHASH:
-               if (crypto_report_ahash(skb, alg))
-                       goto nla_put_failure;
-               break;
-       case CRYPTO_ALG_TYPE_HASH:
-               if (crypto_report_shash(skb, alg))
-                       goto nla_put_failure;
-               break;
-       case CRYPTO_ALG_TYPE_RNG:
-               if (crypto_report_rng(skb, alg))
-                       goto nla_put_failure;
-               break;
        default:
                pr_err("ERROR: Unhandled alg %d in %s\n",
                       alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
index 982d4ca4526d89246b0b6c87b87fb71b79516984..ff4ebbc68efab1b2956e42784bb5c9cc1df785cb 100644 (file)
@@ -1546,7 +1546,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
                const int err = PTR_ERR(drbg->jent);
 
                drbg->jent = NULL;
-               if (fips_enabled || err != -ENOENT)
+               if (fips_enabled)
                        return err;
                pr_info("DRBG: Continuing without Jitter RNG\n");
        }
index b05d3c7b3ca53bc7100d5bb0bf67d20a3331883a..92fd506abb21ce354b2b771bb66fc295e5231d53 100644 (file)
@@ -66,20 +66,11 @@ static struct ctl_table crypto_sysctl_table[] = {
        {}
 };
 
-static struct ctl_table crypto_dir_table[] = {
-       {
-               .procname       = "crypto",
-               .mode           = 0555,
-               .child          = crypto_sysctl_table
-       },
-       {}
-};
-
 static struct ctl_table_header *crypto_sysctls;
 
 static void crypto_proc_fips_init(void)
 {
-       crypto_sysctls = register_sysctl_table(crypto_dir_table);
+       crypto_sysctls = register_sysctl("crypto", crypto_sysctl_table);
 }
 
 static void crypto_proc_fips_exit(void)
diff --git a/crypto/hash.h b/crypto/hash.h
new file mode 100644 (file)
index 0000000..7e6c1a9
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * Copyright (c) 2023 Herbert Xu <[email protected]>
+ */
+#ifndef _LOCAL_CRYPTO_HASH_H
+#define _LOCAL_CRYPTO_HASH_H
+
+#include <crypto/internal/hash.h>
+#include <linux/cryptouser.h>
+
+#include "internal.h"
+
+static inline int crypto_hash_report_stat(struct sk_buff *skb,
+                                         struct crypto_alg *alg,
+                                         const char *type)
+{
+       struct hash_alg_common *halg = __crypto_hash_alg_common(alg);
+       struct crypto_istat_hash *istat = hash_get_stat(halg);
+       struct crypto_stat_hash rhash;
+
+       memset(&rhash, 0, sizeof(rhash));
+
+       strscpy(rhash.type, type, sizeof(rhash.type));
+
+       rhash.stat_hash_cnt = atomic64_read(&istat->hash_cnt);
+       rhash.stat_hash_tlen = atomic64_read(&istat->hash_tlen);
+       rhash.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
+}
+
+int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
+struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
+                                                 struct crypto_ahash *hash);
+
+int hash_prepare_alg(struct hash_alg_common *alg);
+
+#endif /* _LOCAL_CRYPTO_HASH_H */
index 3610ff0b67392281db30103e42a889c09b03cc2e..09a7872b406003aa7c6ab14e3b1e7564c934b7f8 100644 (file)
@@ -160,6 +160,20 @@ static int hmac_init_tfm(struct crypto_shash *parent)
        return 0;
 }
 
+static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src)
+{
+       struct hmac_ctx *sctx = hmac_ctx(src);
+       struct hmac_ctx *dctx = hmac_ctx(dst);
+       struct crypto_shash *hash;
+
+       hash = crypto_clone_shash(sctx->hash);
+       if (IS_ERR(hash))
+               return PTR_ERR(hash);
+
+       dctx->hash = hash;
+       return 0;
+}
+
 static void hmac_exit_tfm(struct crypto_shash *parent)
 {
        struct hmac_ctx *ctx = hmac_ctx(parent);
@@ -227,6 +241,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
        inst->alg.import = hmac_import;
        inst->alg.setkey = hmac_setkey;
        inst->alg.init_tfm = hmac_init_tfm;
+       inst->alg.clone_tfm = hmac_clone_tfm;
        inst->alg.exit_tfm = hmac_exit_tfm;
 
        inst->free = shash_free_singlespawn_instance;
index 932f0aafddc32dc3974e71bdb8d0f37ad9e3aa80..8dd746b1130b6ef588665bee1ceeb9566efd4c5f 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <crypto/algapi.h>
 #include <linux/completion.h>
+#include <linux/err.h>
 #include <linux/jump_label.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -47,6 +48,8 @@ extern struct list_head crypto_alg_list;
 extern struct rw_semaphore crypto_alg_sem;
 extern struct blocking_notifier_head crypto_chain;
 
+int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
+
 #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
 static inline bool crypto_boot_test_finished(void)
 {
@@ -103,6 +106,8 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
                                      u32 mask);
 void *crypto_create_tfm_node(struct crypto_alg *alg,
                        const struct crypto_type *frontend, int node);
+void *crypto_clone_tfm(const struct crypto_type *frontend,
+                      struct crypto_tfm *otfm);
 
 static inline void *crypto_create_tfm(struct crypto_alg *alg,
                        const struct crypto_type *frontend)
@@ -184,5 +189,10 @@ static inline int crypto_is_test_larval(struct crypto_larval *larval)
        return larval->alg.cra_driver_name[0];
 }
 
+static inline struct crypto_tfm *crypto_tfm_get(struct crypto_tfm *tfm)
+{
+       return refcount_inc_not_zero(&tfm->refcnt) ? tfm : ERR_PTR(-EOVERFLOW);
+}
+
 #endif /* _CRYPTO_INTERNAL_H */
 
index 2d115bec15aeb88b6cbcf214f0c0181a4b2aa56d..b9edfaa51b2732caf8d429f726a3c4820f1935d7 100644 (file)
@@ -37,6 +37,7 @@
  * DAMAGE.
  */
 
+#include <linux/fips.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -59,11 +60,6 @@ void jent_zfree(void *ptr)
        kfree_sensitive(ptr);
 }
 
-void jent_panic(char *s)
-{
-       panic("%s", s);
-}
-
 void jent_memcpy(void *dest, const void *src, unsigned int n)
 {
        memcpy(dest, src, n);
@@ -102,7 +98,6 @@ void jent_get_nstime(__u64 *out)
 struct jitterentropy {
        spinlock_t jent_lock;
        struct rand_data *entropy_collector;
-       unsigned int reset_cnt;
 };
 
 static int jent_kcapi_init(struct crypto_tfm *tfm)
@@ -138,32 +133,30 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
 
        spin_lock(&rng->jent_lock);
 
-       /* Return a permanent error in case we had too many resets in a row. */
-       if (rng->reset_cnt > (1<<10)) {
-               ret = -EFAULT;
-               goto out;
-       }
-
        ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
 
-       /* Reset RNG in case of health failures */
-       if (ret < -1) {
-               pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n",
-                                   (ret == -2) ? "Repetition Count Test" :
-                                                 "Adaptive Proportion Test");
-
-               rng->reset_cnt++;
-
+       if (ret == -3) {
+               /* Handle permanent health test error */
+               /*
+                * If the kernel was booted with fips=1, it implies that
+                * the entire kernel acts as a FIPS 140 module. In this case
+                * an SP800-90B permanent health test error is treated as
+                * a FIPS module error.
+                */
+               if (fips_enabled)
+                       panic("Jitter RNG permanent health test failure\n");
+
+               pr_err("Jitter RNG permanent health test failure\n");
+               ret = -EFAULT;
+       } else if (ret == -2) {
+               /* Handle intermittent health test error */
+               pr_warn_ratelimited("Reset Jitter RNG due to intermittent health test failure\n");
                ret = -EAGAIN;
-       } else {
-               rng->reset_cnt = 0;
-
-               /* Convert the Jitter RNG error into a usable error code */
-               if (ret == -1)
-                       ret = -EINVAL;
+       } else if (ret == -1) {
+               /* Handle other errors */
+               ret = -EINVAL;
        }
 
-out:
        spin_unlock(&rng->jent_lock);
 
        return ret;
@@ -197,6 +190,10 @@ static int __init jent_mod_init(void)
 
        ret = jent_entropy_init();
        if (ret) {
+               /* Handle permanent health test error */
+               if (fips_enabled)
+                       panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
+
                pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
                return -EFAULT;
        }
index 93bff32138238b8a43fe04d77468af8da563e419..22f48bf4c6f5716d0d2bea11fdc9180fa55c1f8d 100644 (file)
@@ -85,10 +85,14 @@ struct rand_data {
                                      * bit generation */
 
        /* Repetition Count Test */
-       int rct_count;                  /* Number of stuck values */
+       unsigned int rct_count;                 /* Number of stuck values */
 
-       /* Adaptive Proportion Test for a significance level of 2^-30 */
+       /* Intermittent health test failure threshold of 2^-30 */
+#define JENT_RCT_CUTOFF                30      /* Taken from SP800-90B sec 4.4.1 */
 #define JENT_APT_CUTOFF                325     /* Taken from SP800-90B sec 4.4.2 */
+       /* Permanent health test failure threshold of 2^-60 */
+#define JENT_RCT_CUTOFF_PERMANENT      60
+#define JENT_APT_CUTOFF_PERMANENT      355
 #define JENT_APT_WINDOW_SIZE   512     /* Data window size */
        /* LSB of time stamp to process */
 #define JENT_APT_LSB           16
@@ -97,8 +101,6 @@ struct rand_data {
        unsigned int apt_count;         /* APT counter */
        unsigned int apt_base;          /* APT base reference */
        unsigned int apt_base_set:1;    /* APT base reference set? */
-
-       unsigned int health_failure:1;  /* Permanent health failure */
 };
 
 /* Flags that can be used to initialize the RNG */
@@ -169,19 +171,26 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
                return;
        }
 
-       if (delta_masked == ec->apt_base) {
+       if (delta_masked == ec->apt_base)
                ec->apt_count++;
 
-               if (ec->apt_count >= JENT_APT_CUTOFF)
-                       ec->health_failure = 1;
-       }
-
        ec->apt_observations++;
 
        if (ec->apt_observations >= JENT_APT_WINDOW_SIZE)
                jent_apt_reset(ec, delta_masked);
 }
 
+/* APT health test failure detection */
+static int jent_apt_permanent_failure(struct rand_data *ec)
+{
+       return (ec->apt_count >= JENT_APT_CUTOFF_PERMANENT) ? 1 : 0;
+}
+
+static int jent_apt_failure(struct rand_data *ec)
+{
+       return (ec->apt_count >= JENT_APT_CUTOFF) ? 1 : 0;
+}
+
 /***************************************************************************
  * Stuck Test and its use as Repetition Count Test
  *
@@ -206,55 +215,14 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
  */
 static void jent_rct_insert(struct rand_data *ec, int stuck)
 {
-       /*
-        * If we have a count less than zero, a previous RCT round identified
-        * a failure. We will not overwrite it.
-        */
-       if (ec->rct_count < 0)
-               return;
-
        if (stuck) {
                ec->rct_count++;
-
-               /*
-                * The cutoff value is based on the following consideration:
-                * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8.
-                * In addition, we require an entropy value H of 1/OSR as this
-                * is the minimum entropy required to provide full entropy.
-                * Note, we collect 64 * OSR deltas for inserting them into
-                * the entropy pool which should then have (close to) 64 bits
-                * of entropy.
-                *
-                * Note, ec->rct_count (which equals to value B in the pseudo
-                * code of SP800-90B section 4.4.1) starts with zero. Hence
-                * we need to subtract one from the cutoff value as calculated
-                * following SP800-90B.
-                */
-               if ((unsigned int)ec->rct_count >= (31 * ec->osr)) {
-                       ec->rct_count = -1;
-                       ec->health_failure = 1;
-               }
        } else {
+               /* Reset RCT */
                ec->rct_count = 0;
        }
 }
 
-/*
- * Is there an RCT health test failure?
- *
- * @ec [in] Reference to entropy collector
- *
- * @return
- *     0 No health test failure
- *     1 Permanent health test failure
- */
-static int jent_rct_failure(struct rand_data *ec)
-{
-       if (ec->rct_count < 0)
-               return 1;
-       return 0;
-}
-
 static inline __u64 jent_delta(__u64 prev, __u64 next)
 {
 #define JENT_UINT64_MAX                (__u64)(~((__u64) 0))
@@ -303,18 +271,26 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta)
        return 0;
 }
 
-/*
- * Report any health test failures
- *
- * @ec [in] Reference to entropy collector
- *
- * @return
- *     0 No health test failure
- *     1 Permanent health test failure
- */
+/* RCT health test failure detection */
+static int jent_rct_permanent_failure(struct rand_data *ec)
+{
+       return (ec->rct_count >= JENT_RCT_CUTOFF_PERMANENT) ? 1 : 0;
+}
+
+static int jent_rct_failure(struct rand_data *ec)
+{
+       return (ec->rct_count >= JENT_RCT_CUTOFF) ? 1 : 0;
+}
+
+/* Report of health test failures */
 static int jent_health_failure(struct rand_data *ec)
 {
-       return ec->health_failure;
+       return jent_rct_failure(ec) | jent_apt_failure(ec);
+}
+
+static int jent_permanent_health_failure(struct rand_data *ec)
+{
+       return jent_rct_permanent_failure(ec) | jent_apt_permanent_failure(ec);
 }
 
 /***************************************************************************
@@ -600,8 +576,8 @@ static void jent_gen_entropy(struct rand_data *ec)
  *
  * The following error codes can occur:
  *     -1      entropy_collector is NULL
- *     -2      RCT failed
- *     -3      APT test failed
+ *     -2      Intermittent health failure
+ *     -3      Permanent health failure
  */
 int jent_read_entropy(struct rand_data *ec, unsigned char *data,
                      unsigned int len)
@@ -616,39 +592,23 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
 
                jent_gen_entropy(ec);
 
-               if (jent_health_failure(ec)) {
-                       int ret;
-
-                       if (jent_rct_failure(ec))
-                               ret = -2;
-                       else
-                               ret = -3;
-
+               if (jent_permanent_health_failure(ec)) {
                        /*
-                        * Re-initialize the noise source
-                        *
-                        * If the health test fails, the Jitter RNG remains
-                        * in failure state and will return a health failure
-                        * during next invocation.
+                        * At this point the Jitter RNG instance is
+                        * considered failed. The startup test is not
+                        * rerun, because the caller is assumed not to
+                        * use this instance again.
                         */
-                       if (jent_entropy_init())
-                               return ret;
-
-                       /* Set APT to initial state */
-                       jent_apt_reset(ec, 0);
-                       ec->apt_base_set = 0;
-
-                       /* Set RCT to initial state */
-                       ec->rct_count = 0;
-
-                       /* Re-enable Jitter RNG */
-                       ec->health_failure = 0;
-
+                       return -3;
+               } else if (jent_health_failure(ec)) {
                        /*
-                        * Return the health test failure status to the
-                        * caller as the generated value is not appropriate.
+                        * Rerun the startup health tests and return a
+                        * permanent error if they fail.
                         */
-                       return ret;
+                       if (jent_entropy_init())
+                               return -3;
+
+                       return -2;
                }
 
                if ((DATA_SIZE_BITS / 8) < len)
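
Read together, the cutoffs and the new jent_read_entropy() contract give a compact decision table; a summary as a comment block, with the significance levels taken from the comments above:

/*
 * RCT count >= 30  or  APT count >= 325 (per 512-sample window)
 *	-> intermittent failure, alpha = 2^-30: jent_read_entropy()
 *	   reruns the startup tests (returning -3 if they fail) and
 *	   otherwise returns -2; the caller may retry.
 * RCT count >= 60  or  APT count >= 355
 *	-> permanent failure, alpha = 2^-60: returns -3; the instance
 *	   is dead, and under fips_enabled the kcapi wrapper panics.
 */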
index b7397b617ef055ff039c62df31a423798e911014..5cc583f6bc6b84c207137e076470e664374658b4 100644 (file)
@@ -2,7 +2,6 @@
 
 extern void *jent_zalloc(unsigned int len);
 extern void jent_zfree(void *ptr);
-extern void jent_panic(char *s);
 extern void jent_memcpy(void *dest, const void *src, unsigned int n);
 extern void jent_get_nstime(__u64 *out);
 
index 678e871ce418c089208056c22998c1b29cdd34cb..74f2e8e918fa5854e444ae858c604d2d52c6c97c 100644 (file)
@@ -5,23 +5,20 @@
  * Copyright (c) 2016, Intel Corporation
  * Authors: Salvatore Benedetto <[email protected]>
  */
+
+#include <crypto/internal/kpp.h>
+#include <linux/cryptouser.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
-#include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
 #include <net/netlink.h>
-#include <crypto/kpp.h>
-#include <crypto/internal/kpp.h>
+
 #include "internal.h"
 
-#ifdef CONFIG_NET
-static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_kpp_report(
+       struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_kpp rkpp;
 
@@ -31,12 +28,6 @@ static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
 
        return nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(rkpp), &rkpp);
 }
-#else
-static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       return -ENOSYS;
-}
-#endif
 
 static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
@@ -75,6 +66,29 @@ static void crypto_kpp_free_instance(struct crypto_instance *inst)
        kpp->free(kpp);
 }
 
+static int __maybe_unused crypto_kpp_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct kpp_alg *kpp = __crypto_kpp_alg(alg);
+       struct crypto_istat_kpp *istat;
+       struct crypto_stat_kpp rkpp;
+
+       istat = kpp_get_stat(kpp);
+
+       memset(&rkpp, 0, sizeof(rkpp));
+
+       strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+       rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt);
+       rkpp.stat_generate_public_key_cnt =
+               atomic64_read(&istat->generate_public_key_cnt);
+       rkpp.stat_compute_shared_secret_cnt =
+               atomic64_read(&istat->compute_shared_secret_cnt);
+       rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
+}
+
 static const struct crypto_type crypto_kpp_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_kpp_init_tfm,
@@ -82,7 +96,12 @@ static const struct crypto_type crypto_kpp_type = {
 #ifdef CONFIG_PROC_FS
        .show = crypto_kpp_show,
 #endif
+#ifdef CONFIG_CRYPTO_USER
        .report = crypto_kpp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_kpp_report_stat,
+#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_KPP,
@@ -112,11 +131,15 @@ EXPORT_SYMBOL_GPL(crypto_has_kpp);
 
 static void kpp_prepare_alg(struct kpp_alg *alg)
 {
+       struct crypto_istat_kpp *istat = kpp_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
        base->cra_type = &crypto_kpp_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
 }
 
 int crypto_register_kpp(struct kpp_alg *alg)
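
The kpp hunks above also show the hook pattern used across this whole series: the CONFIG_NET stub that returned -ENOSYS is dropped, the handler is compiled unconditionally but tagged __maybe_unused, and it is wired into the type table only when its consumer (CONFIG_CRYPTO_USER here, CONFIG_CRYPTO_STATS for the stat reporter) is built in. A stand-alone sketch of the idiom, with hypothetical names:

    #define __maybe_unused __attribute__((__unused__))

    struct ops {
            int (*report)(void);    /* stays NULL when reporting is off */
    };

    /* defined unconditionally, so it is always parsed and type-checked ... */
    static int __maybe_unused report_impl(void)
    {
            return 0;
    }

    /* ... but referenced, and therefore kept, only when enabled */
    static const struct ops example_ops = {
    #ifdef FEATURE_REPORT
            .report = report_impl,
    #endif
    };

The table entry is simply NULL when the feature is off, so the dispatch side presumably tests the pointer rather than relying on an -ENOSYS stub.
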
index fea082b25fe4b6636ce184acdb907bb571f23603..ffde0f64fb25934b5507841a83bdb13be9c24324 100644 (file)
@@ -8,17 +8,17 @@
  * Copyright (c) 2015 Herbert Xu <[email protected]>
  */
 
-#include <linux/atomic.h>
 #include <crypto/internal/rng.h>
+#include <linux/atomic.h>
+#include <linux/cryptouser.h>
 #include <linux/err.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/random.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/cryptouser.h>
-#include <linux/compiler.h>
 #include <net/netlink.h>
 
 #include "internal.h"
@@ -30,27 +30,30 @@ static int crypto_default_rng_refcnt;
 
 int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
 {
-       struct crypto_alg *alg = tfm->base.__crt_alg;
+       struct rng_alg *alg = crypto_rng_alg(tfm);
        u8 *buf = NULL;
        int err;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&rng_get_stat(alg)->seed_cnt);
+
        if (!seed && slen) {
                buf = kmalloc(slen, GFP_KERNEL);
+               err = -ENOMEM;
                if (!buf)
-                       return -ENOMEM;
+                       goto out;
 
                err = get_random_bytes_wait(buf, slen);
                if (err)
-                       goto out;
+                       goto free_buf;
                seed = buf;
        }
 
-       crypto_stats_get(alg);
-       err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
-       crypto_stats_rng_seed(alg, err);
-out:
+       err = alg->seed(tfm, seed, slen);
+free_buf:
        kfree_sensitive(buf);
-       return err;
+out:
+       return crypto_rng_errstat(alg, err);
 }
 EXPORT_SYMBOL_GPL(crypto_rng_reset);
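
crypto_rng_reset() is reshaped so that the seed attempt is counted before any work happens and every exit, including a failed allocation, funnels through crypto_rng_errstat(). A simplified userspace sketch of that control flow; the stats struct, errstat filter, and do_seed callback are stand-ins, not the kernel's types:

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct rng_stats { atomic_ulong seed_cnt, err_cnt; };

    static int errstat(struct rng_stats *st, int err)
    {
            if (err)                        /* simplified: count any failure */
                    atomic_fetch_add(&st->err_cnt, 1);
            return err;
    }

    static int rng_reset(struct rng_stats *st,
                         int (*do_seed)(const unsigned char *, size_t),
                         const unsigned char *seed, size_t slen)
    {
            unsigned char *buf = NULL;
            int err;

            atomic_fetch_add(&st->seed_cnt, 1);     /* count the attempt itself */

            if (!seed && slen) {
                    err = -ENOMEM;          /* set err before the test, as the
                                             * kernel's goto idiom does */
                    buf = malloc(slen);
                    if (!buf)
                            goto out;
                    /* ... fill buf from a system RNG here ... */
                    seed = buf;
            }

            err = do_seed(seed, slen);
            free(buf);                      /* kernel: kfree_sensitive(buf) */
    out:
            return errstat(st, err);        /* single accounting exit */
    }
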
 
@@ -66,8 +69,8 @@ static unsigned int seedsize(struct crypto_alg *alg)
        return ralg->seedsize;
 }
 
-#ifdef CONFIG_NET
-static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_rng_report(
+       struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_rng rrng;
 
@@ -79,12 +82,6 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
 
        return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng);
 }
-#else
-static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       return -ENOSYS;
-}
-#endif
 
 static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
@@ -94,13 +91,39 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
        seq_printf(m, "seedsize     : %u\n", seedsize(alg));
 }
 
+static int __maybe_unused crypto_rng_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct rng_alg *rng = __crypto_rng_alg(alg);
+       struct crypto_istat_rng *istat;
+       struct crypto_stat_rng rrng;
+
+       istat = rng_get_stat(rng);
+
+       memset(&rrng, 0, sizeof(rrng));
+
+       strscpy(rrng.type, "rng", sizeof(rrng.type));
+
+       rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt);
+       rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen);
+       rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt);
+       rrng.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
+}
+
 static const struct crypto_type crypto_rng_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_rng_init_tfm,
 #ifdef CONFIG_PROC_FS
        .show = crypto_rng_show,
 #endif
+#ifdef CONFIG_CRYPTO_USER
        .report = crypto_rng_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_rng_report_stat,
+#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_RNG,
@@ -176,6 +199,7 @@ EXPORT_SYMBOL_GPL(crypto_del_default_rng);
 
 int crypto_register_rng(struct rng_alg *alg)
 {
+       struct crypto_istat_rng *istat = rng_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
        if (alg->seedsize > PAGE_SIZE / 8)
@@ -185,6 +209,9 @@ int crypto_register_rng(struct rng_alg *alg)
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
+
        return crypto_register_alg(base);
 }
 EXPORT_SYMBOL_GPL(crypto_register_rng);
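
Registration likewise zeroes the statistics block behind IS_ENABLED(CONFIG_CRYPTO_STATS) instead of an #ifdef. The point of IS_ENABLED() is that the branch stays visible to the compiler in both configurations, so the code is always type-checked and the dead branch is folded away when the option is off. A reduced sketch; the macro below is a stand-in for the real kconfig.h token machinery:

    #include <string.h>

    #define IS_ENABLED(opt) (opt)           /* stand-in for linux/kconfig.h */
    #define CONFIG_CRYPTO_STATS 1           /* assumed on for this sketch */

    struct istat { unsigned long err_cnt; };

    static void prepare_alg(struct istat *istat)
    {
            if (IS_ENABLED(CONFIG_CRYPTO_STATS))    /* compiled either way,   */
                    memset(istat, 0, sizeof(*istat)); /* folded away when off */
    }
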
index 738f4f8f0f41ac356de42004784ec680593d1153..24138b42a648a2fe431c410aa5696daffa5a54a7 100644 (file)
@@ -6,23 +6,22 @@
  * Copyright (c) 2016, Intel Corporation
  * Author: Giovanni Cabiddu <[email protected]>
  */
-#include <linux/errno.h>
+
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include <crypto/scatterwalk.h>
+#include <linux/cryptouser.h>
+#include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/scatterlist.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
-#include <linux/compiler.h>
 #include <linux/vmalloc.h>
-#include <crypto/algapi.h>
-#include <linux/cryptouser.h>
 #include <net/netlink.h>
-#include <linux/scatterlist.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/acompress.h>
-#include <crypto/internal/scompress.h>
-#include "internal.h"
+
+#include "compress.h"
 
 struct scomp_scratch {
        spinlock_t      lock;
@@ -38,8 +37,8 @@ static const struct crypto_type crypto_scomp_type;
 static int scomp_scratch_users;
 static DEFINE_MUTEX(scomp_lock);
 
-#ifdef CONFIG_NET
-static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_scomp_report(
+       struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_comp rscomp;
 
@@ -50,12 +49,6 @@ static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
        return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
                       sizeof(rscomp), &rscomp);
 }
-#else
-static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       return -ENOSYS;
-}
-#endif
 
 static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
@@ -247,7 +240,12 @@ static const struct crypto_type crypto_scomp_type = {
 #ifdef CONFIG_PROC_FS
        .show = crypto_scomp_show,
 #endif
+#ifdef CONFIG_CRYPTO_USER
        .report = crypto_scomp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_acomp_report_stat,
+#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_SCOMPRESS,
@@ -256,10 +254,11 @@ static const struct crypto_type crypto_scomp_type = {
 
 int crypto_register_scomp(struct scomp_alg *alg)
 {
-       struct crypto_alg *base = &alg->base;
+       struct crypto_alg *base = &alg->calg.base;
+
+       comp_prepare_alg(&alg->calg);
 
        base->cra_type = &crypto_scomp_type;
-       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
 
        return crypto_register_alg(base);
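
Two details stand out in the scomp conversion: registration now goes through the embedded calg base so comp_prepare_alg() can do the shared setup, and the stat reporter is the acomp one, because both compression front-ends embed the same common struct (declared in the new "compress.h" included above). A sketch of that sharing, with stand-in types rather than the kernel's real layout:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct comp_alg_common { unsigned long stats; };        /* stand-in */

    struct acomp_alg { struct comp_alg_common calg; /* async ops ... */ };
    struct scomp_alg { struct comp_alg_common calg; /* sync ops ... */ };

    /* one reporter serves both wrappers through the embedded base ... */
    static unsigned long report_stat(struct comp_alg_common *calg)
    {
            return calg->stats;
    }

    /* ... and a wrapper can be recovered from the base when needed */
    static struct scomp_alg *to_scomp(struct comp_alg_common *calg)
    {
            return container_of(calg, struct scomp_alg, calg);
    }
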
index 58b46f198449ec80b484214da4a47be8924406ea..5845b7d59b2f259ef0fa00475b8e805ebb5a0bb3 100644 (file)
@@ -6,22 +6,31 @@
  */
 
 #include <crypto/scatterwalk.h>
-#include <crypto/internal/hash.h>
+#include <linux/cryptouser.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
-#include <linux/cryptouser.h>
+#include <linux/string.h>
 #include <net/netlink.h>
-#include <linux/compiler.h>
 
-#include "internal.h"
+#include "hash.h"
 
 #define MAX_SHASH_ALIGNMASK 63
 
 static const struct crypto_type crypto_shash_type;
 
+static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
+{
+       return hash_get_stat(&alg->halg);
+}
+
+static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
+{
+       return crypto_hash_errstat(&alg->halg, err);
+}
+
 int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
                    unsigned int keylen)
 {
@@ -114,11 +123,17 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
        struct crypto_shash *tfm = desc->tfm;
        struct shash_alg *shash = crypto_shash_alg(tfm);
        unsigned long alignmask = crypto_shash_alignmask(tfm);
+       int err;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_add(len, &shash_get_stat(shash)->hash_tlen);
 
        if ((unsigned long)data & alignmask)
-               return shash_update_unaligned(desc, data, len);
+               err = shash_update_unaligned(desc, data, len);
+       else
+               err = shash->update(desc, data, len);
 
-       return shash->update(desc, data, len);
+       return crypto_shash_errstat(shash, err);
 }
 EXPORT_SYMBOL_GPL(crypto_shash_update);
 
@@ -155,19 +170,25 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out)
        struct crypto_shash *tfm = desc->tfm;
        struct shash_alg *shash = crypto_shash_alg(tfm);
        unsigned long alignmask = crypto_shash_alignmask(tfm);
+       int err;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&shash_get_stat(shash)->hash_cnt);
 
        if ((unsigned long)out & alignmask)
-               return shash_final_unaligned(desc, out);
+               err = shash_final_unaligned(desc, out);
+       else
+               err = shash->final(desc, out);
 
-       return shash->final(desc, out);
+       return crypto_shash_errstat(shash, err);
 }
 EXPORT_SYMBOL_GPL(crypto_shash_final);
 
 static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data,
                                 unsigned int len, u8 *out)
 {
-       return crypto_shash_update(desc, data, len) ?:
-              crypto_shash_final(desc, out);
+       return shash_update_unaligned(desc, data, len) ?:
+              shash_final_unaligned(desc, out);
 }
 
 int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
@@ -176,11 +197,22 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
        struct crypto_shash *tfm = desc->tfm;
        struct shash_alg *shash = crypto_shash_alg(tfm);
        unsigned long alignmask = crypto_shash_alignmask(tfm);
+       int err;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_hash *istat = shash_get_stat(shash);
+
+               atomic64_inc(&istat->hash_cnt);
+               atomic64_add(len, &istat->hash_tlen);
+       }
 
        if (((unsigned long)data | (unsigned long)out) & alignmask)
-               return shash_finup_unaligned(desc, data, len, out);
+               err = shash_finup_unaligned(desc, data, len, out);
+       else
+               err = shash->finup(desc, data, len, out);
 
-       return shash->finup(desc, data, len, out);
+
 }
 EXPORT_SYMBOL_GPL(crypto_shash_finup);
 
@@ -188,7 +220,8 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
                                  unsigned int len, u8 *out)
 {
        return crypto_shash_init(desc) ?:
-              crypto_shash_finup(desc, data, len, out);
+              shash_update_unaligned(desc, data, len) ?:
+              shash_final_unaligned(desc, out);
 }
 
 int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -197,14 +230,23 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
        struct crypto_shash *tfm = desc->tfm;
        struct shash_alg *shash = crypto_shash_alg(tfm);
        unsigned long alignmask = crypto_shash_alignmask(tfm);
+       int err;
 
-       if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-               return -ENOKEY;
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_hash *istat = shash_get_stat(shash);
 
-       if (((unsigned long)data | (unsigned long)out) & alignmask)
-               return shash_digest_unaligned(desc, data, len, out);
+               atomic64_inc(&istat->hash_cnt);
+               atomic64_add(len, &istat->hash_tlen);
+       }
 
-       return shash->digest(desc, data, len, out);
+       if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+               err = -ENOKEY;
+       else if (((unsigned long)data | (unsigned long)out) & alignmask)
+               err = shash_digest_unaligned(desc, data, len, out);
+       else
+               err = shash->digest(desc, data, len, out);
+
+       return crypto_shash_errstat(shash, err);
 }
 EXPORT_SYMBOL_GPL(crypto_shash_digest);
 
@@ -403,6 +445,24 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
        return 0;
 }
 
+struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
+                                                 struct crypto_ahash *hash)
+{
+       struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
+       struct crypto_shash **ctx = crypto_ahash_ctx(hash);
+       struct crypto_shash *shash;
+
+       shash = crypto_clone_shash(*ctx);
+       if (IS_ERR(shash)) {
+               crypto_free_ahash(nhash);
+               return ERR_CAST(shash);
+       }
+
+       *nctx = shash;
+
+       return nhash;
+}
+
 static void crypto_shash_exit_tfm(struct crypto_tfm *tfm)
 {
        struct crypto_shash *hash = __crypto_shash_cast(tfm);
@@ -448,8 +508,8 @@ static void crypto_shash_free_instance(struct crypto_instance *inst)
        shash->free(shash);
 }
 
-#ifdef CONFIG_NET
-static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_shash_report(
+       struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_hash rhash;
        struct shash_alg *salg = __crypto_shash_alg(alg);
@@ -463,12 +523,6 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
 
        return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
 }
-#else
-static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       return -ENOSYS;
-}
-#endif
 
 static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
@@ -481,6 +535,12 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
        seq_printf(m, "digestsize   : %u\n", salg->digestsize);
 }
 
+static int __maybe_unused crypto_shash_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       return crypto_hash_report_stat(skb, alg, "shash");
+}
+
 static const struct crypto_type crypto_shash_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_shash_init_tfm,
@@ -488,7 +548,12 @@ static const struct crypto_type crypto_shash_type = {
 #ifdef CONFIG_PROC_FS
        .show = crypto_shash_show,
 #endif
+#ifdef CONFIG_CRYPTO_USER
        .report = crypto_shash_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_shash_report_stat,
+#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_SHASH,
@@ -517,13 +582,62 @@ int crypto_has_shash(const char *alg_name, u32 type, u32 mask)
 }
 EXPORT_SYMBOL_GPL(crypto_has_shash);
 
-static int shash_prepare_alg(struct shash_alg *alg)
+struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
+{
+       struct crypto_tfm *tfm = crypto_shash_tfm(hash);
+       struct shash_alg *alg = crypto_shash_alg(hash);
+       struct crypto_shash *nhash;
+       int err;
+
+       if (!crypto_shash_alg_has_setkey(alg)) {
+               tfm = crypto_tfm_get(tfm);
+               if (IS_ERR(tfm))
+                       return ERR_CAST(tfm);
+
+               return hash;
+       }
+
+       if (!alg->clone_tfm)
+               return ERR_PTR(-ENOSYS);
+
+       nhash = crypto_clone_tfm(&crypto_shash_type, tfm);
+       if (IS_ERR(nhash))
+               return nhash;
+
+       nhash->descsize = hash->descsize;
+
+       err = alg->clone_tfm(nhash, hash);
+       if (err) {
+               crypto_free_shash(nhash);
+               return ERR_PTR(err);
+       }
+
+       return nhash;
+}
+EXPORT_SYMBOL_GPL(crypto_clone_shash);
+
+int hash_prepare_alg(struct hash_alg_common *alg)
 {
+       struct crypto_istat_hash *istat = hash_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
-       if (alg->digestsize > HASH_MAX_DIGESTSIZE ||
-           alg->descsize > HASH_MAX_DESCSIZE ||
-           alg->statesize > HASH_MAX_STATESIZE)
+       if (alg->digestsize > HASH_MAX_DIGESTSIZE)
+               return -EINVAL;
+
+       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
+
+       return 0;
+}
+
+static int shash_prepare_alg(struct shash_alg *alg)
+{
+       struct crypto_alg *base = &alg->halg.base;
+       int err;
+
+       if (alg->descsize > HASH_MAX_DESCSIZE)
                return -EINVAL;
 
        if (base->cra_alignmask > MAX_SHASH_ALIGNMASK)
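
crypto_clone_shash() above encodes a three-way policy: an unkeyed tfm is shared by taking a reference, a keyed tfm gets a real copy through the algorithm's clone_tfm hook, and a keyed algorithm without that hook cannot be cloned at all (ERR_PTR(-ENOSYS)). A userspace sketch of the same decision tree, with stand-in types:

    #include <stdlib.h>
    #include <string.h>

    struct hash {
            int refcnt;
            int keyed;              /* stands in for "alg has setkey" */
            int (*clone)(struct hash *dst, const struct hash *src);
    };

    static struct hash *clone_hash(struct hash *h)
    {
            struct hash *n;

            if (!h->keyed) {                /* stateless: share by reference */
                    h->refcnt++;
                    return h;
            }

            if (!h->clone)                  /* keyed but not cloneable */
                    return NULL;            /* kernel: ERR_PTR(-ENOSYS) */

            n = malloc(sizeof(*n));
            if (!n)
                    return NULL;
            memcpy(n, h, sizeof(*n));
            n->refcnt = 1;

            if (h->clone(n, h)) {           /* let the implementation copy
                                             * its private state */
                    free(n);
                    return NULL;
            }
            return n;
    }
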
@@ -532,8 +646,11 @@ static int shash_prepare_alg(struct shash_alg *alg)
        if ((alg->export && !alg->import) || (alg->import && !alg->export))
                return -EINVAL;
 
+       err = hash_prepare_alg(&alg->halg);
+       if (err)
+               return err;
+
        base->cra_type = &crypto_shash_type;
-       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
 
        if (!alg->finup)
@@ -543,7 +660,7 @@ static int shash_prepare_alg(struct shash_alg *alg)
        if (!alg->export) {
                alg->export = shash_default_export;
                alg->import = shash_default_import;
-               alg->statesize = alg->descsize;
+               alg->halg.statesize = alg->descsize;
        }
        if (!alg->setkey)
                alg->setkey = shash_no_setkey;
index 7bf4871fec8006ac09c686fc88413256312bf255..6caca02d7e5527ef1559c1169b1caa498f1363f5 100644 (file)
 #include <crypto/scatterwalk.h>
 #include <linux/bug.h>
 #include <linux/cryptouser.h>
-#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/rtnetlink.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 #include <net/netlink.h>
 
 #include "internal.h"
@@ -77,6 +80,35 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
        return max(start, end_page);
 }
 
+static inline struct skcipher_alg *__crypto_skcipher_alg(
+       struct crypto_alg *alg)
+{
+       return container_of(alg, struct skcipher_alg, base);
+}
+
+static inline struct crypto_istat_cipher *skcipher_get_stat(
+       struct skcipher_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
+static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
+{
+       struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&istat->err_cnt);
+
+       return err;
+}
+
 static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
        u8 *addr;
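
The two helpers just added carry the accounting for the rest of this file: skcipher_get_stat() hands back the embedded counters (or NULL when stats are compiled out), and crypto_skcipher_errstat() bumps err_cnt only for real failures. -EINPROGRESS and -EBUSY are excluded because an async request reports them transiently before completing; counting them would inflate the error totals. The filter, restated as a trimmed userspace sketch:

    #include <errno.h>
    #include <stdatomic.h>

    static atomic_ulong err_cnt;            /* stand-in for atomic64_t */

    static int errstat(int err)
    {
            /* transient async statuses are not failures */
            if (err && err != -EINPROGRESS && err != -EBUSY)
                    atomic_fetch_add(&err_cnt, 1);
            return err;
    }
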
@@ -605,34 +637,44 @@ EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
 int crypto_skcipher_encrypt(struct skcipher_request *req)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct crypto_alg *alg = tfm->base.__crt_alg;
-       unsigned int cryptlen = req->cryptlen;
+       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        int ret;
 
-       crypto_stats_get(alg);
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+               atomic64_inc(&istat->encrypt_cnt);
+               atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+       }
+
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else
-               ret = crypto_skcipher_alg(tfm)->encrypt(req);
-       crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
-       return ret;
+               ret = alg->encrypt(req);
+
+       return crypto_skcipher_errstat(alg, ret);
 }
 EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
 
 int crypto_skcipher_decrypt(struct skcipher_request *req)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct crypto_alg *alg = tfm->base.__crt_alg;
-       unsigned int cryptlen = req->cryptlen;
+       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        int ret;
 
-       crypto_stats_get(alg);
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+               atomic64_inc(&istat->decrypt_cnt);
+               atomic64_add(req->cryptlen, &istat->decrypt_tlen);
+       }
+
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else
-               ret = crypto_skcipher_alg(tfm)->decrypt(req);
-       crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
-       return ret;
+               ret = alg->decrypt(req);
+
+       return crypto_skcipher_errstat(alg, ret);
 }
 EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
 
@@ -672,8 +714,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
 static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
 {
-       struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
-                                                    base);
+       struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
 
        seq_printf(m, "type         : skcipher\n");
        seq_printf(m, "async        : %s\n",
@@ -686,12 +727,11 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
        seq_printf(m, "walksize     : %u\n", skcipher->walksize);
 }
 
-#ifdef CONFIG_NET
-static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+static int __maybe_unused crypto_skcipher_report(
+       struct sk_buff *skb, struct crypto_alg *alg)
 {
+       struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
        struct crypto_report_blkcipher rblkcipher;
-       struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
-                                                    base);
 
        memset(&rblkcipher, 0, sizeof(rblkcipher));
 
@@ -706,12 +746,28 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
        return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                       sizeof(rblkcipher), &rblkcipher);
 }
-#else
-static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+
+static int __maybe_unused crypto_skcipher_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
 {
-       return -ENOSYS;
+       struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
+       struct crypto_istat_cipher *istat;
+       struct crypto_stat_cipher rcipher;
+
+       istat = skcipher_get_stat(skcipher);
+
+       memset(&rcipher, 0, sizeof(rcipher));
+
+       strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+       rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+       rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+       rcipher.stat_decrypt_cnt =  atomic64_read(&istat->decrypt_cnt);
+       rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+       rcipher.stat_err_cnt =  atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
 }
-#endif
 
 static const struct crypto_type crypto_skcipher_type = {
        .extsize = crypto_alg_extsize,
@@ -720,7 +776,12 @@ static const struct crypto_type crypto_skcipher_type = {
 #ifdef CONFIG_PROC_FS
        .show = crypto_skcipher_show,
 #endif
+#ifdef CONFIG_CRYPTO_USER
        .report = crypto_skcipher_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_skcipher_report_stat,
+#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
@@ -775,6 +836,7 @@ EXPORT_SYMBOL_GPL(crypto_has_skcipher);
 
 static int skcipher_prepare_alg(struct skcipher_alg *alg)
 {
+       struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
        if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
@@ -790,6 +852,9 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg)
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
+
        return 0;
 }
 
index 6521feec7756fcc49a9fd4b90e824ab9443108f1..202ca1a3105d500d032d10a1fb61c8b36ff986a9 100644 (file)
 #include <linux/err.h>
 #include <linux/fips.h>
 #include <linux/init.h>
-#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/moduleparam.h>
-#include <linux/jiffies.h>
 #include <linux/timex.h>
-#include <linux/interrupt.h>
+
+#include "internal.h"
 #include "tcrypt.h"
 
 /*
index c91e93ece20b948d6f597df04e6c13ac5936f8f7..216878c8bc3d62f8abd6e708acffffae7d09e5df 100644 (file)
@@ -860,12 +860,50 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
 
 #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
 
+/*
+ * The fuzz tests use prandom instead of the normal Linux RNG since they don't
+ * need cryptographically secure random numbers.  This greatly improves the
+ * performance of these tests, especially if they are run before the Linux RNG
+ * has been initialized or if they are run on a lockdep-enabled kernel.
+ */
+
+static inline void init_rnd_state(struct rnd_state *rng)
+{
+       prandom_seed_state(rng, get_random_u64());
+}
+
+static inline u8 prandom_u8(struct rnd_state *rng)
+{
+       return prandom_u32_state(rng);
+}
+
+static inline u32 prandom_u32_below(struct rnd_state *rng, u32 ceil)
+{
+       /*
+        * This is slightly biased for non-power-of-2 values of 'ceil', but this
+        * isn't important here.
+        */
+       return prandom_u32_state(rng) % ceil;
+}
+
+static inline bool prandom_bool(struct rnd_state *rng)
+{
+       return prandom_u32_below(rng, 2);
+}
+
+static inline u32 prandom_u32_inclusive(struct rnd_state *rng,
+                                       u32 floor, u32 ceil)
+{
+       return floor + prandom_u32_below(rng, ceil - floor + 1);
+}
+
 /* Generate a random length in range [0, max_len], but prefer smaller values */
-static unsigned int generate_random_length(unsigned int max_len)
+static unsigned int generate_random_length(struct rnd_state *rng,
+                                          unsigned int max_len)
 {
-       unsigned int len = get_random_u32_below(max_len + 1);
+       unsigned int len = prandom_u32_below(rng, max_len + 1);
 
-       switch (get_random_u32_below(4)) {
+       switch (prandom_u32_below(rng, 4)) {
        case 0:
                return len % 64;
        case 1:
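
From this hunk on, every fuzz helper threads an explicit struct rnd_state that is seeded exactly once per test through init_rnd_state(); after that single get_random_u64() call the generators never touch the global RNG again. A userspace sketch of the same shape, using an illustrative xorshift64 generator in place of the kernel's prandom (the seed constant is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    struct rnd_state { uint64_t s; };       /* stand-in for linux/prandom.h */

    static void init_rnd_state(struct rnd_state *rng, uint64_t seed)
    {
            rng->s = seed ? seed : 1;       /* xorshift state must be nonzero */
    }

    static uint32_t prandom_u32_state(struct rnd_state *rng)
    {
            rng->s ^= rng->s << 13;         /* xorshift64: cheap and
                                             * deterministic, not secure --
                                             * which is all fuzzing needs */
            rng->s ^= rng->s >> 7;
            rng->s ^= rng->s << 17;
            return (uint32_t)rng->s;
    }

    /* slightly biased for non-power-of-2 ceilings, as noted above */
    static uint32_t prandom_u32_below(struct rnd_state *rng, uint32_t ceil)
    {
            return prandom_u32_state(rng) % ceil;
    }

    int main(void)
    {
            struct rnd_state rng;

            init_rnd_state(&rng, 0x853c49e6748fea9bULL);
            printf("%u %u\n", prandom_u32_below(&rng, 100),
                   prandom_u32_below(&rng, 100));
            return 0;
    }
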
@@ -878,43 +916,44 @@ static unsigned int generate_random_length(unsigned int max_len)
 }
 
 /* Flip a random bit in the given nonempty data buffer */
-static void flip_random_bit(u8 *buf, size_t size)
+static void flip_random_bit(struct rnd_state *rng, u8 *buf, size_t size)
 {
        size_t bitpos;
 
-       bitpos = get_random_u32_below(size * 8);
+       bitpos = prandom_u32_below(rng, size * 8);
        buf[bitpos / 8] ^= 1 << (bitpos % 8);
 }
 
 /* Flip a random byte in the given nonempty data buffer */
-static void flip_random_byte(u8 *buf, size_t size)
+static void flip_random_byte(struct rnd_state *rng, u8 *buf, size_t size)
 {
-       buf[get_random_u32_below(size)] ^= 0xff;
+       buf[prandom_u32_below(rng, size)] ^= 0xff;
 }
 
 /* Sometimes make some random changes to the given nonempty data buffer */
-static void mutate_buffer(u8 *buf, size_t size)
+static void mutate_buffer(struct rnd_state *rng, u8 *buf, size_t size)
 {
        size_t num_flips;
        size_t i;
 
        /* Sometimes flip some bits */
-       if (get_random_u32_below(4) == 0) {
-               num_flips = min_t(size_t, 1 << get_random_u32_below(8), size * 8);
+       if (prandom_u32_below(rng, 4) == 0) {
+               num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8),
+                                 size * 8);
                for (i = 0; i < num_flips; i++)
-                       flip_random_bit(buf, size);
+                       flip_random_bit(rng, buf, size);
        }
 
        /* Sometimes flip some bytes */
-       if (get_random_u32_below(4) == 0) {
-               num_flips = min_t(size_t, 1 << get_random_u32_below(8), size);
+       if (prandom_u32_below(rng, 4) == 0) {
+               num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8), size);
                for (i = 0; i < num_flips; i++)
-                       flip_random_byte(buf, size);
+                       flip_random_byte(rng, buf, size);
        }
 }
 
 /* Randomly generate 'count' bytes, but sometimes make them "interesting" */
-static void generate_random_bytes(u8 *buf, size_t count)
+static void generate_random_bytes(struct rnd_state *rng, u8 *buf, size_t count)
 {
        u8 b;
        u8 increment;
@@ -923,11 +962,11 @@ static void generate_random_bytes(u8 *buf, size_t count)
        if (count == 0)
                return;
 
-       switch (get_random_u32_below(8)) { /* Choose a generation strategy */
+       switch (prandom_u32_below(rng, 8)) { /* Choose a generation strategy */
        case 0:
        case 1:
                /* All the same byte, plus optional mutations */
-               switch (get_random_u32_below(4)) {
+               switch (prandom_u32_below(rng, 4)) {
                case 0:
                        b = 0x00;
                        break;
@@ -935,28 +974,28 @@ static void generate_random_bytes(u8 *buf, size_t count)
                        b = 0xff;
                        break;
                default:
-                       b = get_random_u8();
+                       b = prandom_u8(rng);
                        break;
                }
                memset(buf, b, count);
-               mutate_buffer(buf, count);
+               mutate_buffer(rng, buf, count);
                break;
        case 2:
                /* Ascending or descending bytes, plus optional mutations */
-               increment = get_random_u8();
-               b = get_random_u8();
+               increment = prandom_u8(rng);
+               b = prandom_u8(rng);
                for (i = 0; i < count; i++, b += increment)
                        buf[i] = b;
-               mutate_buffer(buf, count);
+               mutate_buffer(rng, buf, count);
                break;
        default:
                /* Fully random bytes */
-               for (i = 0; i < count; i++)
-                       buf[i] = get_random_u8();
+               prandom_bytes_state(rng, buf, count);
        }
 }
 
-static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+static char *generate_random_sgl_divisions(struct rnd_state *rng,
+                                          struct test_sg_division *divs,
                                           size_t max_divs, char *p, char *end,
                                           bool gen_flushes, u32 req_flags)
 {
@@ -967,24 +1006,26 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
                unsigned int this_len;
                const char *flushtype_str;
 
-               if (div == &divs[max_divs - 1] || get_random_u32_below(2) == 0)
+               if (div == &divs[max_divs - 1] || prandom_bool(rng))
                        this_len = remaining;
                else
-                       this_len = get_random_u32_inclusive(1, remaining);
+                       this_len = prandom_u32_inclusive(rng, 1, remaining);
                div->proportion_of_total = this_len;
 
-               if (get_random_u32_below(4) == 0)
-                       div->offset = get_random_u32_inclusive(PAGE_SIZE - 128, PAGE_SIZE - 1);
-               else if (get_random_u32_below(2) == 0)
-                       div->offset = get_random_u32_below(32);
+               if (prandom_u32_below(rng, 4) == 0)
+                       div->offset = prandom_u32_inclusive(rng,
+                                                           PAGE_SIZE - 128,
+                                                           PAGE_SIZE - 1);
+               else if (prandom_bool(rng))
+                       div->offset = prandom_u32_below(rng, 32);
                else
-                       div->offset = get_random_u32_below(PAGE_SIZE);
-               if (get_random_u32_below(8) == 0)
+                       div->offset = prandom_u32_below(rng, PAGE_SIZE);
+               if (prandom_u32_below(rng, 8) == 0)
                        div->offset_relative_to_alignmask = true;
 
                div->flush_type = FLUSH_TYPE_NONE;
                if (gen_flushes) {
-                       switch (get_random_u32_below(4)) {
+                       switch (prandom_u32_below(rng, 4)) {
                        case 0:
                                div->flush_type = FLUSH_TYPE_REIMPORT;
                                break;
@@ -996,7 +1037,7 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
 
                if (div->flush_type != FLUSH_TYPE_NONE &&
                    !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
-                   get_random_u32_below(2) == 0)
+                   prandom_bool(rng))
                        div->nosimd = true;
 
                switch (div->flush_type) {
@@ -1031,7 +1072,8 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
 }
 
 /* Generate a random testvec_config for fuzz testing */
-static void generate_random_testvec_config(struct testvec_config *cfg,
+static void generate_random_testvec_config(struct rnd_state *rng,
+                                          struct testvec_config *cfg,
                                           char *name, size_t max_namelen)
 {
        char *p = name;
@@ -1043,7 +1085,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
 
        p += scnprintf(p, end - p, "random:");
 
-       switch (get_random_u32_below(4)) {
+       switch (prandom_u32_below(rng, 4)) {
        case 0:
        case 1:
                cfg->inplace_mode = OUT_OF_PLACE;
@@ -1058,12 +1100,12 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
                break;
        }
 
-       if (get_random_u32_below(2) == 0) {
+       if (prandom_bool(rng)) {
                cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
                p += scnprintf(p, end - p, " may_sleep");
        }
 
-       switch (get_random_u32_below(4)) {
+       switch (prandom_u32_below(rng, 4)) {
        case 0:
                cfg->finalization_type = FINALIZATION_TYPE_FINAL;
                p += scnprintf(p, end - p, " use_final");
@@ -1078,36 +1120,37 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
                break;
        }
 
-       if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
-           get_random_u32_below(2) == 0) {
+       if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && prandom_bool(rng)) {
                cfg->nosimd = true;
                p += scnprintf(p, end - p, " nosimd");
        }
 
        p += scnprintf(p, end - p, " src_divs=[");
-       p = generate_random_sgl_divisions(cfg->src_divs,
+       p = generate_random_sgl_divisions(rng, cfg->src_divs,
                                          ARRAY_SIZE(cfg->src_divs), p, end,
                                          (cfg->finalization_type !=
                                           FINALIZATION_TYPE_DIGEST),
                                          cfg->req_flags);
        p += scnprintf(p, end - p, "]");
 
-       if (cfg->inplace_mode == OUT_OF_PLACE && get_random_u32_below(2) == 0) {
+       if (cfg->inplace_mode == OUT_OF_PLACE && prandom_bool(rng)) {
                p += scnprintf(p, end - p, " dst_divs=[");
-               p = generate_random_sgl_divisions(cfg->dst_divs,
+               p = generate_random_sgl_divisions(rng, cfg->dst_divs,
                                                  ARRAY_SIZE(cfg->dst_divs),
                                                  p, end, false,
                                                  cfg->req_flags);
                p += scnprintf(p, end - p, "]");
        }
 
-       if (get_random_u32_below(2) == 0) {
-               cfg->iv_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
+       if (prandom_bool(rng)) {
+               cfg->iv_offset = prandom_u32_inclusive(rng, 1,
+                                                      MAX_ALGAPI_ALIGNMASK);
                p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
        }
 
-       if (get_random_u32_below(2) == 0) {
-               cfg->key_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
+       if (prandom_bool(rng)) {
+               cfg->key_offset = prandom_u32_inclusive(rng, 1,
+                                                       MAX_ALGAPI_ALIGNMASK);
                p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
        }
 
@@ -1620,11 +1663,14 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
 
 #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
        if (!noextratests) {
+               struct rnd_state rng;
                struct testvec_config cfg;
                char cfgname[TESTVEC_CONFIG_NAMELEN];
 
+               init_rnd_state(&rng);
+
                for (i = 0; i < fuzz_iterations; i++) {
-                       generate_random_testvec_config(&cfg, cfgname,
+                       generate_random_testvec_config(&rng, &cfg, cfgname,
                                                       sizeof(cfgname));
                        err = test_hash_vec_cfg(vec, vec_name, &cfg,
                                                req, desc, tsgl, hashstate);
@@ -1642,15 +1688,16 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
  * Generate a hash test vector from the given implementation.
  * Assumes the buffers in 'vec' were already allocated.
  */
-static void generate_random_hash_testvec(struct shash_desc *desc,
+static void generate_random_hash_testvec(struct rnd_state *rng,
+                                        struct shash_desc *desc,
                                         struct hash_testvec *vec,
                                         unsigned int maxkeysize,
                                         unsigned int maxdatasize,
                                         char *name, size_t max_namelen)
 {
        /* Data */
-       vec->psize = generate_random_length(maxdatasize);
-       generate_random_bytes((u8 *)vec->plaintext, vec->psize);
+       vec->psize = generate_random_length(rng, maxdatasize);
+       generate_random_bytes(rng, (u8 *)vec->plaintext, vec->psize);
 
        /*
         * Key: length in range [1, maxkeysize], but usually choose maxkeysize.
@@ -1660,9 +1707,9 @@ static void generate_random_hash_testvec(struct shash_desc *desc,
        vec->ksize = 0;
        if (maxkeysize) {
                vec->ksize = maxkeysize;
-               if (get_random_u32_below(4) == 0)
-                       vec->ksize = get_random_u32_inclusive(1, maxkeysize);
-               generate_random_bytes((u8 *)vec->key, vec->ksize);
+               if (prandom_u32_below(rng, 4) == 0)
+                       vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
+               generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
 
                vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
                                                        vec->ksize);
@@ -1696,6 +1743,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
        const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
        const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
        const char *driver = crypto_ahash_driver_name(tfm);
+       struct rnd_state rng;
        char _generic_driver[CRYPTO_MAX_ALG_NAME];
        struct crypto_shash *generic_tfm = NULL;
        struct shash_desc *generic_desc = NULL;
@@ -1709,6 +1757,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
        if (noextratests)
                return 0;
 
+       init_rnd_state(&rng);
+
        if (!generic_driver) { /* Use default naming convention? */
                err = build_generic_driver_name(algname, _generic_driver);
                if (err)
@@ -1777,10 +1827,11 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
        }
 
        for (i = 0; i < fuzz_iterations * 8; i++) {
-               generate_random_hash_testvec(generic_desc, &vec,
+               generate_random_hash_testvec(&rng, generic_desc, &vec,
                                             maxkeysize, maxdatasize,
                                             vec_name, sizeof(vec_name));
-               generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
+               generate_random_testvec_config(&rng, cfg, cfgname,
+                                              sizeof(cfgname));
 
                err = test_hash_vec_cfg(&vec, vec_name, cfg,
                                        req, desc, tsgl, hashstate);
@@ -2182,11 +2233,14 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
 
 #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
        if (!noextratests) {
+               struct rnd_state rng;
                struct testvec_config cfg;
                char cfgname[TESTVEC_CONFIG_NAMELEN];
 
+               init_rnd_state(&rng);
+
                for (i = 0; i < fuzz_iterations; i++) {
-                       generate_random_testvec_config(&cfg, cfgname,
+                       generate_random_testvec_config(&rng, &cfg, cfgname,
                                                       sizeof(cfgname));
                        err = test_aead_vec_cfg(enc, vec, vec_name,
                                                &cfg, req, tsgls);
@@ -2202,6 +2256,7 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
 #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
 
 struct aead_extra_tests_ctx {
+       struct rnd_state rng;
        struct aead_request *req;
        struct crypto_aead *tfm;
        const struct alg_test_desc *test_desc;
@@ -2220,24 +2275,26 @@ struct aead_extra_tests_ctx {
  * here means the full ciphertext including the authentication tag.  The
  * authentication tag (and hence also the ciphertext) is assumed to be nonempty.
  */
-static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
+static void mutate_aead_message(struct rnd_state *rng,
+                               struct aead_testvec *vec, bool aad_iv,
                                unsigned int ivsize)
 {
        const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
        const unsigned int authsize = vec->clen - vec->plen;
 
-       if (get_random_u32_below(2) == 0 && vec->alen > aad_tail_size) {
+       if (prandom_bool(rng) && vec->alen > aad_tail_size) {
                 /* Mutate the AAD */
-               flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
-               if (get_random_u32_below(2) == 0)
+               flip_random_bit(rng, (u8 *)vec->assoc,
+                               vec->alen - aad_tail_size);
+               if (prandom_bool(rng))
                        return;
        }
-       if (get_random_u32_below(2) == 0) {
+       if (prandom_bool(rng)) {
                /* Mutate auth tag (assuming it's at the end of ciphertext) */
-               flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
+               flip_random_bit(rng, (u8 *)vec->ctext + vec->plen, authsize);
        } else {
                /* Mutate any part of the ciphertext */
-               flip_random_bit((u8 *)vec->ctext, vec->clen);
+               flip_random_bit(rng, (u8 *)vec->ctext, vec->clen);
        }
 }
 
@@ -2248,7 +2305,8 @@ static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
  */
 #define MIN_COLLISION_FREE_AUTHSIZE 8
 
-static void generate_aead_message(struct aead_request *req,
+static void generate_aead_message(struct rnd_state *rng,
+                                 struct aead_request *req,
                                  const struct aead_test_suite *suite,
                                  struct aead_testvec *vec,
                                  bool prefer_inauthentic)
@@ -2257,17 +2315,18 @@ static void generate_aead_message(struct aead_request *req,
        const unsigned int ivsize = crypto_aead_ivsize(tfm);
        const unsigned int authsize = vec->clen - vec->plen;
        const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
-                                (prefer_inauthentic || get_random_u32_below(4) == 0);
+                                (prefer_inauthentic ||
+                                 prandom_u32_below(rng, 4) == 0);
 
        /* Generate the AAD. */
-       generate_random_bytes((u8 *)vec->assoc, vec->alen);
+       generate_random_bytes(rng, (u8 *)vec->assoc, vec->alen);
        if (suite->aad_iv && vec->alen >= ivsize)
                /* Avoid implementation-defined behavior. */
                memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
 
-       if (inauthentic && get_random_u32_below(2) == 0) {
+       if (inauthentic && prandom_bool(rng)) {
                /* Generate a random ciphertext. */
-               generate_random_bytes((u8 *)vec->ctext, vec->clen);
+               generate_random_bytes(rng, (u8 *)vec->ctext, vec->clen);
        } else {
                int i = 0;
                struct scatterlist src[2], dst;
@@ -2279,7 +2338,7 @@ static void generate_aead_message(struct aead_request *req,
                if (vec->alen)
                        sg_set_buf(&src[i++], vec->assoc, vec->alen);
                if (vec->plen) {
-                       generate_random_bytes((u8 *)vec->ptext, vec->plen);
+                       generate_random_bytes(rng, (u8 *)vec->ptext, vec->plen);
                        sg_set_buf(&src[i++], vec->ptext, vec->plen);
                }
                sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
@@ -2299,7 +2358,7 @@ static void generate_aead_message(struct aead_request *req,
                 * Mutate the authentic (ciphertext, AAD) pair to get an
                 * inauthentic one.
                 */
-               mutate_aead_message(vec, suite->aad_iv, ivsize);
+               mutate_aead_message(rng, vec, suite->aad_iv, ivsize);
        }
        vec->novrfy = 1;
        if (suite->einval_allowed)
@@ -2313,7 +2372,8 @@ static void generate_aead_message(struct aead_request *req,
  * If 'prefer_inauthentic' is true, then this function will generate inauthentic
  * test vectors (i.e. vectors with 'vec->novrfy=1') more often.
  */
-static void generate_random_aead_testvec(struct aead_request *req,
+static void generate_random_aead_testvec(struct rnd_state *rng,
+                                        struct aead_request *req,
                                         struct aead_testvec *vec,
                                         const struct aead_test_suite *suite,
                                         unsigned int maxkeysize,
@@ -2329,18 +2389,18 @@ static void generate_random_aead_testvec(struct aead_request *req,
 
        /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
        vec->klen = maxkeysize;
-       if (get_random_u32_below(4) == 0)
-               vec->klen = get_random_u32_below(maxkeysize + 1);
-       generate_random_bytes((u8 *)vec->key, vec->klen);
+       if (prandom_u32_below(rng, 4) == 0)
+               vec->klen = prandom_u32_below(rng, maxkeysize + 1);
+       generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
        vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
 
        /* IV */
-       generate_random_bytes((u8 *)vec->iv, ivsize);
+       generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
 
        /* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
        authsize = maxauthsize;
-       if (get_random_u32_below(4) == 0)
-               authsize = get_random_u32_below(maxauthsize + 1);
+       if (prandom_u32_below(rng, 4) == 0)
+               authsize = prandom_u32_below(rng, maxauthsize + 1);
        if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
                authsize = MIN_COLLISION_FREE_AUTHSIZE;
        if (WARN_ON(authsize > maxdatasize))
@@ -2349,11 +2409,11 @@ static void generate_random_aead_testvec(struct aead_request *req,
        vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
 
        /* AAD, plaintext, and ciphertext lengths */
-       total_len = generate_random_length(maxdatasize);
-       if (get_random_u32_below(4) == 0)
+       total_len = generate_random_length(rng, maxdatasize);
+       if (prandom_u32_below(rng, 4) == 0)
                vec->alen = 0;
        else
-               vec->alen = generate_random_length(total_len);
+               vec->alen = generate_random_length(rng, total_len);
        vec->plen = total_len - vec->alen;
        vec->clen = vec->plen + authsize;
 
@@ -2364,7 +2424,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
        vec->novrfy = 0;
        vec->crypt_error = 0;
        if (vec->setkey_error == 0 && vec->setauthsize_error == 0)
-               generate_aead_message(req, suite, vec, prefer_inauthentic);
+               generate_aead_message(rng, req, suite, vec, prefer_inauthentic);
        snprintf(name, max_namelen,
                 "\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"",
                 vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
@@ -2376,7 +2436,7 @@ static void try_to_generate_inauthentic_testvec(
        int i;
 
        for (i = 0; i < 10; i++) {
-               generate_random_aead_testvec(ctx->req, &ctx->vec,
+               generate_random_aead_testvec(&ctx->rng, ctx->req, &ctx->vec,
                                             &ctx->test_desc->suite.aead,
                                             ctx->maxkeysize, ctx->maxdatasize,
                                             ctx->vec_name,
@@ -2407,7 +2467,8 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
                 */
                try_to_generate_inauthentic_testvec(ctx);
                if (ctx->vec.novrfy) {
-                       generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
+                       generate_random_testvec_config(&ctx->rng, &ctx->cfg,
+                                                      ctx->cfgname,
                                                       sizeof(ctx->cfgname));
                        err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
                                                ctx->vec_name, &ctx->cfg,
@@ -2497,12 +2558,13 @@ static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
         * the other implementation against them.
         */
        for (i = 0; i < fuzz_iterations * 8; i++) {
-               generate_random_aead_testvec(generic_req, &ctx->vec,
+               generate_random_aead_testvec(&ctx->rng, generic_req, &ctx->vec,
                                             &ctx->test_desc->suite.aead,
                                             ctx->maxkeysize, ctx->maxdatasize,
                                             ctx->vec_name,
                                             sizeof(ctx->vec_name), false);
-               generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
+               generate_random_testvec_config(&ctx->rng, &ctx->cfg,
+                                              ctx->cfgname,
                                               sizeof(ctx->cfgname));
                if (!ctx->vec.novrfy) {
                        err = test_aead_vec_cfg(ENCRYPT, &ctx->vec,
@@ -2541,6 +2603,7 @@ static int test_aead_extra(const struct alg_test_desc *test_desc,
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
+       init_rnd_state(&ctx->rng);
        ctx->req = req;
        ctx->tfm = crypto_aead_reqtfm(req);
        ctx->test_desc = test_desc;
@@ -2930,11 +2993,14 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
 
 #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
        if (!noextratests) {
+               struct rnd_state rng;
                struct testvec_config cfg;
                char cfgname[TESTVEC_CONFIG_NAMELEN];
 
+               init_rnd_state(&rng);
+
                for (i = 0; i < fuzz_iterations; i++) {
-                       generate_random_testvec_config(&cfg, cfgname,
+                       generate_random_testvec_config(&rng, &cfg, cfgname,
                                                       sizeof(cfgname));
                        err = test_skcipher_vec_cfg(enc, vec, vec_name,
                                                    &cfg, req, tsgls);
@@ -2952,7 +3018,8 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
  * Generate a symmetric cipher test vector from the given implementation.
  * Assumes the buffers in 'vec' were already allocated.
  */
-static void generate_random_cipher_testvec(struct skcipher_request *req,
+static void generate_random_cipher_testvec(struct rnd_state *rng,
+                                          struct skcipher_request *req,
                                           struct cipher_testvec *vec,
                                           unsigned int maxdatasize,
                                           char *name, size_t max_namelen)
@@ -2966,17 +3033,17 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
 
        /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
        vec->klen = maxkeysize;
-       if (get_random_u32_below(4) == 0)
-               vec->klen = get_random_u32_below(maxkeysize + 1);
-       generate_random_bytes((u8 *)vec->key, vec->klen);
+       if (prandom_u32_below(rng, 4) == 0)
+               vec->klen = prandom_u32_below(rng, maxkeysize + 1);
+       generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
        vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
 
        /* IV */
-       generate_random_bytes((u8 *)vec->iv, ivsize);
+       generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
 
        /* Plaintext */
-       vec->len = generate_random_length(maxdatasize);
-       generate_random_bytes((u8 *)vec->ptext, vec->len);
+       vec->len = generate_random_length(rng, maxdatasize);
+       generate_random_bytes(rng, (u8 *)vec->ptext, vec->len);
 
        /* If the key couldn't be set, no need to continue to encrypt. */
        if (vec->setkey_error)
@@ -3018,6 +3085,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
        const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
        const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
        const char *driver = crypto_skcipher_driver_name(tfm);
+       struct rnd_state rng;
        char _generic_driver[CRYPTO_MAX_ALG_NAME];
        struct crypto_skcipher *generic_tfm = NULL;
        struct skcipher_request *generic_req = NULL;
@@ -3035,6 +3103,8 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
        if (strncmp(algname, "kw(", 3) == 0)
                return 0;
 
+       init_rnd_state(&rng);
+
        if (!generic_driver) { /* Use default naming convention? */
                err = build_generic_driver_name(algname, _generic_driver);
                if (err)
@@ -3119,9 +3189,11 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
        }
 
        for (i = 0; i < fuzz_iterations * 8; i++) {
-               generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
+               generate_random_cipher_testvec(&rng, generic_req, &vec,
+                                              maxdatasize,
                                               vec_name, sizeof(vec_name));
-               generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
+               generate_random_testvec_config(&rng, cfg, cfgname,
+                                              sizeof(cfgname));
 
                err = test_skcipher_vec_cfg(ENCRYPT, &vec, vec_name,
                                            cfg, req, tsgls);
@@ -4572,6 +4644,12 @@ static const struct alg_test_desc alg_test_descs[] = {
                .suite = {
                        .hash = __VECS(aes_cmac128_tv_template)
                }
+       }, {
+               .alg = "cmac(camellia)",
+               .test = alg_test_hash,
+               .suite = {
+                       .hash = __VECS(camellia_cmac128_tv_template)
+               }
        }, {
                .alg = "cmac(des3_ede)",
                .test = alg_test_hash,
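
The testmgr changes in this file thread a per-context struct rnd_state through the fuzzing helpers, so each test draws from its own deterministic PRNG stream instead of the global entropy pool. A minimal sketch of how helpers such as init_rnd_state() and prandom_u32_below() can be built on the kernel's prandom API; the bodies below are illustrative, not copied from the patch:

#include <linux/prandom.h>
#include <linux/random.h>

/* Seed a private PRNG stream once, from the global entropy pool. */
static void init_rnd_state(struct rnd_state *rng)
{
	prandom_seed_state(rng, get_random_u64());
}

/*
 * Pseudo-random value below ceil, drawn from rng only. Slightly biased
 * for non-power-of-2 ceilings, which is acceptable for fuzzing.
 */
static u32 prandom_u32_below(struct rnd_state *rng, u32 ceil)
{
	return prandom_u32_state(rng) % ceil;
}
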
index f10bfb9d997353817ae25d992881d84193dd69ef..5ca7a412508fbfb239b26230cb07852b62718417 100644 (file)
@@ -25665,6 +25665,53 @@ static const struct cipher_testvec fcrypt_pcbc_tv_template[] = {
 /*
  * CAMELLIA test vectors.
  */
+static const struct hash_testvec camellia_cmac128_tv_template[] = {
+       { /* From draft-kato-ipsec-camellia-cmac96and128-01 */
+               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+               .plaintext      = zeroed_string,
+               .digest         = "\xba\x92\x57\x82\xaa\xa1\xf5\xd9"
+                                 "\xa0\x0f\x89\x64\x80\x94\xfc\x71",
+               .psize          = 0,
+               .ksize          = 16,
+       }, {
+               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
+               .digest         = "\x6d\x96\x28\x54\xa3\xb9\xfd\xa5"
+                                 "\x6d\x7d\x45\xa9\x5e\xe1\x79\x93",
+               .psize          = 16,
+               .ksize          = 16,
+       }, {
+               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+                                 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+                                 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+                                 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11",
+               .digest         = "\x5c\x18\xd1\x19\xcc\xd6\x76\x61"
+                                 "\x44\xac\x18\x66\x13\x1d\x9f\x22",
+               .psize          = 40,
+               .ksize          = 16,
+       }, {
+               .key            = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+                                 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+               .plaintext      = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+                                 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+                                 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+                                 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+                                 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+                                 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+                                 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+                                 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+               .digest         = "\xc2\x69\x9a\x6e\xba\x55\xce\x9d"
+                                 "\x93\x9a\x8a\x4e\x19\x46\x6e\xe9",
+               .psize          = 64,
+               .ksize          = 16,
+       }
+};
 static const struct cipher_testvec camellia_tv_template[] = {
        {
                .key    = "\x01\x23\x45\x67\x89\xab\xcd\xef"
index 8bb30282ca46b1630929dabcd90562594e941890..a4eb8e35f13d7d02ee1317fecd3577b555843270 100644 (file)
@@ -18,9 +18,7 @@
 
 struct meson_rng_data {
        void __iomem *base;
-       struct platform_device *pdev;
        struct hwrng rng;
-       struct clk *core_clk;
 };
 
 static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -33,47 +31,28 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
        return sizeof(u32);
 }
 
-static void meson_rng_clk_disable(void *data)
-{
-       clk_disable_unprepare(data);
-}
-
 static int meson_rng_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct meson_rng_data *data;
-       int ret;
+       struct clk *core_clk;
 
        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       data->pdev = pdev;
-
        data->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
 
-       data->core_clk = devm_clk_get_optional(dev, "core");
-       if (IS_ERR(data->core_clk))
-               return dev_err_probe(dev, PTR_ERR(data->core_clk),
+       core_clk = devm_clk_get_optional_enabled(dev, "core");
+       if (IS_ERR(core_clk))
+               return dev_err_probe(dev, PTR_ERR(core_clk),
                                     "Failed to get core clock\n");
 
-       if (data->core_clk) {
-               ret = clk_prepare_enable(data->core_clk);
-               if (ret)
-                       return ret;
-               ret = devm_add_action_or_reset(dev, meson_rng_clk_disable,
-                                              data->core_clk);
-               if (ret)
-                       return ret;
-       }
-
        data->rng.name = pdev->name;
        data->rng.read = meson_rng_read;
 
-       platform_set_drvdata(pdev, data);
-
        return devm_hwrng_register(dev, &data->rng);
 }
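
The meson-rng conversion collapses the open-coded get/prepare/enable/devres sequence into a single devm_clk_get_optional_enabled() call, which also registers automatic disable and unprepare on unbind. A minimal probe-style sketch of the resulting pattern (function name ours):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_probe_clk(struct device *dev)
{
	struct clk *clk;

	/*
	 * Optionally gets, prepares and enables the clock, and queues a
	 * devres action that disables and unprepares it on unbind.
	 */
	clk = devm_clk_get_optional_enabled(dev, "core");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Failed to get core clock\n");

	/* A NULL clk is the valid "no clock" case; nothing to manage. */
	return 0;
}
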
 
index 008e6db9ce010b3e219178f6a05ccfff70b3e753..7c8f3cb7c6af785cdc54eaabc0322b2010fe09c8 100644 (file)
@@ -84,7 +84,6 @@ struct xgene_rng_dev {
        unsigned long failure_ts;/* First failure timestamp */
        struct timer_list failure_timer;
        struct device *dev;
-       struct clk *clk;
 };
 
 static void xgene_rng_expired_timer(struct timer_list *t)
@@ -200,7 +199,7 @@ static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
 
 static irqreturn_t xgene_rng_irq_handler(int irq, void *id)
 {
-       struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id;
+       struct xgene_rng_dev *ctx = id;
 
        /* RNG Alarm Counter overflow */
        xgene_rng_chk_overflow(ctx);
@@ -314,6 +313,7 @@ static struct hwrng xgene_rng_func = {
 static int xgene_rng_probe(struct platform_device *pdev)
 {
        struct xgene_rng_dev *ctx;
+       struct clk *clk;
        int rc = 0;
 
        ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
@@ -337,58 +337,36 @@ static int xgene_rng_probe(struct platform_device *pdev)
 
        rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
                                dev_name(&pdev->dev), ctx);
-       if (rc) {
-               dev_err(&pdev->dev, "Could not request RNG alarm IRQ\n");
-               return rc;
-       }
+       if (rc)
+               return dev_err_probe(&pdev->dev, rc, "Could not request RNG alarm IRQ\n");
 
        /* Enable IP clock */
-       ctx->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(ctx->clk)) {
-               dev_warn(&pdev->dev, "Couldn't get the clock for RNG\n");
-       } else {
-               rc = clk_prepare_enable(ctx->clk);
-               if (rc) {
-                       dev_warn(&pdev->dev,
-                                "clock prepare enable failed for RNG");
-                       return rc;
-               }
-       }
+       clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+       if (IS_ERR(clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(clk), "Couldn't get the clock for RNG\n");
 
        xgene_rng_func.priv = (unsigned long) ctx;
 
        rc = devm_hwrng_register(&pdev->dev, &xgene_rng_func);
-       if (rc) {
-               dev_err(&pdev->dev, "RNG registering failed error %d\n", rc);
-               if (!IS_ERR(ctx->clk))
-                       clk_disable_unprepare(ctx->clk);
-               return rc;
-       }
+       if (rc)
+               return dev_err_probe(&pdev->dev, rc, "RNG registering failed\n");
 
        rc = device_init_wakeup(&pdev->dev, 1);
-       if (rc) {
-               dev_err(&pdev->dev, "RNG device_init_wakeup failed error %d\n",
-                       rc);
-               if (!IS_ERR(ctx->clk))
-                       clk_disable_unprepare(ctx->clk);
-               return rc;
-       }
+       if (rc)
+               return dev_err_probe(&pdev->dev, rc, "RNG device_init_wakeup failed\n");
 
        return 0;
 }
 
 static int xgene_rng_remove(struct platform_device *pdev)
 {
-       struct xgene_rng_dev *ctx = platform_get_drvdata(pdev);
        int rc;
 
        rc = device_init_wakeup(&pdev->dev, 0);
        if (rc)
                dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
-       if (!IS_ERR(ctx->clk))
-               clk_disable_unprepare(ctx->clk);
 
-       return rc;
+       return 0;
 }
 
 static const struct of_device_id xgene_rng_of_match[] = {
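
The xgene hunks above convert the dev_err() + return pairs to dev_err_probe(), which logs the message for real errors, stays quiet for -EPROBE_DEFER while recording the deferral reason, and returns the error code so the pair collapses into one statement. A sketch of the idiom, with a hypothetical helper:

#include <linux/device.h>

static int example_request_irq_status(struct device *dev, int rc)
{
	/*
	 * dev_err_probe() returns the error passed in; for -EPROBE_DEFER
	 * it notes the reason (visible in devices_deferred debugfs)
	 * instead of spamming the log.
	 */
	if (rc)
		return dev_err_probe(dev, rc,
				     "Could not request RNG alarm IRQ\n");
	return 0;
}
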
index 3b2516d1433f7698afc45e24d1041c6c618cd97a..9c440cd0fed05e64d588746788a527dbc0e38a30 100644 (file)
@@ -240,21 +240,6 @@ config CRYPTO_DEV_TALITOS2
          Say 'Y' here to use the Freescale Security Engine (SEC)
          version 2 and following as found on MPC83xx, MPC85xx, etc ...
 
-config CRYPTO_DEV_IXP4XX
-       tristate "Driver for IXP4xx crypto hardware acceleration"
-       depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
-       select CRYPTO_AES
-       select CRYPTO_DES
-       select CRYPTO_ECB
-       select CRYPTO_CBC
-       select CRYPTO_CTR
-       select CRYPTO_LIB_DES
-       select CRYPTO_AEAD
-       select CRYPTO_AUTHENC
-       select CRYPTO_SKCIPHER
-       help
-         Driver for the IXP4xx NPE crypto engine.
-
 config CRYPTO_DEV_PPC4XX
        tristate "Driver AMCC PPC4xx crypto accelerator"
        depends on PPC && 4xx
@@ -502,10 +487,10 @@ config CRYPTO_DEV_MXS_DCP
          To compile this driver as a module, choose M here: the module
          will be called mxs-dcp.
 
-source "drivers/crypto/qat/Kconfig"
 source "drivers/crypto/cavium/cpt/Kconfig"
 source "drivers/crypto/cavium/nitrox/Kconfig"
 source "drivers/crypto/marvell/Kconfig"
+source "drivers/crypto/intel/Kconfig"
 
 config CRYPTO_DEV_CAVIUM_ZIP
        tristate "Cavium ZIP driver"
@@ -774,7 +759,7 @@ config CRYPTO_DEV_ARTPEC6
 config CRYPTO_DEV_CCREE
        tristate "Support for ARM TrustZone CryptoCell family of security processors"
        depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
-       default n
+       depends on HAS_IOMEM
        select CRYPTO_HASH
        select CRYPTO_SKCIPHER
        select CRYPTO_LIB_DES
@@ -810,6 +795,7 @@ config CRYPTO_DEV_SA2UL
        select CRYPTO_AES
        select CRYPTO_ALGAPI
        select CRYPTO_AUTHENC
+       select CRYPTO_DES
        select CRYPTO_SHA1
        select CRYPTO_SHA256
        select CRYPTO_SHA512
@@ -820,7 +806,6 @@ config CRYPTO_DEV_SA2UL
          used for crypto offload.  Select this if you want to use hardware
          acceleration for cryptographic algorithms on these devices.
 
-source "drivers/crypto/keembay/Kconfig"
 source "drivers/crypto/aspeed/Kconfig"
 
 endif # CRYPTO_HW
index 476f1a25ca326acb7902594129e5f98772f26035..51d36701e7851acd33986169456ad1f4cd1a0313 100644 (file)
@@ -19,7 +19,6 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
-obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
 obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
@@ -33,7 +32,6 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
-obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
 obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
 obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
@@ -51,4 +49,4 @@ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
 obj-y += xilinx/
 obj-y += hisilicon/
 obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
-obj-y += keembay/
+obj-y += intel/
index 50dc783821b699b8bdb31e1db7796aba4b8f2413..d553f3f1efbeea65c498d212d6dbe2ecab1ab00b 100644 (file)
@@ -1101,7 +1101,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
 static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
                                                      u32 clr_val)
 {
-       struct device *dev = (struct device *)data;
+       struct device *dev = data;
        struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
 
        writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
index 1f77ebd7348926ec04b057c348677b859d52c141..470122c87feaae9228e4fe05ae8e1a3d402a91f7 100644 (file)
@@ -289,7 +289,7 @@ static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf,
 
                        if (mode == ASPEED_RSA_EXP_MODE)
                                idx = acry_dev->exp_dw_mapping[j - 1];
-                       else if (mode == ASPEED_RSA_MOD_MODE)
+                       else /* mode == ASPEED_RSA_MOD_MODE */
                                idx = acry_dev->mod_dw_mapping[j - 1];
 
                        dw_buf[idx] = cpu_to_le32(data);
@@ -712,7 +712,6 @@ static int aspeed_acry_probe(struct platform_device *pdev)
 {
        struct aspeed_acry_dev *acry_dev;
        struct device *dev = &pdev->dev;
-       struct resource *res;
        int rc;
 
        acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev),
@@ -724,13 +723,11 @@ static int aspeed_acry_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, acry_dev);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       acry_dev->regs = devm_ioremap_resource(dev, res);
+       acry_dev->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(acry_dev->regs))
                return PTR_ERR(acry_dev->regs);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       acry_dev->acry_sram = devm_ioremap_resource(dev, res);
+       acry_dev->acry_sram = devm_platform_ioremap_resource(pdev, 1);
        if (IS_ERR(acry_dev->acry_sram))
                return PTR_ERR(acry_dev->acry_sram);
 
@@ -782,7 +779,10 @@ static int aspeed_acry_probe(struct platform_device *pdev)
        acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE,
                                                 &acry_dev->buf_dma_addr,
                                                 GFP_KERNEL);
-       memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
+       if (!acry_dev->buf_addr) {
+               rc = -ENOMEM;
+               goto err_engine_rsa_start;
+       }
 
        aspeed_acry_register(acry_dev);
 
index ed10f2ae452305a8f66dce2e681c950a1d478461..143d33fbb31621aad3eb5267b44434a525dc5240 100644 (file)
@@ -493,17 +493,11 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
        if (req->cryptlen < ivsize)
                return;
 
-       if (rctx->mode & AES_FLAGS_ENCRYPT) {
+       if (rctx->mode & AES_FLAGS_ENCRYPT)
                scatterwalk_map_and_copy(req->iv, req->dst,
                                         req->cryptlen - ivsize, ivsize, 0);
-       } else {
-               if (req->src == req->dst)
-                       memcpy(req->iv, rctx->lastc, ivsize);
-               else
-                       scatterwalk_map_and_copy(req->iv, req->src,
-                                                req->cryptlen - ivsize,
-                                                ivsize, 0);
-       }
+       else
+               memcpy(req->iv, rctx->lastc, ivsize);
 }
 
 static inline struct atmel_aes_ctr_ctx *
@@ -1146,7 +1140,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
        rctx->mode = mode;
 
        if (opmode != AES_FLAGS_ECB &&
-           !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
+           !(mode & AES_FLAGS_ENCRYPT)) {
                unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 
                if (req->cryptlen >= ivsize)
@@ -1341,7 +1335,7 @@ static struct skcipher_alg aes_algs[] = {
 {
        .base.cra_name          = "cfb(aes)",
        .base.cra_driver_name   = "atmel-cfb-aes",
-       .base.cra_blocksize     = AES_BLOCK_SIZE,
+       .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct atmel_aes_ctx),
 
        .init                   = atmel_aes_init_tfm,
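
The atmel-aes IV handling now saves the last ciphertext block up front for every non-ECB decryption, not just in-place ones, so the completion path can always memcpy() it from rctx->lastc. A sketch of that save step, using the scatterwalk helper the driver relies on (the wrapper name is ours):

#include <crypto/scatterwalk.h>

/*
 * Decryption overwrites the ciphertext, but chaining modes need the
 * last ciphertext block as the next IV, so copy it out of the source
 * scatterlist first (final argument 0 = copy from the scatterlist).
 */
static void save_last_cblock(u8 *lastc, struct scatterlist *src,
			     unsigned int cryptlen, unsigned int ivsize)
{
	scatterwalk_map_and_copy(lastc, src, cryptlen - ivsize, ivsize, 0);
}
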
index e7c1db2739ec036e35727c0362ea84b5ddce9d6c..6bef634d3c86f55d5004e33897db2fe521361e2c 100644 (file)
@@ -1948,14 +1948,32 @@ static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+       struct scatterlist *sgbuf;
        size_t hs = ctx->hash_size;
        size_t i, num_words = hs / sizeof(u32);
        bool use_dma = false;
        u32 mr;
 
        /* Special case for empty message. */
-       if (!req->nbytes)
-               return atmel_sha_complete(dd, -EINVAL); // TODO:
+       if (!req->nbytes) {
+               req->nbytes = 0;
+               ctx->bufcnt = 0;
+               ctx->digcnt[0] = 0;
+               ctx->digcnt[1] = 0;
+               switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+               case SHA_FLAGS_SHA1:
+               case SHA_FLAGS_SHA224:
+               case SHA_FLAGS_SHA256:
+                       atmel_sha_fill_padding(ctx, 64);
+                       break;
+
+               case SHA_FLAGS_SHA384:
+               case SHA_FLAGS_SHA512:
+                       atmel_sha_fill_padding(ctx, 128);
+                       break;
+               }
+               sg_init_one(&dd->tmp, ctx->buffer, ctx->bufcnt);
+       }
 
        /* Check DMA threshold and alignment. */
        if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
@@ -1985,12 +2003,20 @@ static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
 
        atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
 
+       /* Special case for empty message. */
+       if (!req->nbytes) {
+               sgbuf = &dd->tmp;
+               req->nbytes = ctx->bufcnt;
+       } else {
+               sgbuf = req->src;
+       }
+
        /* Process data. */
        if (use_dma)
-               return atmel_sha_dma_start(dd, req->src, req->nbytes,
+               return atmel_sha_dma_start(dd, sgbuf, req->nbytes,
                                           atmel_sha_hmac_final_done);
 
-       return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
+       return atmel_sha_cpu_start(dd, sgbuf, req->nbytes, false, true,
                                   atmel_sha_hmac_final_done);
 }
 
index 4403dbb0f0b1d64d9c0019b2b28cee4d9b21d7e3..44a185a84760a73f141afcf0a78cddb7c2a1db94 100644 (file)
@@ -126,7 +126,7 @@ static void atmel_sha204a_remove(struct i2c_client *client)
        kfree((void *)i2c_priv->hwrng.priv);
 }
 
-static const struct of_device_id atmel_sha204a_dt_ids[] = {
+static const struct of_device_id atmel_sha204a_dt_ids[] __maybe_unused = {
        { .compatible = "atmel,atsha204", },
        { .compatible = "atmel,atsha204a", },
        { /* sentinel */ }
index b2d48c1649b9d785915eba3ad46ffb7f84db5962..c9ded8be9c395298e3e77c5841781b01e4759d70 100644 (file)
@@ -565,17 +565,12 @@ atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
        if (req->cryptlen < ivsize)
                return;
 
-       if (rctx->mode & TDES_FLAGS_ENCRYPT) {
+       if (rctx->mode & TDES_FLAGS_ENCRYPT)
                scatterwalk_map_and_copy(req->iv, req->dst,
                                         req->cryptlen - ivsize, ivsize, 0);
-       } else {
-               if (req->src == req->dst)
-                       memcpy(req->iv, rctx->lastc, ivsize);
-               else
-                       scatterwalk_map_and_copy(req->iv, req->src,
-                                                req->cryptlen - ivsize,
-                                                ivsize, 0);
-       }
+       else
+               memcpy(req->iv, rctx->lastc, ivsize);
+
 }
 
 static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
@@ -722,7 +717,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
        rctx->mode = mode;
 
        if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
-           !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
+           !(mode & TDES_FLAGS_ENCRYPT)) {
                unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 
                if (req->cryptlen >= ivsize)
index 12b1c8346243d35d9d1cf3451edf5fadc0ae5774..feb86013dbf63f73011eb0d23988f98c24bf75c1 100644 (file)
@@ -3,7 +3,7 @@
  * caam - Freescale FSL CAAM support for crypto API
  *
  * Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2019, 2023 NXP
  *
  * Based on talitos crypto API driver.
  *
@@ -3542,13 +3542,14 @@ int caam_algapi_init(struct device *ctrldev)
         * First, detect presence and attributes of DES, AES, and MD blocks.
         */
        if (priv->era < 10) {
+               struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
                u32 cha_vid, cha_inst, aes_rn;
 
-               cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+               cha_vid = rd_reg32(&perfmon->cha_id_ls);
                aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
                md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
 
-               cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+               cha_inst = rd_reg32(&perfmon->cha_num_ls);
                des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
                           CHA_ID_LS_DES_SHIFT;
                aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
@@ -3556,23 +3557,23 @@ int caam_algapi_init(struct device *ctrldev)
                ccha_inst = 0;
                ptha_inst = 0;
 
-               aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
-                        CHA_ID_LS_AES_MASK;
+               aes_rn = rd_reg32(&perfmon->cha_rev_ls) & CHA_ID_LS_AES_MASK;
                gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
        } else {
+               struct version_regs __iomem *vreg = &priv->jr[0]->vreg;
                u32 aesa, mdha;
 
-               aesa = rd_reg32(&priv->ctrl->vreg.aesa);
-               mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+               aesa = rd_reg32(&vreg->aesa);
+               mdha = rd_reg32(&vreg->mdha);
 
                aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
                md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
 
-               des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+               des_inst = rd_reg32(&vreg->desa) & CHA_VER_NUM_MASK;
                aes_inst = aesa & CHA_VER_NUM_MASK;
                md_inst = mdha & CHA_VER_NUM_MASK;
-               ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
-               ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+               ccha_inst = rd_reg32(&vreg->ccha) & CHA_VER_NUM_MASK;
+               ptha_inst = rd_reg32(&vreg->ptha) & CHA_VER_NUM_MASK;
 
                gcm_support = aesa & CHA_VER_MISC_AES_GCM;
        }
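
These caamalg hunks, like the caamhash/caampkc/caamrng ones below, move block-detection reads from the controller's global register page to the alias copies in the first job ring's page, so detection keeps working when firmware owns page 0. The idea in isolation, assuming the caam_drv_private/caam_perfmon layouts from this series:

/* Belongs inside the caam driver; uses this series' structures. */
static u32 read_cha_num_ls(struct caam_drv_private *priv)
{
	struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;

	return rd_reg32(&perfmon->cha_num_ls);
}
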
index 82d3c730a502ecb5c131d8cea7daee167c1bdee5..80deb003f0a59a6b1ab541f60abfb189e820c508 100644 (file)
@@ -3,7 +3,7 @@
  * caam - Freescale FSL CAAM support for ahash functions of crypto API
  *
  * Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
  *
  * Based on caamalg.c crypto API driver.
  *
@@ -1956,12 +1956,14 @@ int caam_algapi_hash_init(struct device *ctrldev)
         * presence and attributes of MD block.
         */
        if (priv->era < 10) {
-               md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
+               struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
+
+               md_vid = (rd_reg32(&perfmon->cha_id_ls) &
                          CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
-               md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+               md_inst = (rd_reg32(&perfmon->cha_num_ls) &
                           CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
        } else {
-               u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+               u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);
 
                md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
                md_inst = mdha & CHA_VER_NUM_MASK;
index e40614fef39dc30a558e7d45b812c3d68938e769..72afc249d42fbb2c418617121bc96348fa6be257 100644 (file)
@@ -3,7 +3,7 @@
  * caam - Freescale FSL CAAM support for Public Key Cryptography
  *
  * Copyright 2016 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
  *
  * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
  * all the desired key parameters, input and output pointers.
@@ -1168,10 +1168,10 @@ int caam_pkc_init(struct device *ctrldev)
 
        /* Determine public key hardware accelerator presence. */
        if (priv->era < 10) {
-               pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+               pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
                           CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
        } else {
-               pkha = rd_reg32(&priv->ctrl->vreg.pkha);
+               pkha = rd_reg32(&priv->jr[0]->vreg.pkha);
                pk_inst = pkha & CHA_VER_NUM_MASK;
 
                /*
index 1fd8ff965006cab6792bb65cc63d93fc6327197b..50eb55da45c29ba1936625537d575d1c1928052c 100644 (file)
@@ -3,7 +3,7 @@
  * caam - Freescale FSL CAAM support for hw_random
  *
  * Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
  *
  * Based on caamalg.c crypto API driver.
  *
@@ -227,10 +227,10 @@ int caam_rng_init(struct device *ctrldev)
 
        /* Check for an instantiated RNG before registration */
        if (priv->era < 10)
-               rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+               rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
                            CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
        else
-               rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
+               rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;
 
        if (!rng_inst)
                return 0;
index 6278afb951c3068fdecb8ed464cd8508727f87f3..bedcc2ab3a00a65efddae36317a91cbd5d5222fb 100644 (file)
@@ -3,7 +3,7 @@
  * Controller-level driver, kernel property detection, initialization
  *
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2023 NXP
  */
 
 #include <linux/device.h>
@@ -284,6 +284,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
                const u32 rdsta_if = RDSTA_IF0 << sh_idx;
                const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
                const u32 rdsta_mask = rdsta_if | rdsta_pr;
+
+               /* Clear the contents before using the descriptor */
+               memset(desc, 0x00, CAAM_CMD_SZ * 7);
+
                /*
                 * If the corresponding bit is set, this state handle
                 * was initialized by somebody else, so it's left alone.
@@ -327,8 +331,6 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
                }
 
                dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
-               /* Clear the contents before recreating the descriptor */
-               memset(desc, 0x00, CAAM_CMD_SZ * 7);
        }
 
        kfree(desc);
@@ -395,7 +397,7 @@ start_rng:
                      RTMCTL_SAMP_MODE_RAW_ES_SC);
 }
 
-static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
+static int caam_get_era_from_hw(struct caam_perfmon __iomem *perfmon)
 {
        static const struct {
                u16 ip_id;
@@ -421,12 +423,12 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
        u16 ip_id;
        int i;
 
-       ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
+       ccbvid = rd_reg32(&perfmon->ccb_id);
        era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
        if (era)        /* This is '0' prior to CAAM ERA-6 */
                return era;
 
-       id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
+       id_ms = rd_reg32(&perfmon->caam_id_ms);
        ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
        maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
 
@@ -444,9 +446,9 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
  * In case this property is not passed an attempt to retrieve the CAAM
  * era via register reads will be made.
  *
- * @ctrl:      controller region
+ * @perfmon:   Performance Monitor Registers
  */
-static int caam_get_era(struct caam_ctrl __iomem *ctrl)
+static int caam_get_era(struct caam_perfmon __iomem *perfmon)
 {
        struct device_node *caam_node;
        int ret;
@@ -459,7 +461,7 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
        if (!ret)
                return prop;
        else
-               return caam_get_era_from_hw(ctrl);
+               return caam_get_era_from_hw(perfmon);
 }
 
 /*
@@ -626,12 +628,14 @@ static int caam_probe(struct platform_device *pdev)
        struct device_node *nprop, *np;
        struct caam_ctrl __iomem *ctrl;
        struct caam_drv_private *ctrlpriv;
+       struct caam_perfmon __iomem *perfmon;
        struct dentry *dfs_root;
        u32 scfgr, comp_params;
        u8 rng_vid;
        int pg_size;
        int BLOCK_OFFSET = 0;
        bool pr_support = false;
+       bool reg_access = true;
 
        ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
        if (!ctrlpriv)
@@ -645,6 +649,17 @@ static int caam_probe(struct platform_device *pdev)
        caam_imx = (bool)imx_soc_match;
 
        if (imx_soc_match) {
+               /*
+                * Until Layerscape and i.MX OP-TEE get in sync,
+                * only i.MX OP-TEE use cases disallow access to
+                * caam page 0 (controller) registers.
+                */
+               np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz");
+               ctrlpriv->optee_en = !!np;
+               of_node_put(np);
+
+               reg_access = !ctrlpriv->optee_en;
+
                if (!imx_soc_match->data) {
                        dev_err(dev, "No clock data provided for i.MX SoC");
                        return -EINVAL;
@@ -665,10 +680,38 @@ static int caam_probe(struct platform_device *pdev)
                return ret;
        }
 
-       caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
+       ring = 0;
+       for_each_available_child_of_node(nprop, np)
+               if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+                   of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+                       u32 reg;
+
+                       if (of_property_read_u32_index(np, "reg", 0, &reg)) {
+                               dev_err(dev, "%s read reg property error\n",
+                                       np->full_name);
+                               continue;
+                       }
+
+                       ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+                                            ((__force uint8_t *)ctrl + reg);
+
+                       ctrlpriv->total_jobrs++;
+                       ring++;
+               }
+
+       /*
+        * Wherever possible, instead of accessing registers from the global
+        * page, use the alias registers in the page of the first job ring
+        * (first in DT node order).
+        */
+       perfmon = ring ? (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon :
+                        (struct caam_perfmon __iomem *)&ctrl->perfmon;
+
+       caam_little_end = !(bool)(rd_reg32(&perfmon->status) &
                                  (CSTA_PLEND | CSTA_ALT_PLEND));
-       comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
-       if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
+       comp_params = rd_reg32(&perfmon->comp_parms_ms);
+       if (reg_access && comp_params & CTPR_MS_PS &&
+           rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
                caam_ptr_sz = sizeof(u64);
        else
                caam_ptr_sz = sizeof(u32);
@@ -733,6 +776,9 @@ static int caam_probe(struct platform_device *pdev)
        }
 #endif
 
+       if (!reg_access)
+               goto set_dma_mask;
+
        /*
         * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
         * long pointers in master configuration register.
@@ -772,13 +818,14 @@ static int caam_probe(struct platform_device *pdev)
                              JRSTART_JR1_START | JRSTART_JR2_START |
                              JRSTART_JR3_START);
 
+set_dma_mask:
        ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
        if (ret) {
                dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
                return ret;
        }
 
-       ctrlpriv->era = caam_get_era(ctrl);
+       ctrlpriv->era = caam_get_era(perfmon);
        ctrlpriv->domain = iommu_get_domain_for_dev(dev);
 
        dfs_root = debugfs_create_dir(dev_name(dev), NULL);
@@ -789,7 +836,7 @@ static int caam_probe(struct platform_device *pdev)
                        return ret;
        }
 
-       caam_debugfs_init(ctrlpriv, dfs_root);
+       caam_debugfs_init(ctrlpriv, perfmon, dfs_root);
 
        /* Check to see if (DPAA 1.x) QI present. If so, enable */
        if (ctrlpriv->qi_present && !caam_dpaa2) {
@@ -808,26 +855,16 @@ static int caam_probe(struct platform_device *pdev)
 #endif
        }
 
-       ring = 0;
-       for_each_available_child_of_node(nprop, np)
-               if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
-                   of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
-                       ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
-                                            ((__force uint8_t *)ctrl +
-                                            (ring + JR_BLOCK_NUMBER) *
-                                             BLOCK_OFFSET
-                                            );
-                       ctrlpriv->total_jobrs++;
-                       ring++;
-               }
-
        /* If no QI and no rings specified, quit and go home */
        if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
                dev_err(dev, "no queues configured, terminating\n");
                return -ENOMEM;
        }
 
-       comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ls);
+       if (!reg_access)
+               goto report_live;
+
+       comp_params = rd_reg32(&perfmon->comp_parms_ls);
        ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB);
 
        /*
@@ -836,15 +873,21 @@ static int caam_probe(struct platform_device *pdev)
         * check both here.
         */
        if (ctrlpriv->era < 10) {
-               rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
+               rng_vid = (rd_reg32(&perfmon->cha_id_ls) &
                           CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
                ctrlpriv->blob_present = ctrlpriv->blob_present &&
-                       (rd_reg32(&ctrl->perfmon.cha_num_ls) & CHA_ID_LS_AES_MASK);
+                       (rd_reg32(&perfmon->cha_num_ls) & CHA_ID_LS_AES_MASK);
        } else {
-               rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
+               struct version_regs __iomem *vreg;
+
+               vreg =  ctrlpriv->total_jobrs ?
+                       (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
+                       (struct version_regs __iomem *)&ctrl->vreg;
+
+               rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >>
                           CHA_VER_VID_SHIFT;
                ctrlpriv->blob_present = ctrlpriv->blob_present &&
-                       (rd_reg32(&ctrl->vreg.aesa) & CHA_VER_MISC_AES_NUM_MASK);
+                       (rd_reg32(&vreg->aesa) & CHA_VER_MISC_AES_NUM_MASK);
        }
 
        /*
@@ -923,10 +966,11 @@ static int caam_probe(struct platform_device *pdev)
                clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
        }
 
+report_live:
        /* NOTE: RTIC detection ought to go here, around Si time */
 
-       caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
-                 (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
+       caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 |
+                 (u64)rd_reg32(&perfmon->caam_id_ls);
 
        /* Report "alive" for developer to see */
        dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
index 806bb20d2aa196a2cef2ff526191b786e3973b99..6358d3cabf57106e7a7d7099cd1133a6d2a9f834 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2019 NXP */
+/* Copyright 2019, 2023 NXP */
 
 #include <linux/debugfs.h>
 #include "compat.h"
@@ -42,16 +42,15 @@ void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv)
 }
 #endif
 
-void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+                      struct caam_perfmon __force *perfmon,
+                      struct dentry *root)
 {
-       struct caam_perfmon *perfmon;
-
        /*
         * FIXME: needs better naming distinction, as some amalgamation of
         * "caam" and nprop->full_name. The OF name isn't distinctive,
         * but does separate instances
         */
-       perfmon = (struct caam_perfmon __force *)&ctrlpriv->ctrl->perfmon;
 
        ctrlpriv->ctl = debugfs_create_dir("ctl", root);
 
@@ -78,6 +77,9 @@ void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
        debugfs_create_file("fault_status", 0444, ctrlpriv->ctl,
                            &perfmon->status, &caam_fops_u32_ro);
 
+       if (ctrlpriv->optee_en)
+               return;
+
        /* Internal covering keys (useful in non-secure mode only) */
        ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
        ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
index 661d768acdbfccfb65f5b2480be1a9f0a878281a..8b5d1acd21a7f6e31769e2ce08e29c191c343149 100644 (file)
@@ -1,16 +1,19 @@
 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2019 NXP */
+/* Copyright 2019, 2023 NXP */
 
 #ifndef CAAM_DEBUGFS_H
 #define CAAM_DEBUGFS_H
 
 struct dentry;
 struct caam_drv_private;
+struct caam_perfmon;
 
 #ifdef CONFIG_DEBUG_FS
-void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root);
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+                      struct caam_perfmon __force *perfmon, struct dentry *root);
 #else
 static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+                                    struct caam_perfmon __force *perfmon,
                                     struct dentry *root)
 {}
 #endif
index 0eca8c2fd91601f2ecbc039565ae2f8fe8137db0..020a9d8a8a07513277b1830ea08c703828e50190 100644 (file)
@@ -8,7 +8,7 @@
 
 static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset)
 {
-       struct dpaa2_caam_priv *priv = (struct dpaa2_caam_priv *)file->private;
+       struct dpaa2_caam_priv *priv = file->private;
        u32 fqid, fcnt, bcnt;
        int i, err;
 
index 572cf66c887a055b72d145ead8ece8c923abc461..86ed1b91c22d4da0d7b142d9e0b1dab66f97c8a7 100644 (file)
@@ -94,6 +94,7 @@ struct caam_drv_private {
        u8 qi_present;          /* Nonzero if QI present in device */
        u8 blob_present;        /* Nonzero if BLOB support present in device */
        u8 mc_en;               /* Nonzero if MC f/w is active */
+       u8 optee_en;            /* Nonzero if OP-TEE f/w is active */
        int secvio_irq;         /* Security violation interrupt number */
        int virt_en;            /* Virtualization enabled in CAAM */
        int era;                /* CAAM Era (internal HW revision) */
index 724fdec18bf9782083bd7ce7215844dc15a07398..96dea5304d228af9cf7d9a46291b450b4457222a 100644 (file)
@@ -4,7 +4,7 @@
  * JobR backend functionality
  *
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2023 NXP
  */
 
 #include <linux/of_irq.h>
@@ -72,19 +72,27 @@ static void caam_jr_crypto_engine_exit(void *data)
        crypto_engine_exit(jrpriv->engine);
 }
 
-static int caam_reset_hw_jr(struct device *dev)
+/*
+ * Put the CAAM job ring in quiesce, i.e. stop it from processing further jobs
+ *
+ * Must be called with the job ring interrupt masked
+ */
+static int caam_jr_stop_processing(struct device *dev, u32 jrcr_bits)
 {
        struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
        unsigned int timeout = 100000;
 
-       /*
-        * mask interrupts since we are going to poll
-        * for reset completion status
-        */
-       clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+       /* Check the current status */
+       if (rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_INPROGRESS)
+               goto wait_quiesce_completion;
 
-       /* initiate flush (required prior to reset) */
-       wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+       /* Reset the field */
+       clrsetbits_32(&jrp->rregs->jrintstatus, JRINT_ERR_HALT_MASK, 0);
+
+       /* initiate flush / park (required prior to reset) */
+       wr_reg32(&jrp->rregs->jrcommand, jrcr_bits);
+
+wait_quiesce_completion:
        while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
                JRINT_ERR_HALT_INPROGRESS) && --timeout)
                cpu_relax();
@@ -95,8 +103,35 @@ static int caam_reset_hw_jr(struct device *dev)
                return -EIO;
        }
 
+       return 0;
+}
+
+/*
+ * Flush the job ring: running jobs are stopped, queued jobs are invalidated
+ * and the CAAM no longer fetches from the input ring.
+ *
+ * Must be called with the job ring interrupt masked
+ */
+static int caam_jr_flush(struct device *dev)
+{
+       return caam_jr_stop_processing(dev, JRCR_RESET);
+}
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+       struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+       unsigned int timeout = 100000;
+       int err;
+       /*
+        * mask interrupts since we are going to poll
+        * for reset completion status
+        */
+       clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+       err = caam_jr_flush(dev);
+       if (err)
+               return err;
+
        /* initiate reset */
-       timeout = 100000;
        wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
        while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
                cpu_relax();
@@ -163,6 +198,11 @@ static int caam_jr_remove(struct platform_device *pdev)
        return ret;
 }
 
+static void caam_jr_platform_shutdown(struct platform_device *pdev)
+{
+       caam_jr_remove(pdev);
+}
+
 /* Main per-ring interrupt handler */
 static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
 {
@@ -618,6 +658,7 @@ static struct platform_driver caam_jr_driver = {
        },
        .probe       = caam_jr_probe,
        .remove      = caam_jr_remove,
+       .shutdown    = caam_jr_platform_shutdown,
 };
 
 static int __init jr_driver_init(void)
index 432a61aca0c5fae3994e9428e2916e659b738e36..65114f766e7de4930eeb7fdc7cc31c5804dc3aa5 100644 (file)
@@ -1,5 +1,4 @@
 // SPDX-License-Identifier: GPL-2.0-only
-#include <linux/aer.h>
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/list.h>
index db362fe472ea3ab137da1b1b31e7505e9037534d..f6196495e862d365bfd0055ed83a139ee42a40e0 100644 (file)
@@ -10,7 +10,8 @@ ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o
 ccp-$(CONFIG_PCI) += sp-pci.o
 ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
                                    sev-dev.o \
-                                   tee-dev.o
+                                   tee-dev.o \
+                                   platform-access.o
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
 ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/platform-access.c b/drivers/crypto/ccp/platform-access.c
new file mode 100644 (file)
index 0000000..939c924
--- /dev/null
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Security Processor (PSP) Platform Access interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <[email protected]>
+ *
+ * Some of this code is adapted from drivers/i2c/busses/i2c-designware-amdpsp.c
+ * developed by Jan Dabros <[email protected]> and Copyright (C) 2022 Google Inc.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+
+#include "platform-access.h"
+
+#define PSP_CMD_TIMEOUT_US     (500 * USEC_PER_MSEC)
+#define DOORBELL_CMDRESP_STS   GENMASK(7, 0)
+
+/* The recovery field must be 0 before commands can be sent */
+static int check_recovery(u32 __iomem *cmd)
+{
+       return FIELD_GET(PSP_CMDRESP_RECOVERY, ioread32(cmd));
+}
+
+static int wait_cmd(u32 __iomem *cmd)
+{
+       u32 tmp, expected;
+
+       /* Expect mbox_cmd to be cleared and ready bit to be set by PSP */
+       expected = FIELD_PREP(PSP_CMDRESP_RESP, 1);
+
+       /*
+        * Check for readiness of the PSP mailbox in a tight loop so that we
+        * can proceed as soon as the command has been consumed.
+        */
+       return readl_poll_timeout(cmd, tmp, (tmp & expected), 0,
+                                 PSP_CMD_TIMEOUT_US);
+}
+
+int psp_check_platform_access_status(void)
+{
+       struct psp_device *psp = psp_get_master_device();
+
+       if (!psp || !psp->platform_access_data)
+               return -ENODEV;
+
+       return 0;
+}
+EXPORT_SYMBOL(psp_check_platform_access_status);
+
+int psp_send_platform_access_msg(enum psp_platform_access_msg msg,
+                                struct psp_request *req)
+{
+       struct psp_device *psp = psp_get_master_device();
+       u32 __iomem *cmd, *lo, *hi;
+       struct psp_platform_access_device *pa_dev;
+       phys_addr_t req_addr;
+       u32 cmd_reg;
+       int ret;
+
+       if (!psp || !psp->platform_access_data)
+               return -ENODEV;
+
+       pa_dev = psp->platform_access_data;
+       cmd = psp->io_regs + pa_dev->vdata->cmdresp_reg;
+       lo = psp->io_regs + pa_dev->vdata->cmdbuff_addr_lo_reg;
+       hi = psp->io_regs + pa_dev->vdata->cmdbuff_addr_hi_reg;
+
+       mutex_lock(&pa_dev->mailbox_mutex);
+
+       if (check_recovery(cmd)) {
+               dev_dbg(psp->dev, "platform mailbox is in recovery\n");
+               ret = -EBUSY;
+               goto unlock;
+       }
+
+       if (wait_cmd(cmd)) {
+               dev_dbg(psp->dev, "platform mailbox is not done processing command\n");
+               ret = -EBUSY;
+               goto unlock;
+       }
+
+       /*
+        * Fill the mailbox with the address of the command-response buffer,
+        * which is used both for sending i2c requests and for reading the
+        * status returned by the PSP. Use the physical address of the
+        * buffer, since the PSP will map this region.
+        */
+       req_addr = __psp_pa(req);
+       iowrite32(lower_32_bits(req_addr), lo);
+       iowrite32(upper_32_bits(req_addr), hi);
+
+       print_hex_dump_debug("->psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+                            req->header.payload_size, false);
+
+       /* Write command register to trigger processing */
+       cmd_reg = FIELD_PREP(PSP_CMDRESP_CMD, msg);
+       iowrite32(cmd_reg, cmd);
+
+       if (wait_cmd(cmd)) {
+               ret = -ETIMEDOUT;
+               goto unlock;
+       }
+
+       /* Ensure it was triggered by this driver */
+       if (ioread32(lo) != lower_32_bits(req_addr) ||
+           ioread32(hi) != upper_32_bits(req_addr)) {
+               ret = -EBUSY;
+               goto unlock;
+       }
+
+       /* Store the status in request header for caller to investigate */
+       cmd_reg = ioread32(cmd);
+       req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg);
+       if (req->header.status) {
+               ret = -EIO;
+               goto unlock;
+       }
+
+       print_hex_dump_debug("<-psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+                            req->header.payload_size, false);
+
+       ret = 0;
+
+unlock:
+       mutex_unlock(&pa_dev->mailbox_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(psp_send_platform_access_msg);
+
+int psp_ring_platform_doorbell(int msg, u32 *result)
+{
+       struct psp_device *psp = psp_get_master_device();
+       struct psp_platform_access_device *pa_dev;
+       u32 __iomem *button, *cmd;
+       int ret, val;
+
+       if (!psp || !psp->platform_access_data)
+               return -ENODEV;
+
+       pa_dev = psp->platform_access_data;
+       button = psp->io_regs + pa_dev->vdata->doorbell_button_reg;
+       cmd = psp->io_regs + pa_dev->vdata->doorbell_cmd_reg;
+
+       mutex_lock(&pa_dev->doorbell_mutex);
+
+       if (wait_cmd(cmd)) {
+               dev_err(psp->dev, "doorbell command not done processing\n");
+               ret = -EBUSY;
+               goto unlock;
+       }
+
+       iowrite32(FIELD_PREP(DOORBELL_CMDRESP_STS, msg), cmd);
+       iowrite32(PSP_DRBL_RING, button);
+
+       if (wait_cmd(cmd)) {
+               ret = -ETIMEDOUT;
+               goto unlock;
+       }
+
+       val = FIELD_GET(DOORBELL_CMDRESP_STS, ioread32(cmd));
+       if (val) {
+               if (result)
+                       *result = val;
+               ret = -EIO;
+               goto unlock;
+       }
+
+       ret = 0;
+unlock:
+       mutex_unlock(&pa_dev->doorbell_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(psp_ring_platform_doorbell);
+
+void platform_access_dev_destroy(struct psp_device *psp)
+{
+       struct psp_platform_access_device *pa_dev = psp->platform_access_data;
+
+       if (!pa_dev)
+               return;
+
+       mutex_destroy(&pa_dev->mailbox_mutex);
+       mutex_destroy(&pa_dev->doorbell_mutex);
+       psp->platform_access_data = NULL;
+}
+
+int platform_access_dev_init(struct psp_device *psp)
+{
+       struct device *dev = psp->dev;
+       struct psp_platform_access_device *pa_dev;
+
+       pa_dev = devm_kzalloc(dev, sizeof(*pa_dev), GFP_KERNEL);
+       if (!pa_dev)
+               return -ENOMEM;
+
+       psp->platform_access_data = pa_dev;
+       pa_dev->psp = psp;
+       pa_dev->dev = dev;
+
+       pa_dev->vdata = (struct platform_access_vdata *)psp->vdata->platform_access;
+
+       mutex_init(&pa_dev->mailbox_mutex);
+       mutex_init(&pa_dev->doorbell_mutex);
+
+       dev_dbg(dev, "platform access enabled\n");
+
+       return 0;
+}
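
psp_ring_platform_doorbell() above returns -EIO when the PSP itself reports a non-zero status, passing that status back through the result pointer. A hypothetical caller, assuming the declarations live in <linux/psp-platform-access.h> as the new header's include suggests:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/psp-platform-access.h>

static int ring_example(int msg)
{
	u32 result = 0;
	int ret;

	ret = psp_ring_platform_doorbell(msg, &result);
	if (ret == -EIO)
		pr_warn("PSP rejected doorbell msg %d: status 0x%x\n",
			msg, result);

	return ret;
}
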
diff --git a/drivers/crypto/ccp/platform-access.h b/drivers/crypto/ccp/platform-access.h
new file mode 100644 (file)
index 0000000..a83f03b
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Platform Access interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <[email protected]>
+ */
+
+#ifndef __PSP_PLATFORM_ACCESS_H__
+#define __PSP_PLATFORM_ACCESS_H__
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/psp-platform-access.h>
+
+#include "psp-dev.h"
+
+struct psp_platform_access_device {
+       struct device *dev;
+       struct psp_device *psp;
+
+       struct platform_access_vdata *vdata;
+
+       struct mutex mailbox_mutex;
+       struct mutex doorbell_mutex;
+
+       void *platform_access_data;
+};
+
+void platform_access_dev_destroy(struct psp_device *psp);
+int platform_access_dev_init(struct psp_device *psp);
+
+#endif /* __PSP_PLATFORM_ACCESS_H__ */
index c9c741ac84421929715691f567fccd90fe77a2e4..e3d6955d3265501f2bbfe640a103453c6813ed53 100644 (file)
@@ -14,6 +14,7 @@
 #include "psp-dev.h"
 #include "sev-dev.h"
 #include "tee-dev.h"
+#include "platform-access.h"
 
 struct psp_device *psp_master;
 
@@ -42,18 +43,15 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
        /* Read the interrupt status: */
        status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
 
+       /* Clear the interrupt status by writing the same value we read. */
+       iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
+
        /* invoke subdevice interrupt handlers */
        if (status) {
                if (psp->sev_irq_handler)
                        psp->sev_irq_handler(irq, psp->sev_irq_data, status);
-
-               if (psp->tee_irq_handler)
-                       psp->tee_irq_handler(irq, psp->tee_irq_data, status);
        }
 
-       /* Clear the interrupt status by writing the same value we read. */
-       iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
-
        return IRQ_HANDLED;
 }
 
@@ -105,6 +103,17 @@ static int psp_check_tee_support(struct psp_device *psp)
        return 0;
 }
 
+static void psp_init_platform_access(struct psp_device *psp)
+{
+       int ret;
+
+       ret = platform_access_dev_init(psp);
+       if (ret) {
+               dev_warn(psp->dev, "platform access init failed: %d\n", ret);
+               return;
+       }
+}
+
 static int psp_init(struct psp_device *psp)
 {
        int ret;
@@ -121,6 +130,9 @@ static int psp_init(struct psp_device *psp)
                        return ret;
        }
 
+       if (psp->vdata->platform_access)
+               psp_init_platform_access(psp);
+
        return 0;
 }
 
@@ -201,6 +213,8 @@ void psp_dev_destroy(struct sp_device *sp)
 
        tee_dev_destroy(psp);
 
+       platform_access_dev_destroy(psp);
+
        sp_free_psp_irq(sp, psp);
 
        if (sp->clear_psp_master_device)
@@ -219,18 +233,6 @@ void psp_clear_sev_irq_handler(struct psp_device *psp)
        psp_set_sev_irq_handler(psp, NULL, NULL);
 }
 
-void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
-                            void *data)
-{
-       psp->tee_irq_data = data;
-       psp->tee_irq_handler = handler;
-}
-
-void psp_clear_tee_irq_handler(struct psp_device *psp)
-{
-       psp_set_tee_irq_handler(psp, NULL, NULL);
-}
-
 struct psp_device *psp_get_master_device(void)
 {
        struct sp_device *sp = sp_get_psp_master_device();
index d528eb04c3ef674ce3bf466b0441d8e2629906aa..505e4bdeaca84ea990f4c42700f4ec383f331ebf 100644 (file)
@@ -17,9 +17,6 @@
 
 #include "sp-dev.h"
 
-#define PSP_CMDRESP_RESP               BIT(31)
-#define PSP_CMDRESP_ERR_MASK           0xffff
-
 #define MAX_PSP_NAME_LEN               16
 
 extern struct psp_device *psp_master;
@@ -40,11 +37,9 @@ struct psp_device {
        psp_irq_handler_t sev_irq_handler;
        void *sev_irq_data;
 
-       psp_irq_handler_t tee_irq_handler;
-       void *tee_irq_data;
-
        void *sev_data;
        void *tee_data;
+       void *platform_access_data;
 
        unsigned int capability;
 };
@@ -53,10 +48,6 @@ void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
                             void *data);
 void psp_clear_sev_irq_handler(struct psp_device *psp);
 
-void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
-                            void *data);
-void psp_clear_tee_irq_handler(struct psp_device *psp);
-
 struct psp_device *psp_get_master_device(void);
 
 #define PSP_CAPABILITY_SEV                     BIT(0)
index e346c00b132af53b47e686b5e34f7a041f67fd66..b1f3327f65e0eda8db71de61c16e90d7aa8e7c74 100644 (file)
@@ -7,6 +7,7 @@
  * Author: Brijesh Singh <[email protected]>
  */
 
+#include <linux/bitfield.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
@@ -24,6 +25,7 @@
 #include <linux/cpufeature.h>
 #include <linux/fs.h>
 #include <linux/fs_struct.h>
+#include <linux/psp.h>
 
 #include <asm/smp.h>
 #include <asm/cacheflush.h>
@@ -102,7 +104,7 @@ static void sev_irq_handler(int irq, void *data, unsigned int status)
 
        /* Check if it is SEV command completion: */
        reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
-       if (reg & PSP_CMDRESP_RESP) {
+       if (FIELD_GET(PSP_CMDRESP_RESP, reg)) {
                sev->int_rcvd = 1;
                wake_up(&sev->int_queue);
        }
@@ -346,9 +348,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
 
        sev->int_rcvd = 0;
 
-       reg = cmd;
-       reg <<= SEV_CMDRESP_CMD_SHIFT;
-       reg |= SEV_CMDRESP_IOC;
+       reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;
        iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
 
        /* wait for command completion */
@@ -366,11 +366,11 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
        psp_timeout = psp_cmd_timeout;
 
        if (psp_ret)
-               *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+               *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg);
 
-       if (reg & PSP_CMDRESP_ERR_MASK) {
-               dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
-                       cmd, reg & PSP_CMDRESP_ERR_MASK);
+       if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+               dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n",
+                       cmd, FIELD_GET(PSP_CMDRESP_STS, reg));
                ret = -EIO;
        } else {
                ret = sev_write_init_ex_file_if_required(cmd);
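
The conversions in this file replace open-coded shifts and masks with the <linux/bitfield.h> helpers, which derive the shift from the mask itself. A minimal demonstration of the semantics with a stand-in field (the DEMO_* names are hypothetical):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_CMD	GENMASK(26, 16)	/* 11-bit command field, bits 26:16 */
#define DEMO_IOC	BIT(0)

static u32 demo_pack(u32 cmd)
{
	/* For an in-range cmd, equivalent to (cmd << 16) | BIT(0). */
	return FIELD_PREP(DEMO_CMD, cmd) | DEMO_IOC;
}

static u32 demo_unpack(u32 reg)
{
	/* Equivalent to (reg & DEMO_CMD) >> 16. */
	return FIELD_GET(DEMO_CMD, reg);
}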
index 666c21eb81ab35e94a56c9a28bce47b56e35bf88..778c95155e745becb09ada5f21d83044e6a120e1 100644 (file)
@@ -25,8 +25,8 @@
 #include <linux/miscdevice.h>
 #include <linux/capability.h>
 
+#define SEV_CMDRESP_CMD                        GENMASK(26, 16)
 #define SEV_CMD_COMPLETE               BIT(1)
-#define SEV_CMDRESP_CMD_SHIFT          16
 #define SEV_CMDRESP_IOC                        BIT(0)
 
 struct sev_misc_dev {
index 20377e67f65dfd959e4df11d834e913151d87f48..1253a021798580432048b5928558113c40e2a982 100644 (file)
@@ -53,9 +53,19 @@ struct tee_vdata {
        const unsigned int ring_rptr_reg;
 };
 
+struct platform_access_vdata {
+       const unsigned int cmdresp_reg;
+       const unsigned int cmdbuff_addr_lo_reg;
+       const unsigned int cmdbuff_addr_hi_reg;
+       const unsigned int doorbell_button_reg;
+       const unsigned int doorbell_cmd_reg;
+};
+
 struct psp_vdata {
        const struct sev_vdata *sev;
        const struct tee_vdata *tee;
+       const struct platform_access_vdata *platform_access;
        const unsigned int feature_reg;
        const unsigned int inten_reg;
        const unsigned int intsts_reg;
index cde33b2ac71b25f8b21932e82e4b923266bf9d1f..aa15bc4cac2be50a661009a6f41f09e9b9841663 100644 (file)
@@ -361,6 +361,14 @@ static const struct tee_vdata teev1 = {
        .ring_rptr_reg          = 0x10554,      /* C2PMSG_21 */
 };
 
+static const struct platform_access_vdata pa_v1 = {
+       .cmdresp_reg            = 0x10570,      /* C2PMSG_28 */
+       .cmdbuff_addr_lo_reg    = 0x10574,      /* C2PMSG_29 */
+       .cmdbuff_addr_hi_reg    = 0x10578,      /* C2PMSG_30 */
+       .doorbell_button_reg    = 0x10a24,      /* C2PMSG_73 */
+       .doorbell_cmd_reg       = 0x10a40,      /* C2PMSG_80 */
+};
+
 static const struct psp_vdata pspv1 = {
        .sev                    = &sevv1,
        .feature_reg            = 0x105fc,      /* C2PMSG_63 */
@@ -377,6 +385,7 @@ static const struct psp_vdata pspv2 = {
 
 static const struct psp_vdata pspv3 = {
        .tee                    = &teev1,
+       .platform_access        = &pa_v1,
        .feature_reg            = 0x109fc,      /* C2PMSG_63 */
        .inten_reg              = 0x10690,      /* P2CMSG_INTEN */
        .intsts_reg             = 0x10694,      /* P2CMSG_INTSTS */
@@ -451,9 +460,9 @@ static const struct pci_device_id sp_pci_table[] = {
        { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
        { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
        { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
-       { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
        { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
        { PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
+       { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
        /* Last entry must be zero */
        { 0, }
 };
index 5c9d47f3be375093fc31c96f336570555fa04df0..5560bf8329a127eb335683514aee86a08d868162 100644 (file)
@@ -8,12 +8,13 @@
  * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
  */
 
+#include <linux/bitfield.h>
 #include <linux/types.h>
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/gfp.h>
-#include <linux/psp-sev.h>
+#include <linux/psp.h>
 #include <linux/psp-tee.h>
 
 #include "psp-dev.h"
@@ -69,7 +70,7 @@ static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
 
        while (--nloop) {
                *reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
-               if (*reg & PSP_CMDRESP_RESP)
+               if (FIELD_GET(PSP_CMDRESP_RESP, *reg))
                        return 0;
 
                usleep_range(10000, 10100);
@@ -149,9 +150,9 @@ static int tee_init_ring(struct psp_tee_device *tee)
                goto free_buf;
        }
 
-       if (reg & PSP_CMDRESP_ERR_MASK) {
-               dev_err(tee->dev, "tee: ring init command failed (%#010x)\n",
-                       reg & PSP_CMDRESP_ERR_MASK);
+       if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+               dev_err(tee->dev, "tee: ring init command failed (%#010lx)\n",
+                       FIELD_GET(PSP_CMDRESP_STS, reg));
                tee_free_ring(tee);
                ret = -EIO;
        }
@@ -179,9 +180,9 @@ static void tee_destroy_ring(struct psp_tee_device *tee)
        ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
        if (ret) {
                dev_err(tee->dev, "tee: ring destroy command timed out\n");
-       } else if (reg & PSP_CMDRESP_ERR_MASK) {
-               dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n",
-                       reg & PSP_CMDRESP_ERR_MASK);
+       } else if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+               dev_err(tee->dev, "tee: ring destroy command failed (%#010lx)\n",
+                       FIELD_GET(PSP_CMDRESP_STS, reg));
        }
 
 free_ring:
index d489c6f808925796655d5838aa010d7000dbbdaa..c57f929805d5c4fa0d91f3adbbd27f8232efc168 100644 (file)
@@ -350,9 +350,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
 
        /* Get device resources */
        /* First CC registers space */
-       req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
        /* Map registers space */
-       new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+       new_drvdata->cc_base = devm_platform_get_and_ioremap_resource(plat_dev,
+                                                                     0, &req_mem_cc_regs);
        if (IS_ERR(new_drvdata->cc_base))
                return PTR_ERR(new_drvdata->cc_base);
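
devm_platform_get_and_ioremap_resource() used above is a straight combination of the two calls it replaces, with the resource handed back through the last argument. Open-coded equivalent for comparison (sketch):

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_map(struct platform_device *pdev,
				 unsigned int index, struct resource **res)
{
	*res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, *res);
}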
 
index 5a7f6611803c172e331c5f50262c23792b76837c..8e4a49b7ab4fba967fd94b69b8a3802dc0191406 100644 (file)
@@ -879,7 +879,7 @@ static int hifn_enable_crypto(struct hifn_device *dev)
 
 static void hifn_init_dma(struct hifn_device *dev)
 {
-       struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+       struct hifn_dma *dma = dev->desc_virt;
        u32 dptr = dev->desc_dma;
        int i;
 
@@ -1072,7 +1072,7 @@ static int hifn_setup_crypto_command(struct hifn_device *dev,
                u8 *buf, unsigned dlen, unsigned slen,
                u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
 {
-       struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+       struct hifn_dma *dma = dev->desc_virt;
        struct hifn_crypt_command *cry_cmd;
        u8 *buf_pos = buf;
        u16 cmd_len;
@@ -1113,7 +1113,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
                struct hifn_context *ctx, struct hifn_request_context *rctx,
                void *priv, unsigned int nbytes)
 {
-       struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+       struct hifn_dma *dma = dev->desc_virt;
        int cmd_len, sa_idx;
        u8 *buf, *buf_pos;
        u16 mask;
@@ -1231,7 +1231,7 @@ err_out:
 static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
                unsigned int offset, unsigned int size, int last)
 {
-       struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+       struct hifn_dma *dma = dev->desc_virt;
        int idx;
        dma_addr_t addr;
 
@@ -1264,7 +1264,7 @@ static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
 
 static void hifn_setup_res_desc(struct hifn_device *dev)
 {
-       struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+       struct hifn_dma *dma = dev->desc_virt;
 
        dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
                        HIFN_D_VALID | HIFN_D_LAST);
@@ -1290,7 +1290,7 @@ static void hifn_setup_res_desc(struct hifn_device *dev)
 static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
                unsigned offset, unsigned size, int last)
 {
-       struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+       struct hifn_dma *dma = dev->desc_virt;
        int idx;
        dma_addr_t addr;
 
@@ -1710,7 +1710,7 @@ static void hifn_process_ready(struct skcipher_request *req, int error)
 
 static void hifn_clear_rings(struct hifn_device *dev, int error)
 {
-       struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+       struct hifn_dma *dma = dev->desc_virt;
        int i, u;
 
        dev_dbg(&dev->pdev->dev, "ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
@@ -1784,7 +1784,7 @@ static void hifn_work(struct work_struct *work)
 
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->active == 0) {
-               struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+               struct hifn_dma *dma = dev->desc_virt;
 
                if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
                        dev->flags &= ~HIFN_FLAG_CMD_BUSY;
@@ -1815,7 +1815,7 @@ static void hifn_work(struct work_struct *work)
        if (reset) {
                if (++dev->reset >= 5) {
                        int i;
-                       struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+                       struct hifn_dma *dma = dev->desc_virt;
 
                        dev_info(&dev->pdev->dev,
                                 "r: %08x, active: %d, started: %d, "
@@ -1848,8 +1848,8 @@ static void hifn_work(struct work_struct *work)
 
 static irqreturn_t hifn_interrupt(int irq, void *data)
 {
-       struct hifn_device *dev = (struct hifn_device *)data;
-       struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+       struct hifn_device *dev = data;
+       struct hifn_dma *dma = dev->desc_virt;
        u32 dmacsr, restart;
 
        dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
@@ -1914,7 +1914,7 @@ static void hifn_flush(struct hifn_device *dev)
        unsigned long flags;
        struct crypto_async_request *async_req;
        struct skcipher_request *req;
-       struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+       struct hifn_dma *dma = dev->desc_virt;
        int i;
 
        for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
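
All of the hifn hunks above drop the same kind of cast: in C a void * converts implicitly to any object pointer type, so casting dev->desc_virt was pure noise. Reduced example:

struct hifn_dma;

static void example(void *desc_virt)
{
	struct hifn_dma *dma = desc_virt;	/* no cast needed from void * */

	(void)dma;
}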
index 4137a8bf131f0cb3362f3560b963e056df3af3d0..e8690c223584e90d24c93537c87cffdb21cc7795 100644 (file)
@@ -82,3 +82,10 @@ config CRYPTO_DEV_HISI_TRNG
        select CRYPTO_RNG
        help
          Support for HiSilicon TRNG Driver.
+
+config CRYPTO_DEV_HISTB_TRNG
+       tristate "Support for HiSTB TRNG Driver"
+       depends on ARCH_HISI || COMPILE_TEST
+       select HW_RANDOM
+       help
+         Support for HiSTB TRNG Driver.
index 8595a5a5d22888d2e9e3d090f170057971f77fe8..fc51e0edec696281ba5680b16f3471815a892616 100644 (file)
@@ -5,4 +5,4 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
 obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
 hisi_qm-objs = qm.o sgl.o debugfs.o
 obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
-obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += trng/
+obj-y += trng/
index 923f9c2792654f7e2ed739e6041159ba46f6d250..5d0adfb54a34bad4344d696eea9124ad1ff1c565 100644 (file)
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2018-2019 HiSilicon Limited. */
 #include <linux/acpi.h>
-#include <linux/aer.h>
 #include <linux/bitops.h>
 #include <linux/debugfs.h>
 #include <linux/init.h>
index e4c84433a88aa5003ebfbb926acbe2caa73f45dc..8b563ab4748474ebdc2726b8668925e6ba24563d 100644 (file)
@@ -2,7 +2,6 @@
 /* Copyright (c) 2019 HiSilicon Limited. */
 #include <asm/page.h>
 #include <linux/acpi.h>
-#include <linux/aer.h>
 #include <linux/bitmap.h>
 #include <linux/dma-mapping.h>
 #include <linux/idr.h>
index 93572c0d4faa33b8f6065ff94eee6e23038fd29c..77f9f131b85035eeb3494a2fc08ff03aecb5ae7b 100644 (file)
@@ -2,7 +2,6 @@
 /* Copyright (c) 2019 HiSilicon Limited. */
 
 #include <linux/acpi.h>
-#include <linux/aer.h>
 #include <linux/bitops.h>
 #include <linux/debugfs.h>
 #include <linux/init.h>
index d909079f351c64e335c83c9b8d670ec5cdedab69..cf20b057c66b63f92a1adad42e384a028e514b6d 100644 (file)
@@ -1,2 +1,5 @@
 obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += hisi-trng-v2.o
 hisi-trng-v2-objs = trng.o
+
+obj-$(CONFIG_CRYPTO_DEV_HISTB_TRNG) += histb-trng.o
+histb-trng-objs += trng-stb.o
diff --git a/drivers/crypto/hisilicon/trng/trng-stb.c b/drivers/crypto/hisilicon/trng/trng-stb.c
new file mode 100644 (file)
index 0000000..29200a7
--- /dev/null
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Device driver for True RNG in HiSTB SoCs
+ *
+ * Copyright (c) 2023 David Yang
+ */
+
+#include <crypto/internal/rng.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+
+#define HISTB_TRNG_CTRL                0x0
+#define  RNG_SOURCE                    GENMASK(1, 0)
+#define  DROP_ENABLE                   BIT(5)
+#define  POST_PROCESS_ENABLE           BIT(7)
+#define  POST_PROCESS_DEPTH            GENMASK(15, 8)
+#define HISTB_TRNG_NUMBER      0x4
+#define HISTB_TRNG_STAT                0x8
+#define  DATA_COUNT                    GENMASK(2, 0)   /* max 4 */
+
+struct histb_trng_priv {
+       struct hwrng rng;
+       void __iomem *base;
+};
+
+/*
+ * Observed:
+ * depth = 1 -> ~1ms
+ * depth = 255 -> ~16ms
+ */
+static int histb_trng_wait(void __iomem *base)
+{
+       u32 val;
+
+       return readl_relaxed_poll_timeout(base + HISTB_TRNG_STAT, val,
+                                         val & DATA_COUNT, 1000, 30 * 1000);
+}
+
+static void histb_trng_init(void __iomem *base, unsigned int depth)
+{
+       u32 val;
+
+       val = readl_relaxed(base + HISTB_TRNG_CTRL);
+
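+       /*
+        * Select entropy source 2; the meaning of the individual source
+        * values is not documented in this driver.
+        */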
+       val &= ~RNG_SOURCE;
+       val |= 2;
+
+       val &= ~POST_PROCESS_DEPTH;
+       val |= min(depth, 0xffu) << 8;
+
+       val |= POST_PROCESS_ENABLE;
+       val |= DROP_ENABLE;
+
+       writel_relaxed(val, base + HISTB_TRNG_CTRL);
+}
+
+static int histb_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+       struct histb_trng_priv *priv = container_of(rng, typeof(*priv), rng);
+       void __iomem *base = priv->base;
+
+       for (int i = 0; i < max; i += sizeof(u32)) {
+               if (!(readl_relaxed(base + HISTB_TRNG_STAT) & DATA_COUNT)) {
+                       if (!wait)
+                               return i;
+                       if (histb_trng_wait(base)) {
+                               pr_err("failed to generate random number, generated %d\n",
+                                      i);
+                               return i ? i : -ETIMEDOUT;
+                       }
+               }
+               *(u32 *) (data + i) = readl_relaxed(base + HISTB_TRNG_NUMBER);
+       }
+
+       return max;
+}
+
+static unsigned int histb_trng_get_depth(void __iomem *base)
+{
+       return (readl_relaxed(base + HISTB_TRNG_CTRL) & POST_PROCESS_DEPTH) >> 8;
+}
+
+static ssize_t
+depth_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct histb_trng_priv *priv = dev_get_drvdata(dev);
+       void __iomem *base = priv->base;
+
+       return sprintf(buf, "%u\n", histb_trng_get_depth(base));
+}
+
+static ssize_t
+depth_store(struct device *dev, struct device_attribute *attr,
+           const char *buf, size_t count)
+{
+       struct histb_trng_priv *priv = dev_get_drvdata(dev);
+       void __iomem *base = priv->base;
+       unsigned int depth;
+
+       if (kstrtouint(buf, 0, &depth))
+               return -ERANGE;
+
+       histb_trng_init(base, depth);
+       return count;
+}
+
+static DEVICE_ATTR_RW(depth);
+
+static struct attribute *histb_trng_attrs[] = {
+       &dev_attr_depth.attr,
+       NULL,
+};
+
+ATTRIBUTE_GROUPS(histb_trng);
+
+static int histb_trng_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct histb_trng_priv *priv;
+       void __iomem *base;
+       int ret;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       histb_trng_init(base, 144);
+       if (histb_trng_wait(base)) {
+               dev_err(dev, "cannot bring up device\n");
+               return -ENODEV;
+       }
+
+       priv->base = base;
+       priv->rng.name = pdev->name;
+       priv->rng.read = histb_trng_read;
+       ret = devm_hwrng_register(dev, &priv->rng);
+       if (ret) {
+               dev_err(dev, "failed to register hwrng: %d\n", ret);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, priv);
+       return 0;
+}
+
+static const struct of_device_id histb_trng_of_match[] = {
+       { .compatible = "hisilicon,histb-trng", },
+       { }
+};
+
+static struct platform_driver histb_trng_driver = {
+       .probe = histb_trng_probe,
+       .driver = {
+               .name = "histb-trng",
+               .of_match_table = histb_trng_of_match,
+               .dev_groups = histb_trng_groups,
+       },
+};
+
+module_platform_driver(histb_trng_driver);
+
+MODULE_DESCRIPTION("HiSTB True RNG");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("David Yang <[email protected]>");
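
Once the driver registers with the hw_random core, the device is reachable through the framework's usual interfaces (/dev/hwrng and the hwrng sysfs nodes). A small userspace sketch that reads a few bytes; the device path comes from the hw_random framework, not from this driver specifically:

#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0 || read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		return 1;
	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	putchar('\n');
	close(fd);
	return 0;
}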
index 1549bec3aea59334a8202206ad595aea5166764c..f3ce34198775d889d30e20f50df660e39edf7fde 100644 (file)
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2019 HiSilicon Limited. */
 #include <linux/acpi.h>
-#include <linux/aer.h>
 #include <linux/bitops.h>
 #include <linux/debugfs.h>
 #include <linux/init.h>
index fe93d19e304495bdf96e7b86b657244a39376c5f..359aa2b41016e77af00e3462b5a3916e7491738f 100644 (file)
@@ -209,7 +209,7 @@ static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
 
 static void img_hash_dma_callback(void *data)
 {
-       struct img_hash_dev *hdev = (struct img_hash_dev *)data;
+       struct img_hash_dev *hdev = data;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 
        if (ctx->bufcnt) {
@@ -927,7 +927,7 @@ finish:
        img_hash_finish_req(hdev->req, err);
 }
 
-static const struct of_device_id img_hash_match[] = {
+static const struct of_device_id img_hash_match[] __maybe_unused = {
        { .compatible = "img,hash-accelerator" },
        {}
 };
@@ -966,8 +966,7 @@ static int img_hash_probe(struct platform_device *pdev)
        }
 
        /* Write port (DMA or CPU) */
-       hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
+       hdev->cpu_addr = devm_platform_get_and_ioremap_resource(pdev, 1, &hash_res);
        if (IS_ERR(hdev->cpu_addr)) {
                err = PTR_ERR(hdev->cpu_addr);
                goto res_err;
index 6858753af6b324a0b80a6b34ba3057890f9fb6c3..9ff02b5abc4aeb1f9867a47e761027cdea6a3e05 100644 (file)
@@ -474,7 +474,7 @@ release_fw:
                goto retry_fw;
        }
 
-       dev_dbg(priv->dev, "Firmware load failed.\n");
+       dev_err(priv->dev, "Firmware load failed.\n");
 
        return ret;
 }
@@ -1628,19 +1628,23 @@ static int safexcel_probe_generic(void *pdev,
                                                     &priv->ring[i].rdr);
                if (ret) {
                        dev_err(dev, "Failed to initialize rings\n");
-                       return ret;
+                       goto err_cleanup_rings;
                }
 
                priv->ring[i].rdr_req = devm_kcalloc(dev,
                        EIP197_DEFAULT_RING_SIZE,
                        sizeof(*priv->ring[i].rdr_req),
                        GFP_KERNEL);
-               if (!priv->ring[i].rdr_req)
-                       return -ENOMEM;
+               if (!priv->ring[i].rdr_req) {
+                       ret = -ENOMEM;
+                       goto err_cleanup_rings;
+               }
 
                ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
-               if (!ring_irq)
-                       return -ENOMEM;
+               if (!ring_irq) {
+                       ret = -ENOMEM;
+                       goto err_cleanup_rings;
+               }
 
                ring_irq->priv = priv;
                ring_irq->ring = i;
@@ -1654,7 +1658,8 @@ static int safexcel_probe_generic(void *pdev,
                                                ring_irq);
                if (irq < 0) {
                        dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
-                       return irq;
+                       ret = irq;
+                       goto err_cleanup_rings;
                }
 
                priv->ring[i].irq = irq;
@@ -1666,8 +1671,10 @@ static int safexcel_probe_generic(void *pdev,
                snprintf(wq_name, 9, "wq_ring%d", i);
                priv->ring[i].workqueue =
                        create_singlethread_workqueue(wq_name);
-               if (!priv->ring[i].workqueue)
-                       return -ENOMEM;
+               if (!priv->ring[i].workqueue) {
+                       ret = -ENOMEM;
+                       goto err_cleanup_rings;
+               }
 
                priv->ring[i].requests = 0;
                priv->ring[i].busy = false;
@@ -1684,16 +1691,26 @@ static int safexcel_probe_generic(void *pdev,
        ret = safexcel_hw_init(priv);
        if (ret) {
                dev_err(dev, "HW init failed (%d)\n", ret);
-               return ret;
+               goto err_cleanup_rings;
        }
 
        ret = safexcel_register_algorithms(priv);
        if (ret) {
                dev_err(dev, "Failed to register algorithms (%d)\n", ret);
-               return ret;
+               goto err_cleanup_rings;
        }
 
        return 0;
+
+err_cleanup_rings:
+       for (i = 0; i < priv->config.rings; i++) {
+               if (priv->ring[i].irq)
+                       irq_set_affinity_hint(priv->ring[i].irq, NULL);
+               if (priv->ring[i].workqueue)
+                       destroy_workqueue(priv->ring[i].workqueue);
+       }
+
+       return ret;
 }
 
 static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
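
The fix above converts the probe path from early returns to a single unwind label, so rings that were already set up get torn down on failure. The general shape of the pattern, with hypothetical names and assuming nrings never exceeds the array size:

#include <linux/workqueue.h>

struct example_priv {
	struct workqueue_struct *wq[8];
};

static int example_init_rings(struct example_priv *priv, int nrings)
{
	int i, ret;

	for (i = 0; i < nrings; i++) {
		priv->wq[i] = create_singlethread_workqueue("example_ring");
		if (!priv->wq[i]) {
			ret = -ENOMEM;
			goto err_cleanup_rings;
		}
	}
	return 0;

err_cleanup_rings:
	/* Unwind only what was set up before the failure. */
	while (--i >= 0)
		destroy_workqueue(priv->wq[i]);
	return ret;
}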
diff --git a/drivers/crypto/intel/Kconfig b/drivers/crypto/intel/Kconfig
new file mode 100644 (file)
index 0000000..3d90c87
--- /dev/null
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+source "drivers/crypto/intel/keembay/Kconfig"
+source "drivers/crypto/intel/ixp4xx/Kconfig"
+source "drivers/crypto/intel/qat/Kconfig"
diff --git a/drivers/crypto/intel/Makefile b/drivers/crypto/intel/Makefile
new file mode 100644 (file)
index 0000000..b3d0352
--- /dev/null
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-y += keembay/
+obj-y += ixp4xx/
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
diff --git a/drivers/crypto/intel/ixp4xx/Kconfig b/drivers/crypto/intel/ixp4xx/Kconfig
new file mode 100644 (file)
index 0000000..af3cc56
--- /dev/null
@@ -0,0 +1,14 @@
+config CRYPTO_DEV_IXP4XX
+       tristate "Driver for IXP4xx crypto hardware acceleration"
+       depends on (ARCH_IXP4XX || COMPILE_TEST) && IXP4XX_QMGR && IXP4XX_NPE
+       select CRYPTO_AES
+       select CRYPTO_DES
+       select CRYPTO_ECB
+       select CRYPTO_CBC
+       select CRYPTO_CTR
+       select CRYPTO_LIB_DES
+       select CRYPTO_AEAD
+       select CRYPTO_AUTHENC
+       select CRYPTO_SKCIPHER
+       help
+         Driver for the IXP4xx NPE crypto engine.
diff --git a/drivers/crypto/intel/ixp4xx/Makefile b/drivers/crypto/intel/ixp4xx/Makefile
new file mode 100644 (file)
index 0000000..74ebefd
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
new file mode 100644 (file)
index 0000000..ed15379
--- /dev/null
@@ -0,0 +1,1604 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IXP4xx NPE-C crypto driver
+ *
+ * Copyright (C) 2008 Christian Hohnstaedt <[email protected]>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <crypto/ctr.h>
+#include <crypto/internal/des.h>
+#include <crypto/aes.h>
+#include <crypto/hmac.h>
+#include <crypto/sha1.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
+
+/* Interim includes, delete this after v5.14-rc1 */
+#include <linux/soc/ixp4xx/cpu.h>
+
+#define MAX_KEYLEN 32
+
+/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
+#define NPE_CTX_LEN 80
+#define AES_BLOCK128 16
+
+#define NPE_OP_HASH_VERIFY   0x01
+#define NPE_OP_CCM_ENABLE    0x04
+#define NPE_OP_CRYPT_ENABLE  0x08
+#define NPE_OP_HASH_ENABLE   0x10
+#define NPE_OP_NOT_IN_PLACE  0x20
+#define NPE_OP_HMAC_DISABLE  0x40
+#define NPE_OP_CRYPT_ENCRYPT 0x80
+
+#define NPE_OP_CCM_GEN_MIC   0xcc
+#define NPE_OP_HASH_GEN_ICV  0x50
+#define NPE_OP_ENC_GEN_KEY   0xc9
+
+#define MOD_ECB     0x0000
+#define MOD_CTR     0x1000
+#define MOD_CBC_ENC 0x2000
+#define MOD_CBC_DEC 0x3000
+#define MOD_CCM_ENC 0x4000
+#define MOD_CCM_DEC 0x5000
+
+#define KEYLEN_128  4
+#define KEYLEN_192  6
+#define KEYLEN_256  8
+
+#define CIPH_DECR   0x0000
+#define CIPH_ENCR   0x0400
+
+#define MOD_DES     0x0000
+#define MOD_TDEA2   0x0100
+#define MOD_3DES    0x0200
+#define MOD_AES     0x0800
+#define MOD_AES128  (0x0800 | KEYLEN_128)
+#define MOD_AES192  (0x0900 | KEYLEN_192)
+#define MOD_AES256  (0x0a00 | KEYLEN_256)
+
+#define MAX_IVLEN   16
+#define NPE_QLEN    16
+/*
+ * Extra descriptors reserved for context-setup (key registration)
+ * commands while the first NPE_QLEN crypt_ctl entries are busy.
+ */
+#define NPE_QLEN_TOTAL 64
+
+#define CTL_FLAG_UNUSED                0x0000
+#define CTL_FLAG_USED          0x1000
+#define CTL_FLAG_PERFORM_ABLK  0x0001
+#define CTL_FLAG_GEN_ICV       0x0002
+#define CTL_FLAG_GEN_REVAES    0x0004
+#define CTL_FLAG_PERFORM_AEAD  0x0008
+#define CTL_FLAG_MASK          0x000f
+
+#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
+
+#define MD5_DIGEST_SIZE   16
+
+struct buffer_desc {
+       u32 phys_next;
+#ifdef __ARMEB__
+       u16 buf_len;
+       u16 pkt_len;
+#else
+       u16 pkt_len;
+       u16 buf_len;
+#endif
+       dma_addr_t phys_addr;
+       u32 __reserved[4];
+       struct buffer_desc *next;
+       enum dma_data_direction dir;
+};
+
+struct crypt_ctl {
+#ifdef __ARMEB__
+       u8 mode;                /* NPE_OP_*  operation mode */
+       u8 init_len;
+       u16 reserved;
+#else
+       u16 reserved;
+       u8 init_len;
+       u8 mode;                /* NPE_OP_*  operation mode */
+#endif
+       u8 iv[MAX_IVLEN];       /* IV for CBC mode or CTR IV for CTR mode */
+       u32 icv_rev_aes;        /* icv or rev aes */
+       u32 src_buf;
+       u32 dst_buf;
+#ifdef __ARMEB__
+       u16 auth_offs;          /* Authentication start offset */
+       u16 auth_len;           /* Authentication data length */
+       u16 crypt_offs;         /* Cryption start offset */
+       u16 crypt_len;          /* Cryption data length */
+#else
+       u16 auth_len;           /* Authentication data length */
+       u16 auth_offs;          /* Authentication start offset */
+       u16 crypt_len;          /* Cryption data length */
+       u16 crypt_offs;         /* Cryption start offset */
+#endif
+       u32 aadAddr;            /* Additional Auth Data Addr for CCM mode */
+       u32 crypto_ctx;         /* NPE Crypto Param structure address */
+
+       /* Used by Host: 4*4 bytes*/
+       unsigned int ctl_flags;
+       union {
+               struct skcipher_request *ablk_req;
+               struct aead_request *aead_req;
+               struct crypto_tfm *tfm;
+       } data;
+       struct buffer_desc *regist_buf;
+       u8 *regist_ptr;
+};
+
+struct ablk_ctx {
+       struct buffer_desc *src;
+       struct buffer_desc *dst;
+       u8 iv[MAX_IVLEN];
+       bool encrypt;
+       struct skcipher_request fallback_req;   // keep at the end
+};
+
+struct aead_ctx {
+       struct buffer_desc *src;
+       struct buffer_desc *dst;
+       struct scatterlist ivlist;
+       /* used when the hmac is not on one sg entry */
+       u8 *hmac_virt;
+       int encrypt;
+};
+
+struct ix_hash_algo {
+       u32 cfgword;
+       unsigned char *icv;
+};
+
+struct ix_sa_dir {
+       unsigned char *npe_ctx;
+       dma_addr_t npe_ctx_phys;
+       int npe_ctx_idx;
+       u8 npe_mode;
+};
+
+struct ixp_ctx {
+       struct ix_sa_dir encrypt;
+       struct ix_sa_dir decrypt;
+       int authkey_len;
+       u8 authkey[MAX_KEYLEN];
+       int enckey_len;
+       u8 enckey[MAX_KEYLEN];
+       u8 salt[MAX_IVLEN];
+       u8 nonce[CTR_RFC3686_NONCE_SIZE];
+       unsigned int salted;
+       atomic_t configuring;
+       struct completion completion;
+       struct crypto_skcipher *fallback_tfm;
+};
+
+struct ixp_alg {
+       struct skcipher_alg crypto;
+       const struct ix_hash_algo *hash;
+       u32 cfg_enc;
+       u32 cfg_dec;
+
+       int registered;
+};
+
+struct ixp_aead_alg {
+       struct aead_alg crypto;
+       const struct ix_hash_algo *hash;
+       u32 cfg_enc;
+       u32 cfg_dec;
+
+       int registered;
+};
+
+static const struct ix_hash_algo hash_alg_md5 = {
+       .cfgword        = 0xAA010004,
+       .icv            = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+                         "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+};
+
+static const struct ix_hash_algo hash_alg_sha1 = {
+       .cfgword        = 0x00000005,
+       .icv            = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
+                         "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
+};
+
+static struct npe *npe_c;
+
+static unsigned int send_qid;
+static unsigned int recv_qid;
+static struct dma_pool *buffer_pool;
+static struct dma_pool *ctx_pool;
+
+static struct crypt_ctl *crypt_virt;
+static dma_addr_t crypt_phys;
+
+static int support_aes = 1;
+
+static struct platform_device *pdev;
+
+static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
+{
+       return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
+}
+
+static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
+{
+       return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
+}
+
+static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
+{
+       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
+}
+
+static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
+{
+       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
+}
+
+static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
+{
+       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
+}
+
+static int setup_crypt_desc(void)
+{
+       struct device *dev = &pdev->dev;
+
+       BUILD_BUG_ON(!(IS_ENABLED(CONFIG_COMPILE_TEST) &&
+                      IS_ENABLED(CONFIG_64BIT)) &&
+                    sizeof(struct crypt_ctl) != 64);
+       crypt_virt = dma_alloc_coherent(dev,
+                                       NPE_QLEN * sizeof(struct crypt_ctl),
+                                       &crypt_phys, GFP_ATOMIC);
+       if (!crypt_virt)
+               return -ENOMEM;
+       return 0;
+}
+
+static DEFINE_SPINLOCK(desc_lock);
+static struct crypt_ctl *get_crypt_desc(void)
+{
+       int i;
+       static int idx;
+       unsigned long flags;
+
+       spin_lock_irqsave(&desc_lock, flags);
+
+       if (unlikely(!crypt_virt))
+               setup_crypt_desc();
+       if (unlikely(!crypt_virt)) {
+               spin_unlock_irqrestore(&desc_lock, flags);
+               return NULL;
+       }
+       i = idx;
+       if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
+               if (++idx >= NPE_QLEN)
+                       idx = 0;
+               crypt_virt[i].ctl_flags = CTL_FLAG_USED;
+               spin_unlock_irqrestore(&desc_lock, flags);
+               return crypt_virt + i;
+       } else {
+               spin_unlock_irqrestore(&desc_lock, flags);
+               return NULL;
+       }
+}
+
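+/*
+ * Descriptor allocation is a two-tier ring: get_crypt_desc() above hands
+ * out the first NPE_QLEN entries, while get_crypt_desc_emerg() below
+ * falls back to the reserved NPE_QLEN..NPE_QLEN_TOTAL-1 range so that
+ * context-setup commands can still make progress when the normal ring
+ * is fully occupied by in-flight requests.
+ */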
+static DEFINE_SPINLOCK(emerg_lock);
+static struct crypt_ctl *get_crypt_desc_emerg(void)
+{
+       int i;
+       static int idx = NPE_QLEN;
+       struct crypt_ctl *desc;
+       unsigned long flags;
+
+       desc = get_crypt_desc();
+       if (desc)
+               return desc;
+       if (unlikely(!crypt_virt))
+               return NULL;
+
+       spin_lock_irqsave(&emerg_lock, flags);
+       i = idx;
+       if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
+               if (++idx >= NPE_QLEN_TOTAL)
+                       idx = NPE_QLEN;
+               crypt_virt[i].ctl_flags = CTL_FLAG_USED;
+               spin_unlock_irqrestore(&emerg_lock, flags);
+               return crypt_virt + i;
+       } else {
+               spin_unlock_irqrestore(&emerg_lock, flags);
+               return NULL;
+       }
+}
+
+static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
+                          dma_addr_t phys)
+{
+       while (buf) {
+               struct buffer_desc *buf1;
+               u32 phys1;
+
+               buf1 = buf->next;
+               phys1 = buf->phys_next;
+               dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
+               dma_pool_free(buffer_pool, buf, phys);
+               buf = buf1;
+               phys = phys1;
+       }
+}
+
+static struct tasklet_struct crypto_done_tasklet;
+
+static void finish_scattered_hmac(struct crypt_ctl *crypt)
+{
+       struct aead_request *req = crypt->data.aead_req;
+       struct aead_ctx *req_ctx = aead_request_ctx(req);
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       int authsize = crypto_aead_authsize(tfm);
+       int decryptlen = req->assoclen + req->cryptlen - authsize;
+
+       if (req_ctx->encrypt) {
+               scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
+                                        decryptlen, authsize, 1);
+       }
+       dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
+}
+
+static void one_packet(dma_addr_t phys)
+{
+       struct device *dev = &pdev->dev;
+       struct crypt_ctl *crypt;
+       struct ixp_ctx *ctx;
+       int failed;
+
+       failed = phys & 0x1 ? -EBADMSG : 0;
+       phys &= ~0x3;
+       crypt = crypt_phys2virt(phys);
+
+       switch (crypt->ctl_flags & CTL_FLAG_MASK) {
+       case CTL_FLAG_PERFORM_AEAD: {
+               struct aead_request *req = crypt->data.aead_req;
+               struct aead_ctx *req_ctx = aead_request_ctx(req);
+
+               free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+               free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+               if (req_ctx->hmac_virt)
+                       finish_scattered_hmac(crypt);
+
+               aead_request_complete(req, failed);
+               break;
+       }
+       case CTL_FLAG_PERFORM_ABLK: {
+               struct skcipher_request *req = crypt->data.ablk_req;
+               struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
+               struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+               unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+               unsigned int offset;
+
+               if (ivsize > 0) {
+                       offset = req->cryptlen - ivsize;
+                       if (req_ctx->encrypt) {
+                               scatterwalk_map_and_copy(req->iv, req->dst,
+                                                        offset, ivsize, 0);
+                       } else {
+                               memcpy(req->iv, req_ctx->iv, ivsize);
+                               memzero_explicit(req_ctx->iv, ivsize);
+                       }
+               }
+
+               if (req_ctx->dst)
+                       free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+
+               free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+               skcipher_request_complete(req, failed);
+               break;
+       }
+       case CTL_FLAG_GEN_ICV:
+               ctx = crypto_tfm_ctx(crypt->data.tfm);
+               dma_pool_free(ctx_pool, crypt->regist_ptr,
+                             crypt->regist_buf->phys_addr);
+               dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
+               if (atomic_dec_and_test(&ctx->configuring))
+                       complete(&ctx->completion);
+               break;
+       case CTL_FLAG_GEN_REVAES:
+               ctx = crypto_tfm_ctx(crypt->data.tfm);
+               *(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
+               if (atomic_dec_and_test(&ctx->configuring))
+                       complete(&ctx->completion);
+               break;
+       default:
+               BUG();
+       }
+       crypt->ctl_flags = CTL_FLAG_UNUSED;
+}
+
+static void irqhandler(void *_unused)
+{
+       tasklet_schedule(&crypto_done_tasklet);
+}
+
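+/*
+ * Drain at most four completions per tasklet run, then reschedule;
+ * this bounds how long a single softirq pass can spend here.
+ */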
+static void crypto_done_action(unsigned long arg)
+{
+       int i;
+
+       for (i = 0; i < 4; i++) {
+               dma_addr_t phys = qmgr_get_entry(recv_qid);
+               if (!phys)
+                       return;
+               one_packet(phys);
+       }
+       tasklet_schedule(&crypto_done_tasklet);
+}
+
+static int init_ixp_crypto(struct device *dev)
+{
+       struct device_node *np = dev->of_node;
+       u32 msg[2] = { 0, 0 };
+       int ret = -ENODEV;
+       u32 npe_id;
+
+       dev_info(dev, "probing...\n");
+
+       /* Locate the NPE and queue manager to use from device tree */
+       if (IS_ENABLED(CONFIG_OF) && np) {
+               struct of_phandle_args queue_spec;
+               struct of_phandle_args npe_spec;
+
+               ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
+                                                      1, 0, &npe_spec);
+               if (ret) {
+                       dev_err(dev, "no NPE engine specified\n");
+                       return -ENODEV;
+               }
+               npe_id = npe_spec.args[0];
+
+               ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
+                                                      &queue_spec);
+               if (ret) {
+                       dev_err(dev, "no rx queue phandle\n");
+                       return -ENODEV;
+               }
+               recv_qid = queue_spec.args[0];
+
+               ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
+                                                      &queue_spec);
+               if (ret) {
+                       dev_err(dev, "no txready queue phandle\n");
+                       return -ENODEV;
+               }
+               send_qid = queue_spec.args[0];
+       } else {
+               /*
+                * Hardcoded engine when using platform data, this goes away
+                * when we switch to using DT only.
+                */
+               npe_id = 2;
+               send_qid = 29;
+               recv_qid = 30;
+       }
+
+       npe_c = npe_request(npe_id);
+       if (!npe_c)
+               return ret;
+
+       if (!npe_running(npe_c)) {
+               ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
+               if (ret)
+                       goto npe_release;
+               if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+                       goto npe_error;
+       } else {
+               if (npe_send_message(npe_c, msg, "STATUS_MSG"))
+                       goto npe_error;
+
+               if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+                       goto npe_error;
+       }
+
+       switch ((msg[1] >> 16) & 0xff) {
+       case 3:
+               dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
+               support_aes = 0;
+               break;
+       case 4:
+       case 5:
+               support_aes = 1;
+               break;
+       default:
+               dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
+               ret = -ENODEV;
+               goto npe_release;
+       }
+       /* buffer_pool will also be used to sometimes store the hmac,
+        * so assure it is large enough
+        */
+       BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
+       buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
+                                     32, 0);
+       ret = -ENOMEM;
+       if (!buffer_pool)
+               goto err;
+
+       ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
+       if (!ctx_pool)
+               goto err;
+
+       ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
+                                "ixp_crypto:out", NULL);
+       if (ret)
+               goto err;
+       ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
+                                "ixp_crypto:in", NULL);
+       if (ret) {
+               qmgr_release_queue(send_qid);
+               goto err;
+       }
+       qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
+       tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
+
+       qmgr_enable_irq(recv_qid);
+       return 0;
+
+npe_error:
+       dev_err(dev, "%s not responding\n", npe_name(npe_c));
+       ret = -EIO;
+err:
+       dma_pool_destroy(ctx_pool);
+       dma_pool_destroy(buffer_pool);
+npe_release:
+       npe_release(npe_c);
+       return ret;
+}
+
+static void release_ixp_crypto(struct device *dev)
+{
+       qmgr_disable_irq(recv_qid);
+       tasklet_kill(&crypto_done_tasklet);
+
+       qmgr_release_queue(send_qid);
+       qmgr_release_queue(recv_qid);
+
+       dma_pool_destroy(ctx_pool);
+       dma_pool_destroy(buffer_pool);
+
+       npe_release(npe_c);
+
+       if (crypt_virt)
+               dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
+                                 crypt_virt, crypt_phys);
+}
+
+static void reset_sa_dir(struct ix_sa_dir *dir)
+{
+       memset(dir->npe_ctx, 0, NPE_CTX_LEN);
+       dir->npe_ctx_idx = 0;
+       dir->npe_mode = 0;
+}
+
+static int init_sa_dir(struct ix_sa_dir *dir)
+{
+       dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
+       if (!dir->npe_ctx)
+               return -ENOMEM;
+
+       reset_sa_dir(dir);
+       return 0;
+}
+
+static void free_sa_dir(struct ix_sa_dir *dir)
+{
+       memset(dir->npe_ctx, 0, NPE_CTX_LEN);
+       dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
+}
+
+static int init_tfm(struct crypto_tfm *tfm)
+{
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+       int ret;
+
+       atomic_set(&ctx->configuring, 0);
+       ret = init_sa_dir(&ctx->encrypt);
+       if (ret)
+               return ret;
+       ret = init_sa_dir(&ctx->decrypt);
+       if (ret)
+               free_sa_dir(&ctx->encrypt);
+
+       return ret;
+}
+
+static int init_tfm_ablk(struct crypto_skcipher *tfm)
+{
+       struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
+       struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
+       const char *name = crypto_tfm_alg_name(ctfm);
+
+       ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(ctx->fallback_tfm)) {
+               pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
+                       name, PTR_ERR(ctx->fallback_tfm));
+               return PTR_ERR(ctx->fallback_tfm);
+       }
+
+       pr_info("Fallback for %s is %s\n",
+                crypto_tfm_alg_driver_name(&tfm->base),
+                crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
+                );
+
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
+       return init_tfm(crypto_skcipher_tfm(tfm));
+}
+
+static int init_tfm_aead(struct crypto_aead *tfm)
+{
+       crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
+       return init_tfm(crypto_aead_tfm(tfm));
+}
+
+static void exit_tfm(struct crypto_tfm *tfm)
+{
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       free_sa_dir(&ctx->encrypt);
+       free_sa_dir(&ctx->decrypt);
+}
+
+static void exit_tfm_ablk(struct crypto_skcipher *tfm)
+{
+       struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
+       struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
+
+       crypto_free_skcipher(ctx->fallback_tfm);
+       exit_tfm(crypto_skcipher_tfm(tfm));
+}
+
+static void exit_tfm_aead(struct crypto_aead *tfm)
+{
+       exit_tfm(crypto_aead_tfm(tfm));
+}
+
+static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
+                             int init_len, u32 ctx_addr, const u8 *key,
+                             int key_len)
+{
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypt_ctl *crypt;
+       struct buffer_desc *buf;
+       int i;
+       u8 *pad;
+       dma_addr_t pad_phys, buf_phys;
+
+       BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
+       pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
+       if (!pad)
+               return -ENOMEM;
+       buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
+       if (!buf) {
+               dma_pool_free(ctx_pool, pad, pad_phys);
+               return -ENOMEM;
+       }
+       crypt = get_crypt_desc_emerg();
+       if (!crypt) {
+               dma_pool_free(ctx_pool, pad, pad_phys);
+               dma_pool_free(buffer_pool, buf, buf_phys);
+               return -EAGAIN;
+       }
+
+       memcpy(pad, key, key_len);
+       memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
+       for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
+               pad[i] ^= xpad;
+
+       crypt->data.tfm = tfm;
+       crypt->regist_ptr = pad;
+       crypt->regist_buf = buf;
+
+       crypt->auth_offs = 0;
+       crypt->auth_len = HMAC_PAD_BLOCKLEN;
+       crypt->crypto_ctx = ctx_addr;
+       crypt->src_buf = buf_phys;
+       crypt->icv_rev_aes = target;
+       crypt->mode = NPE_OP_HASH_GEN_ICV;
+       crypt->init_len = init_len;
+       crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
+
+       buf->next = NULL;
+       buf->buf_len = HMAC_PAD_BLOCKLEN;
+       buf->pkt_len = 0;
+       buf->phys_addr = pad_phys;
+
+       atomic_inc(&ctx->configuring);
+       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(send_qid));
+       return 0;
+}
+
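+/*
+ * Precompute the HMAC inner and outer chaining variables: the key is
+ * XOR-padded with ipad/opad and hashed once by the NPE, so later
+ * requests only hash the message itself.
+ */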
+static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
+                     const u8 *key, int key_len, unsigned int digest_len)
+{
+       u32 itarget, otarget, npe_ctx_addr;
+       unsigned char *cinfo;
+       int init_len, ret = 0;
+       u32 cfgword;
+       struct ix_sa_dir *dir;
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct ix_hash_algo *algo;
+
+       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+       cinfo = dir->npe_ctx + dir->npe_ctx_idx;
+       algo = ix_hash(tfm);
+
+       /* write cfg word to cryptinfo */
+       cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
+#ifndef __ARMEB__
+       cfgword ^= 0xAA000000; /* change the "byte swap" flags */
+#endif
+       *(__be32 *)cinfo = cpu_to_be32(cfgword);
+       cinfo += sizeof(cfgword);
+
+       /* write ICV to cryptinfo */
+       memcpy(cinfo, algo->icv, digest_len);
+       cinfo += digest_len;
+
+       itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
+                               + sizeof(algo->cfgword);
+       otarget = itarget + digest_len;
+       init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
+       npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
+
+       dir->npe_ctx_idx += init_len;
+       dir->npe_mode |= NPE_OP_HASH_ENABLE;
+
+       if (!encrypt)
+               dir->npe_mode |= NPE_OP_HASH_VERIFY;
+
+       ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
+                                init_len, npe_ctx_addr, key, key_len);
+       if (ret)
+               return ret;
+       return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
+                                 init_len, npe_ctx_addr, key, key_len);
+}
+
+static int gen_rev_aes_key(struct crypto_tfm *tfm)
+{
+       struct crypt_ctl *crypt;
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct ix_sa_dir *dir = &ctx->decrypt;
+
+       crypt = get_crypt_desc_emerg();
+       if (!crypt)
+               return -EAGAIN;
+
+       *(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
+
+       crypt->data.tfm = tfm;
+       crypt->crypt_offs = 0;
+       crypt->crypt_len = AES_BLOCK128;
+       crypt->src_buf = 0;
+       crypt->crypto_ctx = dir->npe_ctx_phys;
+       crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
+       crypt->mode = NPE_OP_ENC_GEN_KEY;
+       crypt->init_len = dir->npe_ctx_idx;
+       crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
+
+       atomic_inc(&ctx->configuring);
+       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(send_qid));
+       return 0;
+}
+
+static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
+                       int key_len)
+{
+       u8 *cinfo;
+       u32 cipher_cfg;
+       u32 keylen_cfg = 0;
+       struct ix_sa_dir *dir;
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+       int err;
+
+       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+       cinfo = dir->npe_ctx;
+
+       if (encrypt) {
+               cipher_cfg = cipher_cfg_enc(tfm);
+               dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
+       } else {
+               cipher_cfg = cipher_cfg_dec(tfm);
+       }
+       if (cipher_cfg & MOD_AES) {
+               switch (key_len) {
+               case 16:
+                       keylen_cfg = MOD_AES128;
+                       break;
+               case 24:
+                       keylen_cfg = MOD_AES192;
+                       break;
+               case 32:
+                       keylen_cfg = MOD_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               cipher_cfg |= keylen_cfg;
+       } else {
+               err = crypto_des_verify_key(tfm, key);
+               if (err)
+                       return err;
+       }
+       /* write cfg word to cryptinfo */
+       *(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
+       cinfo += sizeof(cipher_cfg);
+
+       /* write cipher key to cryptinfo */
+       memcpy(cinfo, key, key_len);
+       /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
+       if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
+               memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
+               key_len = DES3_EDE_KEY_SIZE;
+       }
+       dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
+       dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
+       if ((cipher_cfg & MOD_AES) && !encrypt)
+               return gen_rev_aes_key(tfm);
+
+       return 0;
+}
+
+static struct buffer_desc *chainup_buffers(struct device *dev,
+               struct scatterlist *sg, unsigned int nbytes,
+               struct buffer_desc *buf, gfp_t flags,
+               enum dma_data_direction dir)
+{
+       for (; nbytes > 0; sg = sg_next(sg)) {
+               unsigned int len = min(nbytes, sg->length);
+               struct buffer_desc *next_buf;
+               dma_addr_t next_buf_phys;
+               void *ptr;
+
+               nbytes -= len;
+               ptr = sg_virt(sg);
+               next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
+               if (!next_buf) {
+                       buf = NULL;
+                       break;
+               }
+               sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
+               buf->next = next_buf;
+               buf->phys_next = next_buf_phys;
+               buf = next_buf;
+
+               buf->phys_addr = sg_dma_address(sg);
+               buf->buf_len = len;
+               buf->dir = dir;
+       }
+       if (!buf)       /* pool allocation failed mid-chain */
+               return NULL;
+       buf->next = NULL;
+       buf->phys_next = 0;
+       return buf;
+}
+
+static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                      unsigned int key_len)
+{
+       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+       int ret;
+
+       init_completion(&ctx->completion);
+       atomic_inc(&ctx->configuring);
+
+       reset_sa_dir(&ctx->encrypt);
+       reset_sa_dir(&ctx->decrypt);
+
+       ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
+       ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
+
+       ret = setup_cipher(&tfm->base, 0, key, key_len);
+       if (ret)
+               goto out;
+       ret = setup_cipher(&tfm->base, 1, key, key_len);
+out:
+       if (!atomic_dec_and_test(&ctx->configuring))
+               wait_for_completion(&ctx->completion);
+       if (ret)
+               return ret;
+       crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+       crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+       return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
+}
+
+static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                           unsigned int key_len)
+{
+       return verify_skcipher_des3_key(tfm, key) ?:
+              ablk_setkey(tfm, key, key_len);
+}
+
+static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                              unsigned int key_len)
+{
+       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       /* the nonce is stored in bytes at end of key */
+       if (key_len < CTR_RFC3686_NONCE_SIZE)
+               return -EINVAL;
+
+       memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
+              CTR_RFC3686_NONCE_SIZE);
+
+       key_len -= CTR_RFC3686_NONCE_SIZE;
+       return ablk_setkey(tfm, key, key_len);
+}
+
+static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+       struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
+       struct ablk_ctx *rctx = skcipher_request_ctx(areq);
+       int err;
+
+       skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+       skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+                                     areq->base.complete, areq->base.data);
+       skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+                                  areq->cryptlen, areq->iv);
+       if (encrypt)
+               err = crypto_skcipher_encrypt(&rctx->fallback_req);
+       else
+               err = crypto_skcipher_decrypt(&rctx->fallback_req);
+       return err;
+}
+
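+/*
+ * Queue one skcipher request to the NPE.  Requests with more than one src
+ * or dst SG entry are punted to the software fallback; otherwise a crypt
+ * descriptor is built and queued and -EINPROGRESS is returned.
+ */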
+static int ablk_perform(struct skcipher_request *req, int encrypt)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+       unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+       struct ix_sa_dir *dir;
+       struct crypt_ctl *crypt;
+       unsigned int nbytes = req->cryptlen;
+       enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+       struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
+       struct buffer_desc src_hook;
+       struct device *dev = &pdev->dev;
+       unsigned int offset;
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+                               GFP_KERNEL : GFP_ATOMIC;
+
+       if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
+               return ixp4xx_cipher_fallback(req, encrypt);
+
+       if (qmgr_stat_full(send_qid))
+               return -EAGAIN;
+       if (atomic_read(&ctx->configuring))
+               return -EAGAIN;
+
+       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+       req_ctx->encrypt = encrypt;
+
+       crypt = get_crypt_desc();
+       if (!crypt)
+               return -ENOMEM;
+
+       crypt->data.ablk_req = req;
+       crypt->crypto_ctx = dir->npe_ctx_phys;
+       crypt->mode = dir->npe_mode;
+       crypt->init_len = dir->npe_ctx_idx;
+
+       crypt->crypt_offs = 0;
+       crypt->crypt_len = nbytes;
+
+       BUG_ON(ivsize && !req->iv);
+       memcpy(crypt->iv, req->iv, ivsize);
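+       /*
+        * For decryption, stash the last ciphertext block; the completion
+        * path can then report it back as the chaining IV.
+        */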
+       if (ivsize > 0 && !encrypt) {
+               offset = req->cryptlen - ivsize;
+               scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
+       }
+       if (req->src != req->dst) {
+               struct buffer_desc dst_hook;
+
+               crypt->mode |= NPE_OP_NOT_IN_PLACE;
+               /*
+                * This was most likely never tested by Intel for more than
+                * one dst buffer.
+                */
+               req_ctx->dst = NULL;
+               if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+                                    flags, DMA_FROM_DEVICE))
+                       goto free_buf_dest;
+               src_direction = DMA_TO_DEVICE;
+               req_ctx->dst = dst_hook.next;
+               crypt->dst_buf = dst_hook.phys_next;
+       } else {
+               req_ctx->dst = NULL;
+       }
+       req_ctx->src = NULL;
+       if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
+                            src_direction))
+               goto free_buf_src;
+
+       req_ctx->src = src_hook.next;
+       crypt->src_buf = src_hook.phys_next;
+       crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
+       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(send_qid));
+       return -EINPROGRESS;
+
+free_buf_src:
+       free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+free_buf_dest:
+       if (req->src != req->dst)
+               free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+
+       crypt->ctl_flags = CTL_FLAG_UNUSED;
+       return -ENOMEM;
+}
+
+static int ablk_encrypt(struct skcipher_request *req)
+{
+       return ablk_perform(req, 1);
+}
+
+static int ablk_decrypt(struct skcipher_request *req)
+{
+       return ablk_perform(req, 0);
+}
+
+static int ablk_rfc3686_crypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+       u8 iv[CTR_RFC3686_BLOCK_SIZE];
+       u8 *info = req->iv;
+       int ret;
+
+       /* set up counter block */
+       memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
+       memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
+
+       /* initialize counter portion of counter block */
+       *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
+               cpu_to_be32(1);
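+       /* Resulting 16-byte counter block: | nonce (4) | IV (8) | ctr = 1 (4) | */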
+
+       req->iv = iv;
+       ret = ablk_perform(req, 1);
+       req->iv = info;
+       return ret;
+}
+
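+/*
+ * Queue one AEAD request to the NPE: authenticate req->assoclen + cryptlen
+ * bytes from offset 0, en/decrypt the payload at cryptoffset and point
+ * icv_rev_aes at the tag.  Returns -EINPROGRESS once queued.
+ */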
+static int aead_perform(struct aead_request *req, int encrypt,
+                       int cryptoffset, int eff_cryptlen, u8 *iv)
+{
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+       unsigned int ivsize = crypto_aead_ivsize(tfm);
+       unsigned int authsize = crypto_aead_authsize(tfm);
+       struct ix_sa_dir *dir;
+       struct crypt_ctl *crypt;
+       unsigned int cryptlen;
+       struct buffer_desc *buf, src_hook;
+       struct aead_ctx *req_ctx = aead_request_ctx(req);
+       struct device *dev = &pdev->dev;
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+                               GFP_KERNEL : GFP_ATOMIC;
+       enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+       unsigned int lastlen;
+
+       if (qmgr_stat_full(send_qid))
+               return -EAGAIN;
+       if (atomic_read(&ctx->configuring))
+               return -EAGAIN;
+
+       if (encrypt) {
+               dir = &ctx->encrypt;
+               cryptlen = req->cryptlen;
+       } else {
+               dir = &ctx->decrypt;
+               /* req->cryptlen includes the authsize when decrypting */
+               cryptlen = req->cryptlen - authsize;
+               eff_cryptlen -= authsize;
+       }
+       crypt = get_crypt_desc();
+       if (!crypt)
+               return -ENOMEM;
+
+       crypt->data.aead_req = req;
+       crypt->crypto_ctx = dir->npe_ctx_phys;
+       crypt->mode = dir->npe_mode;
+       crypt->init_len = dir->npe_ctx_idx;
+
+       crypt->crypt_offs = cryptoffset;
+       crypt->crypt_len = eff_cryptlen;
+
+       crypt->auth_offs = 0;
+       crypt->auth_len = req->assoclen + cryptlen;
+       BUG_ON(ivsize && !req->iv);
+       memcpy(crypt->iv, req->iv, ivsize);
+
+       buf = chainup_buffers(dev, req->src, crypt->auth_len,
+                             &src_hook, flags, src_direction);
+       req_ctx->src = src_hook.next;
+       crypt->src_buf = src_hook.phys_next;
+       if (!buf)
+               goto free_buf_src;
+
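+       /* If the tag fits entirely in the last buffer, access it in place. */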
+       lastlen = buf->buf_len;
+       if (lastlen >= authsize)
+               crypt->icv_rev_aes = buf->phys_addr +
+                                    buf->buf_len - authsize;
+
+       req_ctx->dst = NULL;
+
+       if (req->src != req->dst) {
+               struct buffer_desc dst_hook;
+
+               crypt->mode |= NPE_OP_NOT_IN_PLACE;
+               src_direction = DMA_TO_DEVICE;
+
+               buf = chainup_buffers(dev, req->dst, crypt->auth_len,
+                                     &dst_hook, flags, DMA_FROM_DEVICE);
+               req_ctx->dst = dst_hook.next;
+               crypt->dst_buf = dst_hook.phys_next;
+
+               if (!buf)
+                       goto free_buf_dst;
+
+               if (encrypt) {
+                       lastlen = buf->buf_len;
+                       if (lastlen >= authsize)
+                               crypt->icv_rev_aes = buf->phys_addr +
+                                                    buf->buf_len - authsize;
+               }
+       }
+
+       if (unlikely(lastlen < authsize)) {
+               dma_addr_t dma;
+               /*
+                * The HMAC bytes are scattered across SG entries; copy them
+                * into a safe contiguous buffer the NPE can address.
+                */
+               req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma);
+               crypt->icv_rev_aes = dma;
+               if (unlikely(!req_ctx->hmac_virt))
+                       goto free_buf_dst;
+               if (!encrypt) {
+                       scatterwalk_map_and_copy(req_ctx->hmac_virt,
+                                                req->src, cryptlen, authsize, 0);
+               }
+               req_ctx->encrypt = encrypt;
+       } else {
+               req_ctx->hmac_virt = NULL;
+       }
+
+       crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
+       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(send_qid));
+       return -EINPROGRESS;
+
+free_buf_dst:
+       free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+       free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+       crypt->ctl_flags = CTL_FLAG_UNUSED;
+       return -ENOMEM;
+}
+
+static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
+{
+       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+       unsigned int digest_len = crypto_aead_maxauthsize(tfm);
+       int ret;
+
+       if (!ctx->enckey_len && !ctx->authkey_len)
+               return 0;
+       init_completion(&ctx->completion);
+       atomic_inc(&ctx->configuring);
+
+       reset_sa_dir(&ctx->encrypt);
+       reset_sa_dir(&ctx->decrypt);
+
+       ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
+       if (ret)
+               goto out;
+       ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
+       if (ret)
+               goto out;
+       ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
+                        ctx->authkey_len, digest_len);
+       if (ret)
+               goto out;
+       ret = setup_auth(&tfm->base, 1, authsize,  ctx->authkey,
+                        ctx->authkey_len, digest_len);
+out:
+       if (!atomic_dec_and_test(&ctx->configuring))
+               wait_for_completion(&ctx->completion);
+       return ret;
+}
+
+static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+       int max = crypto_aead_maxauthsize(tfm) >> 2;
+
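+       /* authsize must be a nonzero multiple of four, up to the digest size. */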
+       if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
+               return -EINVAL;
+       return aead_setup(tfm, authsize);
+}
+
+static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
+                      unsigned int keylen)
+{
+       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+       struct crypto_authenc_keys keys;
+
+       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+               goto badkey;
+
+       if (keys.authkeylen > sizeof(ctx->authkey))
+               goto badkey;
+
+       if (keys.enckeylen > sizeof(ctx->enckey))
+               goto badkey;
+
+       memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+       memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+       ctx->authkey_len = keys.authkeylen;
+       ctx->enckey_len = keys.enckeylen;
+
+       memzero_explicit(&keys, sizeof(keys));
+       return aead_setup(tfm, crypto_aead_authsize(tfm));
+badkey:
+       memzero_explicit(&keys, sizeof(keys));
+       return -EINVAL;
+}
+
+static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+                           unsigned int keylen)
+{
+       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+       struct crypto_authenc_keys keys;
+       int err;
+
+       err = crypto_authenc_extractkeys(&keys, key, keylen);
+       if (unlikely(err))
+               goto badkey;
+
+       err = -EINVAL;
+       if (keys.authkeylen > sizeof(ctx->authkey))
+               goto badkey;
+
+       err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
+       if (err)
+               goto badkey;
+
+       memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+       memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+       ctx->authkey_len = keys.authkeylen;
+       ctx->enckey_len = keys.enckeylen;
+
+       memzero_explicit(&keys, sizeof(keys));
+       return aead_setup(tfm, crypto_aead_authsize(tfm));
+badkey:
+       memzero_explicit(&keys, sizeof(keys));
+       return err;
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+       return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+       return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
+}
+
+static struct ixp_alg ixp4xx_algos[] = {
+{
+       .crypto = {
+               .base.cra_name          = "cbc(des)",
+               .base.cra_blocksize     = DES_BLOCK_SIZE,
+
+               .min_keysize            = DES_KEY_SIZE,
+               .max_keysize            = DES_KEY_SIZE,
+               .ivsize                 = DES_BLOCK_SIZE,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+       .crypto = {
+               .base.cra_name          = "ecb(des)",
+               .base.cra_blocksize     = DES_BLOCK_SIZE,
+               .min_keysize            = DES_KEY_SIZE,
+               .max_keysize            = DES_KEY_SIZE,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
+}, {
+       .crypto = {
+               .base.cra_name          = "cbc(des3_ede)",
+               .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
+
+               .min_keysize            = DES3_EDE_KEY_SIZE,
+               .max_keysize            = DES3_EDE_KEY_SIZE,
+               .ivsize                 = DES3_EDE_BLOCK_SIZE,
+               .setkey                 = ablk_des3_setkey,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+       .crypto = {
+               .base.cra_name          = "ecb(des3_ede)",
+               .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
+
+               .min_keysize            = DES3_EDE_KEY_SIZE,
+               .max_keysize            = DES3_EDE_KEY_SIZE,
+               .setkey                 = ablk_des3_setkey,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
+}, {
+       .crypto = {
+               .base.cra_name          = "cbc(aes)",
+               .base.cra_blocksize     = AES_BLOCK_SIZE,
+
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .ivsize                 = AES_BLOCK_SIZE,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+}, {
+       .crypto = {
+               .base.cra_name          = "ecb(aes)",
+               .base.cra_blocksize     = AES_BLOCK_SIZE,
+
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
+       .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
+}, {
+       .crypto = {
+               .base.cra_name          = "ctr(aes)",
+               .base.cra_blocksize     = 1,
+
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .ivsize                 = AES_BLOCK_SIZE,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
+       .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
+}, {
+       .crypto = {
+               .base.cra_name          = "rfc3686(ctr(aes))",
+               .base.cra_blocksize     = 1,
+
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .ivsize                 = AES_BLOCK_SIZE,
+               .setkey                 = ablk_rfc3686_setkey,
+               .encrypt                = ablk_rfc3686_crypt,
+               .decrypt                = ablk_rfc3686_crypt,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
+       .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
+} };
+
+static struct ixp_aead_alg ixp4xx_aeads[] = {
+{
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(md5),cbc(des))",
+                       .cra_blocksize  = DES_BLOCK_SIZE,
+               },
+               .ivsize         = DES_BLOCK_SIZE,
+               .maxauthsize    = MD5_DIGEST_SIZE,
+       },
+       .hash = &hash_alg_md5,
+       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(md5),cbc(des3_ede))",
+                       .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
+               },
+               .ivsize         = DES3_EDE_BLOCK_SIZE,
+               .maxauthsize    = MD5_DIGEST_SIZE,
+               .setkey         = des3_aead_setkey,
+       },
+       .hash = &hash_alg_md5,
+       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(sha1),cbc(des))",
+                       .cra_blocksize  = DES_BLOCK_SIZE,
+               },
+               .ivsize         = DES_BLOCK_SIZE,
+               .maxauthsize    = SHA1_DIGEST_SIZE,
+       },
+       .hash = &hash_alg_sha1,
+       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(sha1),cbc(des3_ede))",
+                       .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
+               },
+               .ivsize         = DES3_EDE_BLOCK_SIZE,
+               .maxauthsize    = SHA1_DIGEST_SIZE,
+               .setkey         = des3_aead_setkey,
+       },
+       .hash = &hash_alg_sha1,
+       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(md5),cbc(aes))",
+                       .cra_blocksize  = AES_BLOCK_SIZE,
+               },
+               .ivsize         = AES_BLOCK_SIZE,
+               .maxauthsize    = MD5_DIGEST_SIZE,
+       },
+       .hash = &hash_alg_md5,
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+}, {
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(sha1),cbc(aes))",
+                       .cra_blocksize  = AES_BLOCK_SIZE,
+               },
+               .ivsize         = AES_BLOCK_SIZE,
+               .maxauthsize    = SHA1_DIGEST_SIZE,
+       },
+       .hash = &hash_alg_sha1,
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+} };
+
+#define IXP_POSTFIX "-ixp4xx"
+
+static int ixp_crypto_probe(struct platform_device *_pdev)
+{
+       struct device *dev = &_pdev->dev;
+       int num = ARRAY_SIZE(ixp4xx_algos);
+       int i, err;
+
+       pdev = _pdev;
+
+       err = init_ixp_crypto(dev);
+       if (err)
+               return err;
+
+       for (i = 0; i < num; i++) {
+               struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
+
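+               /* Skip the entry if the "-ixp4xx" driver name would not fit. */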
+               if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                            "%s"IXP_POSTFIX, cra->base.cra_name) >=
+                            CRYPTO_MAX_ALG_NAME)
+                       continue;
+               if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
+                       continue;
+
+               /* block ciphers */
+               cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                     CRYPTO_ALG_ASYNC |
+                                     CRYPTO_ALG_ALLOCATES_MEMORY |
+                                     CRYPTO_ALG_NEED_FALLBACK;
+               if (!cra->setkey)
+                       cra->setkey = ablk_setkey;
+               if (!cra->encrypt)
+                       cra->encrypt = ablk_encrypt;
+               if (!cra->decrypt)
+                       cra->decrypt = ablk_decrypt;
+               cra->init = init_tfm_ablk;
+               cra->exit = exit_tfm_ablk;
+
+               cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+               cra->base.cra_module = THIS_MODULE;
+               cra->base.cra_alignmask = 3;
+               cra->base.cra_priority = 300;
+               if (crypto_register_skcipher(cra))
+                       dev_err(&pdev->dev, "Failed to register '%s'\n",
+                               cra->base.cra_name);
+               else
+                       ixp4xx_algos[i].registered = 1;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+               struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
+
+               if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                            "%s"IXP_POSTFIX, cra->base.cra_name) >=
+                   CRYPTO_MAX_ALG_NAME)
+                       continue;
+               if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
+                       continue;
+
+               /* authenc */
+               cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                     CRYPTO_ALG_ASYNC |
+                                     CRYPTO_ALG_ALLOCATES_MEMORY;
+               cra->setkey = cra->setkey ?: aead_setkey;
+               cra->setauthsize = aead_setauthsize;
+               cra->encrypt = aead_encrypt;
+               cra->decrypt = aead_decrypt;
+               cra->init = init_tfm_aead;
+               cra->exit = exit_tfm_aead;
+
+               cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+               cra->base.cra_module = THIS_MODULE;
+               cra->base.cra_alignmask = 3;
+               cra->base.cra_priority = 300;
+
+               if (crypto_register_aead(cra))
+                       dev_err(&pdev->dev, "Failed to register '%s'\n",
+                               cra->base.cra_driver_name);
+               else
+                       ixp4xx_aeads[i].registered = 1;
+       }
+       return 0;
+}
+
+static int ixp_crypto_remove(struct platform_device *pdev)
+{
+       int num = ARRAY_SIZE(ixp4xx_algos);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+               if (ixp4xx_aeads[i].registered)
+                       crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
+       }
+
+       for (i = 0; i < num; i++) {
+               if (ixp4xx_algos[i].registered)
+                       crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
+       }
+       release_ixp_crypto(&pdev->dev);
+
+       return 0;
+}
+
+static const struct of_device_id ixp4xx_crypto_of_match[] = {
+       {
+               .compatible = "intel,ixp4xx-crypto",
+       },
+       {},
+};
+
+static struct platform_driver ixp_crypto_driver = {
+       .probe = ixp_crypto_probe,
+       .remove = ixp_crypto_remove,
+       .driver = {
+               .name = "ixp4xx_crypto",
+               .of_match_table = ixp4xx_crypto_of_match,
+       },
+};
+module_platform_driver(ixp_crypto_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hohnstaedt <[email protected]>");
+MODULE_DESCRIPTION("IXP4xx hardware crypto");
+
diff --git a/drivers/crypto/intel/keembay/Kconfig b/drivers/crypto/intel/keembay/Kconfig
new file mode 100644 (file)
index 0000000..1cd62f9
--- /dev/null
@@ -0,0 +1,90 @@
+config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4
+       tristate "Support for Intel Keem Bay OCS AES/SM4 HW acceleration"
+       depends on HAS_IOMEM
+       depends on ARCH_KEEMBAY || COMPILE_TEST
+       select CRYPTO_SKCIPHER
+       select CRYPTO_AEAD
+       select CRYPTO_ENGINE
+       help
+         Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) AES and
+         SM4 cipher hardware acceleration for use with Crypto API.
+
+         Provides HW acceleration for the following transformations:
+         cbc(aes), ctr(aes), ccm(aes), gcm(aes), cbc(sm4), ctr(sm4), ccm(sm4)
+         and gcm(sm4).
+
+         Optionally, support for the following transformations can also be
+         enabled: ecb(aes), cts(cbc(aes)), ecb(sm4) and cts(cbc(sm4)).
+
+config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+       bool "Support for Intel Keem Bay OCS AES/SM4 ECB HW acceleration"
+       depends on CRYPTO_DEV_KEEMBAY_OCS_AES_SM4
+       help
+         Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
+         AES/SM4 ECB mode hardware acceleration for use with Crypto API.
+
+         Provides OCS version of ecb(aes) and ecb(sm4).
+
+         Intel does not recommend use of ECB mode with AES/SM4.
+
+config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+       bool "Support for Intel Keem Bay OCS AES/SM4 CTS HW acceleration"
+       depends on CRYPTO_DEV_KEEMBAY_OCS_AES_SM4
+       help
+         Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
+         AES/SM4 CBC with CTS mode hardware acceleration for use with
+         Crypto API.
+
+         Provides OCS version of cts(cbc(aes)) and cts(cbc(sm4)).
+
+         Intel does not recommend use of CTS mode with AES/SM4.
+
+config CRYPTO_DEV_KEEMBAY_OCS_ECC
+       tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
+       depends on ARCH_KEEMBAY || COMPILE_TEST
+       depends on OF
+       depends on HAS_IOMEM
+       select CRYPTO_ECDH
+       select CRYPTO_ENGINE
+       help
+         Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
+         Elliptic Curve Cryptography (ECC) hardware acceleration for use with
+         Crypto API.
+
+         Provides OCS acceleration for ECDH-256 and ECDH-384.
+
+         Say Y or M if you are compiling for the Intel Keem Bay SoC. The
+         module will be called keembay-ocs-ecc.
+
+         If unsure, say N.
+
+config CRYPTO_DEV_KEEMBAY_OCS_HCU
+       tristate "Support for Intel Keem Bay OCS HCU HW acceleration"
+       select CRYPTO_HASH
+       select CRYPTO_ENGINE
+       depends on HAS_IOMEM
+       depends on ARCH_KEEMBAY || COMPILE_TEST
+       depends on OF
+       help
+         Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash
+         Control Unit (HCU) hardware acceleration for use with Crypto API.
+
+         Provides OCS HCU hardware acceleration of sha256, sha384, sha512, and
+         sm3, as well as the HMAC variant of these algorithms.
+
+         Say Y or M if you're building for the Intel Keem Bay SoC. If compiled
+         as a module, the module will be called keembay-ocs-hcu.
+
+         If unsure, say N.
+
+config CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+       bool "Enable sha224 and hmac(sha224) support in Intel Keem Bay OCS HCU"
+       depends on CRYPTO_DEV_KEEMBAY_OCS_HCU
+       help
+         Enables support for the sha224 and hmac(sha224) algorithms in the
+         Intel Keem Bay OCS HCU driver. Intel does not recommend use of
+         these algorithms.
+
+         Provides OCS HCU hardware acceleration of sha224 and hmac(sha224).
+
+         If unsure, say N.
diff --git a/drivers/crypto/intel/keembay/Makefile b/drivers/crypto/intel/keembay/Makefile
new file mode 100644 (file)
index 0000000..7c12c3c
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Makefile for Intel Keem Bay OCS Crypto API Linux drivers
+#
+obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4) += keembay-ocs-aes.o
+keembay-ocs-aes-objs := keembay-ocs-aes-core.o ocs-aes.o
+
+obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_ECC) += keembay-ocs-ecc.o
+
+obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU) += keembay-ocs-hcu.o
+keembay-ocs-hcu-objs := keembay-ocs-hcu-core.o ocs-hcu.o
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
new file mode 100644 (file)
index 0000000..ae31be0
--- /dev/null
@@ -0,0 +1,1704 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS AES Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/scatterwalk.h>
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "ocs-aes.h"
+
+#define KMB_OCS_PRIORITY       350
+#define DRV_NAME               "keembay-ocs-aes"
+
+#define OCS_AES_MIN_KEY_SIZE   16
+#define OCS_AES_MAX_KEY_SIZE   32
+#define OCS_AES_KEYSIZE_128    16
+#define OCS_AES_KEYSIZE_192    24
+#define OCS_AES_KEYSIZE_256    32
+#define OCS_SM4_KEY_SIZE       16
+
+/**
+ * struct ocs_aes_tctx - OCS AES Transform context
+ * @engine_ctx:                Engine context.
+ * @aes_dev:           The OCS AES device.
+ * @key:               AES/SM4 key.
+ * @key_len:           The length (in bytes) of @key.
+ * @cipher:            OCS cipher to use (either AES or SM4).
+ * @sw_cipher:         The cipher to use as fallback.
+ * @use_fallback:      Whether or not fallback cipher should be used.
+ */
+struct ocs_aes_tctx {
+       struct crypto_engine_ctx engine_ctx;
+       struct ocs_aes_dev *aes_dev;
+       u8 key[OCS_AES_KEYSIZE_256];
+       unsigned int key_len;
+       enum ocs_cipher cipher;
+       union {
+               struct crypto_sync_skcipher *sk;
+               struct crypto_aead *aead;
+       } sw_cipher;
+       bool use_fallback;
+};
+
+/**
+ * struct ocs_aes_rctx - OCS AES Request context.
+ * @instruction:       Instruction to be executed (encrypt / decrypt).
+ * @mode:              Mode to use (ECB, CBC, CTR, CCM, GCM, CTS).
+ * @src_nents:         Number of source SG entries.
+ * @dst_nents:         Number of destination SG entries.
+ * @src_dma_count:     The number of DMA-mapped entries of the source SG.
+ * @dst_dma_count:     The number of DMA-mapped entries of the destination SG.
+ * @in_place:          Whether or not this is an in place request, i.e.,
+ *                     src_sg == dst_sg.
+ * @src_dll:           OCS DMA linked list for input data.
+ * @dst_dll:           OCS DMA linked list for output data.
+ * @last_ct_blk:       Buffer to hold last cipher text block (only used in CBC
+ *                     mode).
+ * @cts_swap:          Whether or not CTS swap must be performed.
+ * @aad_src_dll:       OCS DMA linked list for input AAD data.
+ * @aad_dst_dll:       OCS DMA linked list for output AAD data.
+ * @in_tag:            Buffer to hold input encrypted tag (only used for
+ *                     CCM/GCM decrypt).
+ * @out_tag:           Buffer to hold output encrypted / decrypted tag (only
+ *                     used for GCM encrypt / decrypt).
+ */
+struct ocs_aes_rctx {
+       /* Fields common across all modes. */
+       enum ocs_instruction    instruction;
+       enum ocs_mode           mode;
+       int                     src_nents;
+       int                     dst_nents;
+       int                     src_dma_count;
+       int                     dst_dma_count;
+       bool                    in_place;
+       struct ocs_dll_desc     src_dll;
+       struct ocs_dll_desc     dst_dll;
+
+       /* CBC specific */
+       u8                      last_ct_blk[AES_BLOCK_SIZE];
+
+       /* CTS specific */
+       int                     cts_swap;
+
+       /* CCM/GCM specific */
+       struct ocs_dll_desc     aad_src_dll;
+       struct ocs_dll_desc     aad_dst_dll;
+       u8                      in_tag[AES_BLOCK_SIZE];
+
+       /* GCM specific */
+       u8                      out_tag[AES_BLOCK_SIZE];
+};
+
+/* Driver data. */
+struct ocs_aes_drv {
+       struct list_head dev_list;
+       spinlock_t lock;        /* Protects dev_list. */
+};
+
+static struct ocs_aes_drv ocs_aes = {
+       .dev_list = LIST_HEAD_INIT(ocs_aes.dev_list),
+       .lock = __SPIN_LOCK_UNLOCKED(ocs_aes.lock),
+};
+
+static struct ocs_aes_dev *kmb_ocs_aes_find_dev(struct ocs_aes_tctx *tctx)
+{
+       struct ocs_aes_dev *aes_dev;
+
+       spin_lock(&ocs_aes.lock);
+
+       if (tctx->aes_dev) {
+               aes_dev = tctx->aes_dev;
+               goto exit;
+       }
+
+       /* Only a single OCS device available */
+       aes_dev = list_first_entry(&ocs_aes.dev_list, struct ocs_aes_dev, list);
+       tctx->aes_dev = aes_dev;
+
+exit:
+       spin_unlock(&ocs_aes.lock);
+
+       return aes_dev;
+}
+
+/*
+ * Ensure key is 128-bit or 256-bit for AES or 128-bit for SM4 and an actual
+ * key is being passed in.
+ *
+ * Return: 0 if key is valid, -EINVAL otherwise.
+ */
+static int check_key(const u8 *in_key, size_t key_len, enum ocs_cipher cipher)
+{
+       if (!in_key)
+               return -EINVAL;
+
+       /* For AES, only 128-bit or 256-bit keys are supported. */
+       if (cipher == OCS_AES && (key_len == OCS_AES_KEYSIZE_128 ||
+                                 key_len == OCS_AES_KEYSIZE_256))
+               return 0;
+
+       /* For SM4, only 128-bit keys are supported. */
+       if (cipher == OCS_SM4 && key_len == OCS_AES_KEYSIZE_128)
+               return 0;
+
+       /* Everything else is unsupported. */
+       return -EINVAL;
+}
+
+/* Save key into transformation context. */
+static int save_key(struct ocs_aes_tctx *tctx, const u8 *in_key, size_t key_len,
+                   enum ocs_cipher cipher)
+{
+       int ret;
+
+       ret = check_key(in_key, key_len, cipher);
+       if (ret)
+               return ret;
+
+       memcpy(tctx->key, in_key, key_len);
+       tctx->key_len = key_len;
+       tctx->cipher = cipher;
+
+       return 0;
+}
+
+/* Set key for symmetric cipher. */
+static int kmb_ocs_sk_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
+                             size_t key_len, enum ocs_cipher cipher)
+{
+       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+
+       /* Fallback is used for AES with 192-bit key. */
+       tctx->use_fallback = (cipher == OCS_AES &&
+                             key_len == OCS_AES_KEYSIZE_192);
+
+       if (!tctx->use_fallback)
+               return save_key(tctx, in_key, key_len, cipher);
+
+       crypto_sync_skcipher_clear_flags(tctx->sw_cipher.sk,
+                                        CRYPTO_TFM_REQ_MASK);
+       crypto_sync_skcipher_set_flags(tctx->sw_cipher.sk,
+                                      tfm->base.crt_flags &
+                                      CRYPTO_TFM_REQ_MASK);
+
+       return crypto_sync_skcipher_setkey(tctx->sw_cipher.sk, in_key, key_len);
+}
+
+/* Set key for AEAD cipher. */
+static int kmb_ocs_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
+                               size_t key_len, enum ocs_cipher cipher)
+{
+       struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
+
+       /* Fallback is used for AES with 192-bit key. */
+       tctx->use_fallback = (cipher == OCS_AES &&
+                             key_len == OCS_AES_KEYSIZE_192);
+
+       if (!tctx->use_fallback)
+               return save_key(tctx, in_key, key_len, cipher);
+
+       crypto_aead_clear_flags(tctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
+       crypto_aead_set_flags(tctx->sw_cipher.aead,
+                             crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
+
+       return crypto_aead_setkey(tctx->sw_cipher.aead, in_key, key_len);
+}
+
+/* Swap two AES blocks in SG lists. */
+static void sg_swap_blocks(struct scatterlist *sgl, unsigned int nents,
+                          off_t blk1_offset, off_t blk2_offset)
+{
+       u8 tmp_buf1[AES_BLOCK_SIZE], tmp_buf2[AES_BLOCK_SIZE];
+
+       /*
+        * No easy way to copy within sg list, so copy both blocks to temporary
+        * buffers first.
+        */
+       sg_pcopy_to_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk1_offset);
+       sg_pcopy_to_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk2_offset);
+       sg_pcopy_from_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk2_offset);
+       sg_pcopy_from_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk1_offset);
+}
+
+/* Initialize request context to default values. */
+static void ocs_aes_init_rctx(struct ocs_aes_rctx *rctx)
+{
+       /* Zero everything. */
+       memset(rctx, 0, sizeof(*rctx));
+
+       /* Set initial value for DMA addresses. */
+       rctx->src_dll.dma_addr = DMA_MAPPING_ERROR;
+       rctx->dst_dll.dma_addr = DMA_MAPPING_ERROR;
+       rctx->aad_src_dll.dma_addr = DMA_MAPPING_ERROR;
+       rctx->aad_dst_dll.dma_addr = DMA_MAPPING_ERROR;
+}
+
+static int kmb_ocs_sk_validate_input(struct skcipher_request *req,
+                                    enum ocs_mode mode)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       int iv_size = crypto_skcipher_ivsize(tfm);
+
+       switch (mode) {
+       case OCS_MODE_ECB:
+               /* Ensure input length is multiple of block size */
+               if (req->cryptlen % AES_BLOCK_SIZE != 0)
+                       return -EINVAL;
+
+               return 0;
+
+       case OCS_MODE_CBC:
+               /* Ensure input length is multiple of block size */
+               if (req->cryptlen % AES_BLOCK_SIZE != 0)
+                       return -EINVAL;
+
+               /* Ensure IV is present and block size in length */
+               if (!req->iv || iv_size != AES_BLOCK_SIZE)
+                       return -EINVAL;
+               /*
+                * NOTE: Since req->cryptlen == 0 case was already handled in
+                * kmb_ocs_sk_common(), the above two conditions also guarantee
+                * that: cryptlen >= iv_size
+                */
+               return 0;
+
+       case OCS_MODE_CTR:
+               /* Ensure IV is present and block size in length */
+               if (!req->iv || iv_size != AES_BLOCK_SIZE)
+                       return -EINVAL;
+               return 0;
+
+       case OCS_MODE_CTS:
+               /* Ensure input length >= block size */
+               if (req->cryptlen < AES_BLOCK_SIZE)
+                       return -EINVAL;
+
+               /* Ensure IV is present and block size in length */
+               if (!req->iv || iv_size != AES_BLOCK_SIZE)
+                       return -EINVAL;
+
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * Called by encrypt() / decrypt() skcipher functions.
+ *
+ * Use fallback if needed, otherwise initialize context and enqueue request
+ * into engine.
+ */
+static int kmb_ocs_sk_common(struct skcipher_request *req,
+                            enum ocs_cipher cipher,
+                            enum ocs_instruction instruction,
+                            enum ocs_mode mode)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
+       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+       struct ocs_aes_dev *aes_dev;
+       int rc;
+
+       if (tctx->use_fallback) {
+               SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, tctx->sw_cipher.sk);
+
+               skcipher_request_set_sync_tfm(subreq, tctx->sw_cipher.sk);
+               skcipher_request_set_callback(subreq, req->base.flags, NULL,
+                                             NULL);
+               skcipher_request_set_crypt(subreq, req->src, req->dst,
+                                          req->cryptlen, req->iv);
+
+               if (instruction == OCS_ENCRYPT)
+                       rc = crypto_skcipher_encrypt(subreq);
+               else
+                       rc = crypto_skcipher_decrypt(subreq);
+
+               skcipher_request_zero(subreq);
+
+               return rc;
+       }
+
+       /*
+        * If cryptlen == 0, no processing needed for ECB, CBC and CTR.
+        *
+        * For CTS continue: kmb_ocs_sk_validate_input() will return -EINVAL.
+        */
+       if (!req->cryptlen && mode != OCS_MODE_CTS)
+               return 0;
+
+       rc = kmb_ocs_sk_validate_input(req, mode);
+       if (rc)
+               return rc;
+
+       aes_dev = kmb_ocs_aes_find_dev(tctx);
+       if (!aes_dev)
+               return -ENODEV;
+
+       if (cipher != tctx->cipher)
+               return -EINVAL;
+
+       ocs_aes_init_rctx(rctx);
+       rctx->instruction = instruction;
+       rctx->mode = mode;
+
+       return crypto_transfer_skcipher_request_to_engine(aes_dev->engine, req);
+}
+
+static void cleanup_ocs_dma_linked_list(struct device *dev,
+                                       struct ocs_dll_desc *dll)
+{
+       if (dll->vaddr)
+               dma_free_coherent(dev, dll->size, dll->vaddr, dll->dma_addr);
+       dll->vaddr = NULL;
+       dll->size = 0;
+       dll->dma_addr = DMA_MAPPING_ERROR;
+}
+
+static void kmb_ocs_sk_dma_cleanup(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
+       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+       struct device *dev = tctx->aes_dev->dev;
+
+       if (rctx->src_dma_count) {
+               dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+               rctx->src_dma_count = 0;
+       }
+
+       if (rctx->dst_dma_count) {
+               dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
+                                                            DMA_BIDIRECTIONAL :
+                                                            DMA_FROM_DEVICE);
+               rctx->dst_dma_count = 0;
+       }
+
+       /* Clean up OCS DMA linked lists */
+       cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
+       cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
+}
+
+static int kmb_ocs_sk_prepare_inplace(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
+       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+       int iv_size = crypto_skcipher_ivsize(tfm);
+       int rc;
+
+       /*
+        * For CBC decrypt, save last block (iv) to last_ct_blk buffer.
+        *
+        * Note: if we are here, we already checked that cryptlen >= iv_size
+        * and iv_size == AES_BLOCK_SIZE (i.e., the size of last_ct_blk); see
+        * kmb_ocs_sk_validate_input().
+        */
+       if (rctx->mode == OCS_MODE_CBC && rctx->instruction == OCS_DECRYPT)
+               scatterwalk_map_and_copy(rctx->last_ct_blk, req->src,
+                                        req->cryptlen - iv_size, iv_size, 0);
+
+       /* For CTS decrypt, swap last two blocks, if needed. */
+       if (rctx->cts_swap && rctx->instruction == OCS_DECRYPT)
+               sg_swap_blocks(req->dst, rctx->dst_nents,
+                              req->cryptlen - AES_BLOCK_SIZE,
+                              req->cryptlen - (2 * AES_BLOCK_SIZE));
+
+       /* src and dst buffers are the same, use bidirectional DMA mapping. */
+       rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
+                                        rctx->dst_nents, DMA_BIDIRECTIONAL);
+       if (rctx->dst_dma_count == 0) {
+               dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
+               return -ENOMEM;
+       }
+
+       /* Create DST linked list */
+       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+                                           rctx->dst_dma_count, &rctx->dst_dll,
+                                           req->cryptlen, 0);
+       if (rc)
+               return rc;
+       /*
+        * If descriptor creation was successful, set the src_dll.dma_addr to
+        * the value of dst_dll.dma_addr, as we do in-place AES operation on
+        * the src.
+        */
+       rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
+
+       return 0;
+}
+
+static int kmb_ocs_sk_prepare_notinplace(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
+       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+       int rc;
+
+       rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
+       if (rctx->src_nents < 0)
+               return -EBADMSG;
+
+       /* Map SRC SG. */
+       rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
+                                        rctx->src_nents, DMA_TO_DEVICE);
+       if (rctx->src_dma_count == 0) {
+               dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
+               return -ENOMEM;
+       }
+
+       /* Create SRC linked list */
+       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
+                                           rctx->src_dma_count, &rctx->src_dll,
+                                           req->cryptlen, 0);
+       if (rc)
+               return rc;
+
+       /* Map DST SG. */
+       rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
+                                        rctx->dst_nents, DMA_FROM_DEVICE);
+       if (rctx->dst_dma_count == 0) {
+               dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
+               return -ENOMEM;
+       }
+
+       /* Create DST linked list */
+       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+                                           rctx->dst_dma_count, &rctx->dst_dll,
+                                           req->cryptlen, 0);
+       if (rc)
+               return rc;
+
+       /* If this is not a CTS decrypt operation with swapping, we are done. */
+       if (!(rctx->cts_swap && rctx->instruction == OCS_DECRYPT))
+               return 0;
+
+       /*
+        * Otherwise, we have to copy src to dst (as we cannot modify src).
+        * Use OCS AES bypass mode to copy src to dst via DMA.
+        *
+        * NOTE: for anything other than small data sizes this is rather
+        * inefficient.
+        */
+       rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->dst_dll.dma_addr,
+                              rctx->src_dll.dma_addr, req->cryptlen);
+       if (rc)
+               return rc;
+
+       /*
+        * Now dst == src, so clean up what we did so far and use in_place
+        * logic.
+        */
+       kmb_ocs_sk_dma_cleanup(req);
+       rctx->in_place = true;
+
+       return kmb_ocs_sk_prepare_inplace(req);
+}
+
+static int kmb_ocs_sk_run(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
+       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+       struct ocs_aes_dev *aes_dev = tctx->aes_dev;
+       int iv_size = crypto_skcipher_ivsize(tfm);
+       int rc;
+
+       rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+       if (rctx->dst_nents < 0)
+               return -EBADMSG;
+
+       /*
+        * If the request is two blocks or more and a multiple of the block
+        * size, swap the last two blocks to stay compatible with other
+        * crypto API CTS implementations: OCS uses CBC-CS2, whereas the
+        * other implementations use CBC-CS3.
+        * CBC-CS2 and CBC-CS3 are defined by:
+        * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a-add.pdf
+        */
+       rctx->cts_swap = (rctx->mode == OCS_MODE_CTS &&
+                         req->cryptlen > AES_BLOCK_SIZE &&
+                         req->cryptlen % AES_BLOCK_SIZE == 0);
+
+       rctx->in_place = (req->src == req->dst);
+
+       if (rctx->in_place)
+               rc = kmb_ocs_sk_prepare_inplace(req);
+       else
+               rc = kmb_ocs_sk_prepare_notinplace(req);
+
+       if (rc)
+               goto error;
+
+       rc = ocs_aes_op(aes_dev, rctx->mode, tctx->cipher, rctx->instruction,
+                       rctx->dst_dll.dma_addr, rctx->src_dll.dma_addr,
+                       req->cryptlen, req->iv, iv_size);
+       if (rc)
+               goto error;
+
+       /* Clean-up DMA before further processing output. */
+       kmb_ocs_sk_dma_cleanup(req);
+
+       /* For CTS Encrypt, swap last 2 blocks, if needed. */
+       if (rctx->cts_swap && rctx->instruction == OCS_ENCRYPT) {
+               sg_swap_blocks(req->dst, rctx->dst_nents,
+                              req->cryptlen - AES_BLOCK_SIZE,
+                              req->cryptlen - (2 * AES_BLOCK_SIZE));
+               return 0;
+       }
+
+       /* For CBC copy IV to req->IV. */
+       if (rctx->mode == OCS_MODE_CBC) {
+               /* CBC encrypt case. */
+               if (rctx->instruction == OCS_ENCRYPT) {
+                       scatterwalk_map_and_copy(req->iv, req->dst,
+                                                req->cryptlen - iv_size,
+                                                iv_size, 0);
+                       return 0;
+               }
+               /* CBC decrypt case. */
+               if (rctx->in_place)
+                       memcpy(req->iv, rctx->last_ct_blk, iv_size);
+               else
+                       scatterwalk_map_and_copy(req->iv, req->src,
+                                                req->cryptlen - iv_size,
+                                                iv_size, 0);
+               return 0;
+       }
+       /* For all other modes there's nothing to do. */
+
+       return 0;
+
+error:
+       kmb_ocs_sk_dma_cleanup(req);
+
+       return rc;
+}
+
+static int kmb_ocs_aead_validate_input(struct aead_request *req,
+                                      enum ocs_instruction instruction,
+                                      enum ocs_mode mode)
+{
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       int tag_size = crypto_aead_authsize(tfm);
+       int iv_size = crypto_aead_ivsize(tfm);
+
+       /* For decrypt, cryptlen == len(PT) + len(tag). */
+       if (instruction == OCS_DECRYPT && req->cryptlen < tag_size)
+               return -EINVAL;
+
+       /* IV is mandatory. */
+       if (!req->iv)
+               return -EINVAL;
+
+       switch (mode) {
+       case OCS_MODE_GCM:
+               if (iv_size != GCM_AES_IV_SIZE)
+                       return -EINVAL;
+
+               return 0;
+
+       case OCS_MODE_CCM:
+               /* Ensure IV is present and block size in length */
+               if (iv_size != AES_BLOCK_SIZE)
+                       return -EINVAL;
+
+               return 0;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * Called by encrypt() / decrypt() aead functions.
+ *
+ * Use fallback if needed, otherwise initialize context and enqueue request
+ * into engine.
+ */
+static int kmb_ocs_aead_common(struct aead_request *req,
+                              enum ocs_cipher cipher,
+                              enum ocs_instruction instruction,
+                              enum ocs_mode mode)
+{
+       struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+       struct ocs_aes_rctx *rctx = aead_request_ctx(req);
+       struct ocs_aes_dev *dd;
+       int rc;
+
+       if (tctx->use_fallback) {
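+               /*
+                * Reuse the request context as the fallback request; the
+                * transform init path is assumed to size reqsize for this.
+                */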
+               struct aead_request *subreq = aead_request_ctx(req);
+
+               aead_request_set_tfm(subreq, tctx->sw_cipher.aead);
+               aead_request_set_callback(subreq, req->base.flags,
+                                         req->base.complete, req->base.data);
+               aead_request_set_crypt(subreq, req->src, req->dst,
+                                      req->cryptlen, req->iv);
+               aead_request_set_ad(subreq, req->assoclen);
+               rc = crypto_aead_setauthsize(tctx->sw_cipher.aead,
+                                            crypto_aead_authsize(crypto_aead_reqtfm(req)));
+               if (rc)
+                       return rc;
+
+               return (instruction == OCS_ENCRYPT) ?
+                      crypto_aead_encrypt(subreq) :
+                      crypto_aead_decrypt(subreq);
+       }
+
+       rc = kmb_ocs_aead_validate_input(req, instruction, mode);
+       if (rc)
+               return rc;
+
+       dd = kmb_ocs_aes_find_dev(tctx);
+       if (!dd)
+               return -ENODEV;
+
+       if (cipher != tctx->cipher)
+               return -EINVAL;
+
+       ocs_aes_init_rctx(rctx);
+       rctx->instruction = instruction;
+       rctx->mode = mode;
+
+       return crypto_transfer_aead_request_to_engine(dd->engine, req);
+}
+
+static void kmb_ocs_aead_dma_cleanup(struct aead_request *req)
+{
+       struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+       struct ocs_aes_rctx *rctx = aead_request_ctx(req);
+       struct device *dev = tctx->aes_dev->dev;
+
+       if (rctx->src_dma_count) {
+               dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+               rctx->src_dma_count = 0;
+       }
+
+       if (rctx->dst_dma_count) {
+               dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
+                                                            DMA_BIDIRECTIONAL :
+                                                            DMA_FROM_DEVICE);
+               rctx->dst_dma_count = 0;
+       }
+       /* Clean up OCS DMA linked lists */
+       cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
+       cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
+       cleanup_ocs_dma_linked_list(dev, &rctx->aad_src_dll);
+       cleanup_ocs_dma_linked_list(dev, &rctx->aad_dst_dll);
+}
+
+/**
+ * kmb_ocs_aead_dma_prepare() - Do DMA mapping for AEAD processing.
+ * @req:               The AEAD request being processed.
+ * @src_dll_size:      Where to store the length of the data mapped into the
+ *                     src_dll OCS DMA list.
+ *
+ * Do the following:
+ * - DMA map req->src and req->dst
+ * - Initialize the following OCS DMA linked lists: rctx->src_dll,
+ *   rctx->dst_dll, rctx->aad_src_dll and rctx->aad_dst_dll.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int kmb_ocs_aead_dma_prepare(struct aead_request *req, u32 *src_dll_size)
+{
+       struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+       const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
+       struct ocs_aes_rctx *rctx = aead_request_ctx(req);
+       u32 in_size;    /* The length of the data to be mapped by src_dll. */
+       u32 out_size;   /* The length of the data to be mapped by dst_dll. */
+       u32 dst_size;   /* The length of the data in dst_sg. */
+       int rc;
+
+       /* Get number of entries in input data SG list. */
+       rctx->src_nents = sg_nents_for_len(req->src,
+                                          req->assoclen + req->cryptlen);
+       if (rctx->src_nents < 0)
+               return -EBADMSG;
+
+       if (rctx->instruction == OCS_DECRYPT) {
+               /*
+                * For decrypt:
+                * - src sg list is:            AAD|CT|tag
+                * - dst sg list expects:       AAD|PT
+                *
+                * in_size == len(CT); out_size == len(PT)
+                */
+
+               /* req->cryptlen includes both CT and tag. */
+               in_size = req->cryptlen - tag_size;
+
+               /* out_size = PT size == CT size */
+               out_size = in_size;
+
+               /* len(dst_sg) == len(AAD) + len(PT) */
+               dst_size = req->assoclen + out_size;
+
+               /*
+                * Copy tag from source SG list to 'in_tag' buffer.
+                *
+                * Note: this needs to be done here, before DMA mapping src_sg.
+                */
+               sg_pcopy_to_buffer(req->src, rctx->src_nents, rctx->in_tag,
+                                  tag_size, req->assoclen + in_size);
+
+       } else { /* OCS_ENCRYPT */
+               /*
+                * For encrypt:
+                *      src sg list is:         AAD|PT
+                *      dst sg list expects:    AAD|CT|tag
+                */
+               /* in_size == len(PT) */
+               in_size = req->cryptlen;
+
+               /*
+                * In CCM mode the OCS engine appends the tag to the ciphertext,
+                * but in GCM mode the tag must be read from the tag registers
+                * and appended manually below.
+                */
+               out_size = (rctx->mode == OCS_MODE_CCM) ? in_size + tag_size :
+                                                         in_size;
+               /* len(dst_sg) == len(AAD) + len(CT) + len(tag) */
+               dst_size = req->assoclen + in_size + tag_size;
+       }
+       *src_dll_size = in_size;
+
+       /* Get number of entries in output data SG list. */
+       rctx->dst_nents = sg_nents_for_len(req->dst, dst_size);
+       if (rctx->dst_nents < 0)
+               return -EBADMSG;
+
+       rctx->in_place = (req->src == req->dst) ? 1 : 0;
+
+       /* Map destination; use bidirectional mapping for in-place case. */
+       rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
+                                        rctx->dst_nents,
+                                        rctx->in_place ? DMA_BIDIRECTIONAL :
+                                                         DMA_FROM_DEVICE);
+       if (rctx->dst_dma_count == 0 && rctx->dst_nents != 0) {
+               dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
+               return -ENOMEM;
+       }
+
+       /* Create AAD DST list: maps dst[0:AAD_SIZE-1]. */
+       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+                                           rctx->dst_dma_count,
+                                           &rctx->aad_dst_dll, req->assoclen,
+                                           0);
+       if (rc)
+               return rc;
+
+       /* Create DST list: maps dst[AAD_SIZE:out_size] */
+       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+                                           rctx->dst_dma_count, &rctx->dst_dll,
+                                           out_size, req->assoclen);
+       if (rc)
+               return rc;
+
+       if (rctx->in_place) {
+               /* If this is not CCM encrypt, we are done. */
+               if (!(rctx->mode == OCS_MODE_CCM &&
+                     rctx->instruction == OCS_ENCRYPT)) {
+                       /*
+                        * SRC and DST are the same, so re-use the same DMA
+                        * addresses (to avoid allocating new DMA lists
+                        * identical to the dst ones).
+                        */
+                       rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
+                       rctx->aad_src_dll.dma_addr = rctx->aad_dst_dll.dma_addr;
+
+                       return 0;
+               }
+               /*
+                * For CCM encrypt the input and output linked lists contain
+                * different amounts of data, so, we need to create different
+                * SRC and AAD SRC lists, even for the in-place case.
+                */
+               rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+                                                   rctx->dst_dma_count,
+                                                   &rctx->aad_src_dll,
+                                                   req->assoclen, 0);
+               if (rc)
+                       return rc;
+               rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+                                                   rctx->dst_dma_count,
+                                                   &rctx->src_dll, in_size,
+                                                   req->assoclen);
+               if (rc)
+                       return rc;
+
+               return 0;
+       }
+       /* Not in-place case. */
+
+       /* Map source SG. */
+       rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
+                                        rctx->src_nents, DMA_TO_DEVICE);
+       if (rctx->src_dma_count == 0 && rctx->src_nents != 0) {
+               dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
+               return -ENOMEM;
+       }
+
+       /* Create AAD SRC list. */
+       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
+                                           rctx->src_dma_count,
+                                           &rctx->aad_src_dll,
+                                           req->assoclen, 0);
+       if (rc)
+               return rc;
+
+       /* Create SRC list. */
+       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
+                                           rctx->src_dma_count,
+                                           &rctx->src_dll, in_size,
+                                           req->assoclen);
+       if (rc)
+               return rc;
+
+       if (req->assoclen == 0)
+               return 0;
+
+       /* Copy AAD from src sg to dst sg using OCS DMA. */
+       rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->aad_dst_dll.dma_addr,
+                              rctx->aad_src_dll.dma_addr, req->cryptlen);
+       if (rc)
+               dev_err(tctx->aes_dev->dev,
+                       "Failed to copy source AAD to destination AAD\n");
+
+       return rc;
+}
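+
+/*
+ * Illustrative sketch only (not used by the driver): the size arithmetic
+ * above, restated as a tiny self-contained helper. The type and function
+ * names below are invented for exposition.
+ */
+struct aead_layout_sizes {
+       size_t in;      /* Bytes mapped by src_dll (CT or PT). */
+       size_t out;     /* Bytes mapped by dst_dll. */
+       size_t dst;     /* Total bytes expected in the dst SG list. */
+};
+
+static inline struct aead_layout_sizes
+aead_layout(size_t assoclen, size_t cryptlen, size_t tag_size,
+           bool decrypt, bool ccm)
+{
+       struct aead_layout_sizes s;
+
+       if (decrypt) {
+               /* src is AAD|CT|tag and cryptlen includes the tag. */
+               s.in = cryptlen - tag_size;
+               s.out = s.in;                   /* len(PT) == len(CT) */
+               s.dst = assoclen + s.out;       /* dst is AAD|PT */
+       } else {
+               /* src is AAD|PT. */
+               s.in = cryptlen;
+               /* CCM HW appends the tag; for GCM it is copied manually. */
+               s.out = ccm ? s.in + tag_size : s.in;
+               s.dst = assoclen + s.in + tag_size; /* dst is AAD|CT|tag */
+       }
+
+       return s;
+}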
+
+static int kmb_ocs_aead_run(struct aead_request *req)
+{
+       struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+       const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
+       struct ocs_aes_rctx *rctx = aead_request_ctx(req);
+       u32 in_size;    /* The length of the data mapped by src_dll. */
+       int rc;
+
+       rc = kmb_ocs_aead_dma_prepare(req, &in_size);
+       if (rc)
+               goto exit;
+
+       /* For CCM, we just call the OCS processing and we are done. */
+       if (rctx->mode == OCS_MODE_CCM) {
+               rc = ocs_aes_ccm_op(tctx->aes_dev, tctx->cipher,
+                                   rctx->instruction, rctx->dst_dll.dma_addr,
+                                   rctx->src_dll.dma_addr, in_size,
+                                   req->iv,
+                                   rctx->aad_src_dll.dma_addr, req->assoclen,
+                                   rctx->in_tag, tag_size);
+               goto exit;
+       }
+       /* GCM case; invoke OCS processing. */
+       rc = ocs_aes_gcm_op(tctx->aes_dev, tctx->cipher,
+                           rctx->instruction,
+                           rctx->dst_dll.dma_addr,
+                           rctx->src_dll.dma_addr, in_size,
+                           req->iv,
+                           rctx->aad_src_dll.dma_addr, req->assoclen,
+                           rctx->out_tag, tag_size);
+       if (rc)
+               goto exit;
+
+       /* For GCM decrypt, we have to compare in_tag with out_tag. */
+       if (rctx->instruction == OCS_DECRYPT) {
+               rc = memcmp(rctx->in_tag, rctx->out_tag, tag_size) ?
+                    -EBADMSG : 0;
+               goto exit;
+       }
+
+       /* For GCM encrypt, we must manually copy out_tag to DST sg. */
+
+       /* Clean-up must be called before the sg_pcopy_from_buffer() below. */
+       kmb_ocs_aead_dma_cleanup(req);
+
+       /* Copy tag to destination sg after AAD and CT. */
+       sg_pcopy_from_buffer(req->dst, rctx->dst_nents, rctx->out_tag,
+                            tag_size, req->assoclen + req->cryptlen);
+
+       /* Return directly as DMA cleanup already done. */
+       return 0;
+
+exit:
+       kmb_ocs_aead_dma_cleanup(req);
+
+       return rc;
+}
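+
+/*
+ * A note on the GCM tag check above: memcmp() is not constant-time. The
+ * kernel's timing-safe comparator is crypto_memneq() (<crypto/algapi.h>);
+ * an equivalent check would be, as a sketch:
+ *
+ *     rc = crypto_memneq(rctx->in_tag, rctx->out_tag, tag_size) ?
+ *          -EBADMSG : 0;
+ */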
+
+static int kmb_ocs_aes_sk_do_one_request(struct crypto_engine *engine,
+                                        void *areq)
+{
+       struct skcipher_request *req =
+                       container_of(areq, struct skcipher_request, base);
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+       int err;
+
+       if (!tctx->aes_dev) {
+               err = -ENODEV;
+               goto exit;
+       }
+
+       err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
+                             tctx->cipher);
+       if (err)
+               goto exit;
+
+       err = kmb_ocs_sk_run(req);
+
+exit:
+       crypto_finalize_skcipher_request(engine, req, err);
+
+       return 0;
+}
+
+static int kmb_ocs_aes_aead_do_one_request(struct crypto_engine *engine,
+                                          void *areq)
+{
+       struct aead_request *req = container_of(areq,
+                                               struct aead_request, base);
+       struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+       int err;
+
+       if (!tctx->aes_dev)
+               return -ENODEV;
+
+       err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
+                             tctx->cipher);
+       if (err)
+               goto exit;
+
+       err = kmb_ocs_aead_run(req);
+
+exit:
+       crypto_finalize_aead_request(tctx->aes_dev->engine, req, err);
+
+       return 0;
+}
+
+static int kmb_ocs_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
+                              unsigned int key_len)
+{
+       return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_AES);
+}
+
+static int kmb_ocs_aes_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
+                                   unsigned int key_len)
+{
+       return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_AES);
+}
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+static int kmb_ocs_aes_ecb_encrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_ECB);
+}
+
+static int kmb_ocs_aes_ecb_decrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_ECB);
+}
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
+
+static int kmb_ocs_aes_cbc_encrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CBC);
+}
+
+static int kmb_ocs_aes_cbc_decrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CBC);
+}
+
+static int kmb_ocs_aes_ctr_encrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTR);
+}
+
+static int kmb_ocs_aes_ctr_decrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTR);
+}
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+static int kmb_ocs_aes_cts_encrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTS);
+}
+
+static int kmb_ocs_aes_cts_decrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTS);
+}
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
+
+static int kmb_ocs_aes_gcm_encrypt(struct aead_request *req)
+{
+       return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_GCM);
+}
+
+static int kmb_ocs_aes_gcm_decrypt(struct aead_request *req)
+{
+       return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_GCM);
+}
+
+static int kmb_ocs_aes_ccm_encrypt(struct aead_request *req)
+{
+       return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CCM);
+}
+
+static int kmb_ocs_aes_ccm_decrypt(struct aead_request *req)
+{
+       return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CCM);
+}
+
+static int kmb_ocs_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
+                              unsigned int key_len)
+{
+       return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_SM4);
+}
+
+static int kmb_ocs_sm4_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
+                                   unsigned int key_len)
+{
+       return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_SM4);
+}
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+static int kmb_ocs_sm4_ecb_encrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_ECB);
+}
+
+static int kmb_ocs_sm4_ecb_decrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_ECB);
+}
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
+
+static int kmb_ocs_sm4_cbc_encrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CBC);
+}
+
+static int kmb_ocs_sm4_cbc_decrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CBC);
+}
+
+static int kmb_ocs_sm4_ctr_encrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTR);
+}
+
+static int kmb_ocs_sm4_ctr_decrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTR);
+}
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+static int kmb_ocs_sm4_cts_encrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTS);
+}
+
+static int kmb_ocs_sm4_cts_decrypt(struct skcipher_request *req)
+{
+       return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTS);
+}
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
+
+static int kmb_ocs_sm4_gcm_encrypt(struct aead_request *req)
+{
+       return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_GCM);
+}
+
+static int kmb_ocs_sm4_gcm_decrypt(struct aead_request *req)
+{
+       return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_GCM);
+}
+
+static int kmb_ocs_sm4_ccm_encrypt(struct aead_request *req)
+{
+       return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CCM);
+}
+
+static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req)
+{
+       return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM);
+}
+
+static inline int ocs_common_init(struct ocs_aes_tctx *tctx)
+{
+       tctx->engine_ctx.op.prepare_request = NULL;
+       tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_sk_do_one_request;
+       tctx->engine_ctx.op.unprepare_request = NULL;
+
+       return 0;
+}
+
+static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
+{
+       const char *alg_name = crypto_tfm_alg_name(&tfm->base);
+       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+       struct crypto_sync_skcipher *blk;
+
+       /* set fallback cipher in case it will be needed */
+       blk = crypto_alloc_sync_skcipher(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(blk))
+               return PTR_ERR(blk);
+
+       tctx->sw_cipher.sk = blk;
+
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
+
+       return ocs_common_init(tctx);
+}
+
+static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm)
+{
+       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
+
+       return ocs_common_init(tctx);
+}
+
+static inline void clear_key(struct ocs_aes_tctx *tctx)
+{
+       memzero_explicit(tctx->key, OCS_AES_KEYSIZE_256);
+
+       /* Zero key registers if set */
+       if (tctx->aes_dev)
+               ocs_aes_set_key(tctx->aes_dev, OCS_AES_KEYSIZE_256,
+                               tctx->key, OCS_AES);
+}
+
+static void ocs_exit_tfm(struct crypto_skcipher *tfm)
+{
+       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+
+       clear_key(tctx);
+
+       if (tctx->sw_cipher.sk) {
+               crypto_free_sync_skcipher(tctx->sw_cipher.sk);
+               tctx->sw_cipher.sk = NULL;
+       }
+}
+
+static inline int ocs_common_aead_init(struct ocs_aes_tctx *tctx)
+{
+       tctx->engine_ctx.op.prepare_request = NULL;
+       tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_aead_do_one_request;
+       tctx->engine_ctx.op.unprepare_request = NULL;
+
+       return 0;
+}
+
+static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
+{
+       const char *alg_name = crypto_tfm_alg_name(&tfm->base);
+       struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
+       struct crypto_aead *blk;
+
+       /* Set fallback cipher in case it will be needed */
+       blk = crypto_alloc_aead(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(blk))
+               return PTR_ERR(blk);
+
+       tctx->sw_cipher.aead = blk;
+
+       crypto_aead_set_reqsize(tfm,
+                               max(sizeof(struct ocs_aes_rctx),
+                                   (sizeof(struct aead_request) +
+                                    crypto_aead_reqsize(tctx->sw_cipher.aead))));
+
+       return ocs_common_aead_init(tctx);
+}
+
+static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm,
+                                       unsigned int authsize)
+{
+       switch (authsize) {
+       case 4:
+       case 6:
+       case 8:
+       case 10:
+       case 12:
+       case 14:
+       case 16:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
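+
+/*
+ * The switch above accepts exactly the even CCM tag lengths 4..16 allowed
+ * by RFC 3610. A compact equivalent, shown only as a sketch:
+ *
+ *     if (authsize < 4 || authsize > 16 || (authsize & 1))
+ *             return -EINVAL;
+ *     return 0;
+ */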
+
+static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm,
+                                       unsigned int authsize)
+{
+       return crypto_gcm_check_authsize(authsize);
+}
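+
+/*
+ * crypto_gcm_check_authsize() is the shared helper from <crypto/gcm.h>; it
+ * accepts the standard GCM tag lengths of 4, 8 and 12..16 bytes.
+ */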
+
+static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm)
+{
+       struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
+
+       crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
+
+       return ocs_common_aead_init(tctx);
+}
+
+static void ocs_aead_cra_exit(struct crypto_aead *tfm)
+{
+       struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
+
+       clear_key(tctx);
+
+       if (tctx->sw_cipher.aead) {
+               crypto_free_aead(tctx->sw_cipher.aead);
+               tctx->sw_cipher.aead = NULL;
+       }
+}
+
+static struct skcipher_alg algs[] = {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+       {
+               .base.cra_name = "ecb(aes)",
+               .base.cra_driver_name = "ecb-aes-keembay-ocs",
+               .base.cra_priority = KMB_OCS_PRIORITY,
+               .base.cra_flags = CRYPTO_ALG_ASYNC |
+                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                 CRYPTO_ALG_NEED_FALLBACK,
+               .base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.cra_module = THIS_MODULE,
+               .base.cra_alignmask = 0,
+
+               .min_keysize = OCS_AES_MIN_KEY_SIZE,
+               .max_keysize = OCS_AES_MAX_KEY_SIZE,
+               .setkey = kmb_ocs_aes_set_key,
+               .encrypt = kmb_ocs_aes_ecb_encrypt,
+               .decrypt = kmb_ocs_aes_ecb_decrypt,
+               .init = ocs_aes_init_tfm,
+               .exit = ocs_exit_tfm,
+       },
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
+       {
+               .base.cra_name = "cbc(aes)",
+               .base.cra_driver_name = "cbc-aes-keembay-ocs",
+               .base.cra_priority = KMB_OCS_PRIORITY,
+               .base.cra_flags = CRYPTO_ALG_ASYNC |
+                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                 CRYPTO_ALG_NEED_FALLBACK,
+               .base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.cra_module = THIS_MODULE,
+               .base.cra_alignmask = 0,
+
+               .min_keysize = OCS_AES_MIN_KEY_SIZE,
+               .max_keysize = OCS_AES_MAX_KEY_SIZE,
+               .ivsize = AES_BLOCK_SIZE,
+               .setkey = kmb_ocs_aes_set_key,
+               .encrypt = kmb_ocs_aes_cbc_encrypt,
+               .decrypt = kmb_ocs_aes_cbc_decrypt,
+               .init = ocs_aes_init_tfm,
+               .exit = ocs_exit_tfm,
+       },
+       {
+               .base.cra_name = "ctr(aes)",
+               .base.cra_driver_name = "ctr-aes-keembay-ocs",
+               .base.cra_priority = KMB_OCS_PRIORITY,
+               .base.cra_flags = CRYPTO_ALG_ASYNC |
+                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                 CRYPTO_ALG_NEED_FALLBACK,
+               .base.cra_blocksize = 1,
+               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.cra_module = THIS_MODULE,
+               .base.cra_alignmask = 0,
+
+               .min_keysize = OCS_AES_MIN_KEY_SIZE,
+               .max_keysize = OCS_AES_MAX_KEY_SIZE,
+               .ivsize = AES_BLOCK_SIZE,
+               .setkey = kmb_ocs_aes_set_key,
+               .encrypt = kmb_ocs_aes_ctr_encrypt,
+               .decrypt = kmb_ocs_aes_ctr_decrypt,
+               .init = ocs_aes_init_tfm,
+               .exit = ocs_exit_tfm,
+       },
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+       {
+               .base.cra_name = "cts(cbc(aes))",
+               .base.cra_driver_name = "cts-aes-keembay-ocs",
+               .base.cra_priority = KMB_OCS_PRIORITY,
+               .base.cra_flags = CRYPTO_ALG_ASYNC |
+                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                 CRYPTO_ALG_NEED_FALLBACK,
+               .base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.cra_module = THIS_MODULE,
+               .base.cra_alignmask = 0,
+
+               .min_keysize = OCS_AES_MIN_KEY_SIZE,
+               .max_keysize = OCS_AES_MAX_KEY_SIZE,
+               .ivsize = AES_BLOCK_SIZE,
+               .setkey = kmb_ocs_aes_set_key,
+               .encrypt = kmb_ocs_aes_cts_encrypt,
+               .decrypt = kmb_ocs_aes_cts_decrypt,
+               .init = ocs_aes_init_tfm,
+               .exit = ocs_exit_tfm,
+       },
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+       {
+               .base.cra_name = "ecb(sm4)",
+               .base.cra_driver_name = "ecb-sm4-keembay-ocs",
+               .base.cra_priority = KMB_OCS_PRIORITY,
+               .base.cra_flags = CRYPTO_ALG_ASYNC |
+                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
+               .base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.cra_module = THIS_MODULE,
+               .base.cra_alignmask = 0,
+
+               .min_keysize = OCS_SM4_KEY_SIZE,
+               .max_keysize = OCS_SM4_KEY_SIZE,
+               .setkey = kmb_ocs_sm4_set_key,
+               .encrypt = kmb_ocs_sm4_ecb_encrypt,
+               .decrypt = kmb_ocs_sm4_ecb_decrypt,
+               .init = ocs_sm4_init_tfm,
+               .exit = ocs_exit_tfm,
+       },
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
+       {
+               .base.cra_name = "cbc(sm4)",
+               .base.cra_driver_name = "cbc-sm4-keembay-ocs",
+               .base.cra_priority = KMB_OCS_PRIORITY,
+               .base.cra_flags = CRYPTO_ALG_ASYNC |
+                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
+               .base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.cra_module = THIS_MODULE,
+               .base.cra_alignmask = 0,
+
+               .min_keysize = OCS_SM4_KEY_SIZE,
+               .max_keysize = OCS_SM4_KEY_SIZE,
+               .ivsize = AES_BLOCK_SIZE,
+               .setkey = kmb_ocs_sm4_set_key,
+               .encrypt = kmb_ocs_sm4_cbc_encrypt,
+               .decrypt = kmb_ocs_sm4_cbc_decrypt,
+               .init = ocs_sm4_init_tfm,
+               .exit = ocs_exit_tfm,
+       },
+       {
+               .base.cra_name = "ctr(sm4)",
+               .base.cra_driver_name = "ctr-sm4-keembay-ocs",
+               .base.cra_priority = KMB_OCS_PRIORITY,
+               .base.cra_flags = CRYPTO_ALG_ASYNC |
+                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
+               .base.cra_blocksize = 1,
+               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.cra_module = THIS_MODULE,
+               .base.cra_alignmask = 0,
+
+               .min_keysize = OCS_SM4_KEY_SIZE,
+               .max_keysize = OCS_SM4_KEY_SIZE,
+               .ivsize = AES_BLOCK_SIZE,
+               .setkey = kmb_ocs_sm4_set_key,
+               .encrypt = kmb_ocs_sm4_ctr_encrypt,
+               .decrypt = kmb_ocs_sm4_ctr_decrypt,
+               .init = ocs_sm4_init_tfm,
+               .exit = ocs_exit_tfm,
+       },
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+       {
+               .base.cra_name = "cts(cbc(sm4))",
+               .base.cra_driver_name = "cts-sm4-keembay-ocs",
+               .base.cra_priority = KMB_OCS_PRIORITY,
+               .base.cra_flags = CRYPTO_ALG_ASYNC |
+                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
+               .base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.cra_module = THIS_MODULE,
+               .base.cra_alignmask = 0,
+
+               .min_keysize = OCS_SM4_KEY_SIZE,
+               .max_keysize = OCS_SM4_KEY_SIZE,
+               .ivsize = AES_BLOCK_SIZE,
+               .setkey = kmb_ocs_sm4_set_key,
+               .encrypt = kmb_ocs_sm4_cts_encrypt,
+               .decrypt = kmb_ocs_sm4_cts_decrypt,
+               .init = ocs_sm4_init_tfm,
+               .exit = ocs_exit_tfm,
+       }
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
+};
+
+static struct aead_alg algs_aead[] = {
+       {
+               .base = {
+                       .cra_name = "gcm(aes)",
+                       .cra_driver_name = "gcm-aes-keembay-ocs",
+                       .cra_priority = KMB_OCS_PRIORITY,
+                       .cra_flags = CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                    CRYPTO_ALG_NEED_FALLBACK,
+                       .cra_blocksize = 1,
+                       .cra_ctxsize = sizeof(struct ocs_aes_tctx),
+                       .cra_alignmask = 0,
+                       .cra_module = THIS_MODULE,
+               },
+               .init = ocs_aes_aead_cra_init,
+               .exit = ocs_aead_cra_exit,
+               .ivsize = GCM_AES_IV_SIZE,
+               .maxauthsize = AES_BLOCK_SIZE,
+               .setauthsize = kmb_ocs_aead_gcm_setauthsize,
+               .setkey = kmb_ocs_aes_aead_set_key,
+               .encrypt = kmb_ocs_aes_gcm_encrypt,
+               .decrypt = kmb_ocs_aes_gcm_decrypt,
+       },
+       {
+               .base = {
+                       .cra_name = "ccm(aes)",
+                       .cra_driver_name = "ccm-aes-keembay-ocs",
+                       .cra_priority = KMB_OCS_PRIORITY,
+                       .cra_flags = CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                    CRYPTO_ALG_NEED_FALLBACK,
+                       .cra_blocksize = 1,
+                       .cra_ctxsize = sizeof(struct ocs_aes_tctx),
+                       .cra_alignmask = 0,
+                       .cra_module = THIS_MODULE,
+               },
+               .init = ocs_aes_aead_cra_init,
+               .exit = ocs_aead_cra_exit,
+               .ivsize = AES_BLOCK_SIZE,
+               .maxauthsize = AES_BLOCK_SIZE,
+               .setauthsize = kmb_ocs_aead_ccm_setauthsize,
+               .setkey = kmb_ocs_aes_aead_set_key,
+               .encrypt = kmb_ocs_aes_ccm_encrypt,
+               .decrypt = kmb_ocs_aes_ccm_decrypt,
+       },
+       {
+               .base = {
+                       .cra_name = "gcm(sm4)",
+                       .cra_driver_name = "gcm-sm4-keembay-ocs",
+                       .cra_priority = KMB_OCS_PRIORITY,
+                       .cra_flags = CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
+                       .cra_blocksize = 1,
+                       .cra_ctxsize = sizeof(struct ocs_aes_tctx),
+                       .cra_alignmask = 0,
+                       .cra_module = THIS_MODULE,
+               },
+               .init = ocs_sm4_aead_cra_init,
+               .exit = ocs_aead_cra_exit,
+               .ivsize = GCM_AES_IV_SIZE,
+               .maxauthsize = AES_BLOCK_SIZE,
+               .setauthsize = kmb_ocs_aead_gcm_setauthsize,
+               .setkey = kmb_ocs_sm4_aead_set_key,
+               .encrypt = kmb_ocs_sm4_gcm_encrypt,
+               .decrypt = kmb_ocs_sm4_gcm_decrypt,
+       },
+       {
+               .base = {
+                       .cra_name = "ccm(sm4)",
+                       .cra_driver_name = "ccm-sm4-keembay-ocs",
+                       .cra_priority = KMB_OCS_PRIORITY,
+                       .cra_flags = CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
+                       .cra_blocksize = 1,
+                       .cra_ctxsize = sizeof(struct ocs_aes_tctx),
+                       .cra_alignmask = 0,
+                       .cra_module = THIS_MODULE,
+               },
+               .init = ocs_sm4_aead_cra_init,
+               .exit = ocs_aead_cra_exit,
+               .ivsize = AES_BLOCK_SIZE,
+               .maxauthsize = AES_BLOCK_SIZE,
+               .setauthsize = kmb_ocs_aead_ccm_setauthsize,
+               .setkey = kmb_ocs_sm4_aead_set_key,
+               .encrypt = kmb_ocs_sm4_ccm_encrypt,
+               .decrypt = kmb_ocs_sm4_ccm_decrypt,
+       }
+};
+
+static void unregister_aes_algs(struct ocs_aes_dev *aes_dev)
+{
+       crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+       crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+static int register_aes_algs(struct ocs_aes_dev *aes_dev)
+{
+       int ret;
+
+       /*
+        * If any algorithm fails to register, all preceding algorithms that
+        * were successfully registered will be automatically unregistered.
+        */
+       ret = crypto_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+       if (ret)
+               return ret;
+
+       ret = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+       if (ret)
+               crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+
+       return ret;
+}
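+
+/*
+ * Once registered, these algorithms are reachable through the regular
+ * Crypto API by cra_name, with KMB_OCS_PRIORITY weighed against competing
+ * implementations. A minimal kernel-side usage sketch (the key buffer is
+ * assumed; error handling trimmed):
+ *
+ *     struct crypto_skcipher *tfm;
+ *
+ *     tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *     crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
+ *     ...
+ *     crypto_free_skcipher(tfm);
+ */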
+
+/* Device tree driver match. */
+static const struct of_device_id kmb_ocs_aes_of_match[] = {
+       {
+               .compatible = "intel,keembay-ocs-aes",
+       },
+       {}
+};
+
+static int kmb_ocs_aes_remove(struct platform_device *pdev)
+{
+       struct ocs_aes_dev *aes_dev;
+
+       aes_dev = platform_get_drvdata(pdev);
+
+       unregister_aes_algs(aes_dev);
+
+       spin_lock(&ocs_aes.lock);
+       list_del(&aes_dev->list);
+       spin_unlock(&ocs_aes.lock);
+
+       crypto_engine_exit(aes_dev->engine);
+
+       return 0;
+}
+
+static int kmb_ocs_aes_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ocs_aes_dev *aes_dev;
+       int rc;
+
+       aes_dev = devm_kzalloc(dev, sizeof(*aes_dev), GFP_KERNEL);
+       if (!aes_dev)
+               return -ENOMEM;
+
+       aes_dev->dev = dev;
+
+       platform_set_drvdata(pdev, aes_dev);
+
+       rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (rc) {
+               dev_err(dev, "Failed to set 32 bit dma mask %d\n", rc);
+               return rc;
+       }
+
+       /* Get base register address. */
+       aes_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(aes_dev->base_reg))
+               return PTR_ERR(aes_dev->base_reg);
+
+       /* Get and request IRQ */
+       aes_dev->irq = platform_get_irq(pdev, 0);
+       if (aes_dev->irq < 0)
+               return aes_dev->irq;
+
+       rc = devm_request_threaded_irq(dev, aes_dev->irq, ocs_aes_irq_handler,
+                                      NULL, 0, "keembay-ocs-aes", aes_dev);
+       if (rc < 0) {
+               dev_err(dev, "Could not request IRQ\n");
+               return rc;
+       }
+
+       INIT_LIST_HEAD(&aes_dev->list);
+       spin_lock(&ocs_aes.lock);
+       list_add_tail(&aes_dev->list, &ocs_aes.dev_list);
+       spin_unlock(&ocs_aes.lock);
+
+       init_completion(&aes_dev->irq_completion);
+
+       /* Initialize crypto engine */
+       aes_dev->engine = crypto_engine_alloc_init(dev, true);
+       if (!aes_dev->engine) {
+               rc = -ENOMEM;
+               goto list_del;
+       }
+
+       rc = crypto_engine_start(aes_dev->engine);
+       if (rc) {
+               dev_err(dev, "Could not start crypto engine\n");
+               goto cleanup;
+       }
+
+       rc = register_aes_algs(aes_dev);
+       if (rc) {
+               dev_err(dev,
+                       "Could not register OCS algorithms with Crypto API\n");
+               goto cleanup;
+       }
+
+       return 0;
+
+cleanup:
+       crypto_engine_exit(aes_dev->engine);
+list_del:
+       spin_lock(&ocs_aes.lock);
+       list_del(&aes_dev->list);
+       spin_unlock(&ocs_aes.lock);
+
+       return rc;
+}
+
+/* The OCS driver is a platform device. */
+static struct platform_driver kmb_ocs_aes_driver = {
+       .probe = kmb_ocs_aes_probe,
+       .remove = kmb_ocs_aes_remove,
+       .driver = {
+                       .name = DRV_NAME,
+                       .of_match_table = kmb_ocs_aes_of_match,
+               },
+};
+
+module_platform_driver(kmb_ocs_aes_driver);
+
+MODULE_DESCRIPTION("Intel Keem Bay Offload and Crypto Subsystem (OCS) AES/SM4 Driver");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS_CRYPTO("cbc-aes-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ctr-aes-keembay-ocs");
+MODULE_ALIAS_CRYPTO("gcm-aes-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ccm-aes-keembay-ocs");
+
+MODULE_ALIAS_CRYPTO("cbc-sm4-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ctr-sm4-keembay-ocs");
+MODULE_ALIAS_CRYPTO("gcm-sm4-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ccm-sm4-keembay-ocs");
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+MODULE_ALIAS_CRYPTO("ecb-aes-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ecb-sm4-keembay-ocs");
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+MODULE_ALIAS_CRYPTO("cts-aes-keembay-ocs");
+MODULE_ALIAS_CRYPTO("cts-sm4-keembay-ocs");
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
new file mode 100644 (file)
index 0000000..2269df1
--- /dev/null
@@ -0,0 +1,1016 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS ECC Crypto Driver.
+ *
+ * Copyright (C) 2019-2021 Intel Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/fips.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
+#include <crypto/engine.h>
+#include <crypto/kpp.h>
+#include <crypto/rng.h>
+
+#include <crypto/internal/ecc.h>
+#include <crypto/internal/kpp.h>
+
+#define DRV_NAME                       "keembay-ocs-ecc"
+
+#define KMB_OCS_ECC_PRIORITY           350
+
+#define HW_OFFS_OCS_ECC_COMMAND                0x00000000
+#define HW_OFFS_OCS_ECC_STATUS         0x00000004
+#define HW_OFFS_OCS_ECC_DATA_IN                0x00000080
+#define HW_OFFS_OCS_ECC_CX_DATA_OUT    0x00000100
+#define HW_OFFS_OCS_ECC_CY_DATA_OUT    0x00000180
+#define HW_OFFS_OCS_ECC_ISR            0x00000400
+#define HW_OFFS_OCS_ECC_IER            0x00000404
+
+#define HW_OCS_ECC_ISR_INT_STATUS_DONE BIT(0)
+#define HW_OCS_ECC_COMMAND_INS_BP      BIT(0)
+
+#define HW_OCS_ECC_COMMAND_START_VAL   BIT(0)
+
+#define OCS_ECC_OP_SIZE_384            BIT(8)
+#define OCS_ECC_OP_SIZE_256            0
+
+/* ECC instructions for the ECC_COMMAND register. */
+#define OCS_ECC_INST_WRITE_AX          (0x1 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_AY          (0x2 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_BX_D                (0x3 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_BY_L                (0x4 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_P           (0x5 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_A           (0x6 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_D_IDX_A      (0x8 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_POW_B_MODP (0xB << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_MUL_B_MODP (0xC << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_ADD_B_MODP (0xD << HW_OCS_ECC_COMMAND_INS_BP)
+
+#define ECC_ENABLE_INTR                        1
+
+#define POLL_USEC                      100
+#define TIMEOUT_USEC                   10000
+
+#define KMB_ECC_VLI_MAX_DIGITS         ECC_CURVE_NIST_P384_DIGITS
+#define KMB_ECC_VLI_MAX_BYTES          (KMB_ECC_VLI_MAX_DIGITS \
+                                        << ECC_DIGITS_TO_BYTES_SHIFT)
+
+#define POW_CUBE                       3
+
+/**
+ * struct ocs_ecc_dev - ECC device context
+ * @list: List of device contexts
+ * @dev: OCS ECC device
+ * @base_reg: IO base address of OCS ECC
+ * @engine: Crypto engine for the device
+ * @irq_done: IRQ done completion.
+ * @irq: IRQ number
+ */
+struct ocs_ecc_dev {
+       struct list_head list;
+       struct device *dev;
+       void __iomem *base_reg;
+       struct crypto_engine *engine;
+       struct completion irq_done;
+       int irq;
+};
+
+/**
+ * struct ocs_ecc_ctx - Transformation context.
+ * @engine_ctx:         Crypto engine ctx.
+ * @ecc_dev:    The ECC driver associated with this context.
+ * @curve:      The elliptic curve used by this transformation.
+ * @private_key: The private key.
+ */
+struct ocs_ecc_ctx {
+       struct crypto_engine_ctx engine_ctx;
+       struct ocs_ecc_dev *ecc_dev;
+       const struct ecc_curve *curve;
+       u64 private_key[KMB_ECC_VLI_MAX_DIGITS];
+};
+
+/* Driver data. */
+struct ocs_ecc_drv {
+       struct list_head dev_list;
+       spinlock_t lock;        /* Protects dev_list. */
+};
+
+/* Global variable holding the list of OCS ECC devices (only one expected). */
+static struct ocs_ecc_drv ocs_ecc = {
+       .dev_list = LIST_HEAD_INIT(ocs_ecc.dev_list),
+       .lock = __SPIN_LOCK_UNLOCKED(ocs_ecc.lock),
+};
+
+/* Get OCS ECC tfm context from kpp_request. */
+static inline struct ocs_ecc_ctx *kmb_ocs_ecc_tctx(struct kpp_request *req)
+{
+       return kpp_tfm_ctx(crypto_kpp_reqtfm(req));
+}
+
+/* Converts number of digits to number of bytes. */
+static inline unsigned int digits_to_bytes(unsigned int n)
+{
+       return n << ECC_DIGITS_TO_BYTES_SHIFT;
+}
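+
+/*
+ * Digits are u64 words (ECC_DIGITS_TO_BYTES_SHIFT is 3), so, for example,
+ * P-256 uses 4 digits = 32 bytes and P-384 uses 6 digits = 48 bytes.
+ */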
+
+/*
+ * Wait for the ECC engine to be idle, i.e., for an operation (other than
+ * a write operation) to complete.
+ */
+static inline int ocs_ecc_wait_idle(struct ocs_ecc_dev *dev)
+{
+       u32 value;
+
+       return readl_poll_timeout((dev->base_reg + HW_OFFS_OCS_ECC_STATUS),
+                                 value,
+                                 !(value & HW_OCS_ECC_ISR_INT_STATUS_DONE),
+                                 POLL_USEC, TIMEOUT_USEC);
+}
+
+static void ocs_ecc_cmd_start(struct ocs_ecc_dev *ecc_dev, u32 op_size)
+{
+       iowrite32(op_size | HW_OCS_ECC_COMMAND_START_VAL,
+                 ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+}
+
+/* Direct write of u32 buffer to ECC engine with associated instruction. */
+static void ocs_ecc_write_cmd_and_data(struct ocs_ecc_dev *dev,
+                                      u32 op_size,
+                                      u32 inst,
+                                      const void *data_in,
+                                      size_t data_size)
+{
+       iowrite32(op_size | inst, dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+
+       /* MMIO-write the u32 source buffer to the device. */
+       memcpy_toio(dev->base_reg + HW_OFFS_OCS_ECC_DATA_IN, data_in,
+                   data_size);
+}
+
+/* Start OCS ECC operation and wait for its completion. */
+static int ocs_ecc_trigger_op(struct ocs_ecc_dev *ecc_dev, u32 op_size,
+                             u32 inst)
+{
+       reinit_completion(&ecc_dev->irq_done);
+
+       iowrite32(ECC_ENABLE_INTR, ecc_dev->base_reg + HW_OFFS_OCS_ECC_IER);
+       iowrite32(op_size | inst, ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+
+       return wait_for_completion_interruptible(&ecc_dev->irq_done);
+}
+
+/**
+ * ocs_ecc_read_cx_out() - Read the CX data output buffer.
+ * @dev:       The OCS ECC device to read from.
+ * @cx_out:    The buffer where to store the CX value. Must be at least
+ *             @byte_count byte long.
+ * @byte_count:        The amount of data to read.
+ */
+static inline void ocs_ecc_read_cx_out(struct ocs_ecc_dev *dev, void *cx_out,
+                                      size_t byte_count)
+{
+       memcpy_fromio(cx_out, dev->base_reg + HW_OFFS_OCS_ECC_CX_DATA_OUT,
+                     byte_count);
+}
+
+/**
+ * ocs_ecc_read_cy_out() - Read the CY data output buffer.
+ * @dev:       The OCS ECC device to read from.
+ * @cy_out:    The buffer where to store the CY value. Must be at least
+ *             @byte_count byte long.
+ * @byte_count:        The amount of data to read.
+ */
+static inline void ocs_ecc_read_cy_out(struct ocs_ecc_dev *dev, void *cy_out,
+                                      size_t byte_count)
+{
+       memcpy_fromio(cy_out, dev->base_reg + HW_OFFS_OCS_ECC_CY_DATA_OUT,
+                     byte_count);
+}
+
+static struct ocs_ecc_dev *kmb_ocs_ecc_find_dev(struct ocs_ecc_ctx *tctx)
+{
+       if (tctx->ecc_dev)
+               return tctx->ecc_dev;
+
+       spin_lock(&ocs_ecc.lock);
+
+       /* Only a single OCS device available. */
+       tctx->ecc_dev = list_first_entry(&ocs_ecc.dev_list, struct ocs_ecc_dev,
+                                        list);
+
+       spin_unlock(&ocs_ecc.lock);
+
+       return tctx->ecc_dev;
+}
+
+/* Do point multiplication using OCS ECC HW. */
+static int kmb_ecc_point_mult(struct ocs_ecc_dev *ecc_dev,
+                             struct ecc_point *result,
+                             const struct ecc_point *point,
+                             u64 *scalar,
+                             const struct ecc_curve *curve)
+{
+       u8 sca[KMB_ECC_VLI_MAX_BYTES]; /* Use the maximum data size. */
+       u32 op_size = (curve->g.ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
+                     OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
+       size_t nbytes = digits_to_bytes(curve->g.ndigits);
+       int rc = 0;
+
+       /*
+        * Generate nbytes of random data for Simple and Differential SCA
+        * protection.
+        */
+       rc = crypto_get_default_rng();
+       if (rc)
+               return rc;
+
+       rc = crypto_rng_get_bytes(crypto_default_rng, sca, nbytes);
+       crypto_put_default_rng();
+       if (rc)
+               return rc;
+
+       /* Wait for the engine to be idle before starting a new operation. */
+       rc = ocs_ecc_wait_idle(ecc_dev);
+       if (rc)
+               return rc;
+
+       /* Send the ecc_start pulse and indicate the operation size. */
+       ocs_ecc_cmd_start(ecc_dev, op_size);
+
+       /* Write ax param; Base point (Gx). */
+       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
+                                  point->x, nbytes);
+
+       /* Write ay param; Base point (Gy). */
+       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
+                                  point->y, nbytes);
+
+       /*
+        * Write the private key into DATA_IN reg.
+        *
+        * Since the DATA_IN register is used to write different values
+        * during the computation, the private key value is later overwritten
+        * with the side-channel-resistance value.
+        */
+       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BX_D,
+                                  scalar, nbytes);
+
+       /* Write operand by/l. */
+       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BY_L,
+                                  sca, nbytes);
+       memzero_explicit(sca, sizeof(sca));
+
+       /* Write p = curve prime (GF modulus). */
+       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
+                                  curve->p, nbytes);
+
+       /* Write a = curve coefficient. */
+       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_A,
+                                  curve->a, nbytes);
+
+       /* Make hardware perform the multiplication. */
+       rc = ocs_ecc_trigger_op(ecc_dev, op_size, OCS_ECC_INST_CALC_D_IDX_A);
+       if (rc)
+               return rc;
+
+       /* Read result. */
+       ocs_ecc_read_cx_out(ecc_dev, result->x, nbytes);
+       ocs_ecc_read_cy_out(ecc_dev, result->y, nbytes);
+
+       return 0;
+}

+
+/**
+ * kmb_ecc_do_scalar_op() - Perform Scalar operation using OCS ECC HW.
+ * @ecc_dev:   The OCS ECC device to use.
+ * @scalar_out:        Where to store the output scalar.
+ * @scalar_a:  Input scalar operand 'a'.
+ * @scalar_b:  Input scalar operand 'b'
+ * @curve:     The curve on which the operation is performed.
+ * @ndigits:   The size of the operands (in digits).
+ * @inst:      The operation to perform (as an OCS ECC instruction).
+ *
+ * Return:     0 on success, negative error code otherwise.
+ */
+static int kmb_ecc_do_scalar_op(struct ocs_ecc_dev *ecc_dev, u64 *scalar_out,
+                               const u64 *scalar_a, const u64 *scalar_b,
+                               const struct ecc_curve *curve,
+                               unsigned int ndigits, const u32 inst)
+{
+       u32 op_size = (ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
+                     OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
+       size_t nbytes = digits_to_bytes(ndigits);
+       int rc;
+
+       /* Wait for the engine to be idle before starting a new operation. */
+       rc = ocs_ecc_wait_idle(ecc_dev);
+       if (rc)
+               return rc;
+
+       /* Send the ecc_start pulse and indicate the operation size. */
+       ocs_ecc_cmd_start(ecc_dev, op_size);
+
+       /* Write ax param; Base point (Gx). */
+       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
+                                  scalar_a, nbytes);
+
+       /* Write ay param; Base point (Gy). */
+       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
+                                  scalar_b, nbytes);
+
+       /* Write p = curve prime (GF modulus). */
+       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
+                                  curve->p, nbytes);
+
+       /* Give instruction A.B or A+B to ECC engine. */
+       rc = ocs_ecc_trigger_op(ecc_dev, op_size, inst);
+       if (rc)
+               return rc;
+
+       ocs_ecc_read_cx_out(ecc_dev, scalar_out, nbytes);
+
+       if (vli_is_zero(scalar_out, ndigits))
+               return -EINVAL;
+
+       return 0;
+}
+
+/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
+static int kmb_ocs_ecc_is_pubkey_valid_partial(struct ocs_ecc_dev *ecc_dev,
+                                              const struct ecc_curve *curve,
+                                              struct ecc_point *pk)
+{
+       u64 xxx[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+       u64 yy[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+       u64 w[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+       int rc;
+
+       if (WARN_ON(pk->ndigits != curve->g.ndigits))
+               return -EINVAL;
+
+       /* Check 1: Verify key is not the zero point. */
+       if (ecc_point_is_zero(pk))
+               return -EINVAL;
+
+       /* Check 2: Verify key is in the range [0, p-1]. */
+       if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
+               return -EINVAL;
+
+       if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
+               return -EINVAL;
+
+       /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
+
+       /* Compute y^2 -> store in yy. */
+       rc = kmb_ecc_do_scalar_op(ecc_dev, yy, pk->y, pk->y, curve, pk->ndigits,
+                                 OCS_ECC_INST_CALC_A_MUL_B_MODP);
+       if (rc)
+               goto exit;
+
+       /* Set w = 3, used for calculating x^3. */
+       w[0] = POW_CUBE;
+       /* Compute x^3 -> store in xxx. */
+       rc = kmb_ecc_do_scalar_op(ecc_dev, xxx, pk->x, w, curve, pk->ndigits,
+                                 OCS_ECC_INST_CALC_A_POW_B_MODP);
+       if (rc)
+               goto exit;
+
+       /* Do a*x -> store in w. */
+       rc = kmb_ecc_do_scalar_op(ecc_dev, w, curve->a, pk->x, curve,
+                                 pk->ndigits,
+                                 OCS_ECC_INST_CALC_A_MUL_B_MODP);
+       if (rc)
+               goto exit;
+
+       /* Do ax + b == w + b; store in w. */
+       rc = kmb_ecc_do_scalar_op(ecc_dev, w, w, curve->b, curve,
+                                 pk->ndigits,
+                                 OCS_ECC_INST_CALC_A_ADD_B_MODP);
+       if (rc)
+               goto exit;
+
+       /* x^3 + ax + b == x^3 + w -> store in w. */
+       rc = kmb_ecc_do_scalar_op(ecc_dev, w, xxx, w, curve, pk->ndigits,
+                                 OCS_ECC_INST_CALC_A_ADD_B_MODP);
+       if (rc)
+               goto exit;
+
+       /* Compare y^2 == x^3 + a·x + b. */
+       rc = vli_cmp(yy, w, pk->ndigits);
+       if (rc)
+               rc = -EINVAL;
+
+exit:
+       memzero_explicit(xxx, sizeof(xxx));
+       memzero_explicit(yy, sizeof(yy));
+       memzero_explicit(w, sizeof(w));
+
+       return rc;
+}
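+
+/*
+ * A worked toy instance of Check 3, for illustration only: on the curve
+ * y^2 = x^3 + 2x + 2 over GF(17), the point (5, 1) gives y^2 = 1 and
+ * x^3 + ax + b = 125 + 10 + 2 = 137 = 8 * 17 + 1 = 1 (mod 17), so both
+ * sides agree. The hardware runs the same comparison on 256/384-bit
+ * operands.
+ */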
+
+/* SP800-56A section 5.6.2.3.3 full verification */
+static int kmb_ocs_ecc_is_pubkey_valid_full(struct ocs_ecc_dev *ecc_dev,
+                                           const struct ecc_curve *curve,
+                                           struct ecc_point *pk)
+{
+       struct ecc_point *nQ;
+       int rc;
+
+       /* Checks 1 through 3 */
+       rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
+       if (rc)
+               return rc;
+
+       /* Check 4: Verify that nQ is the zero point. */
+       nQ = ecc_alloc_point(pk->ndigits);
+       if (!nQ)
+               return -ENOMEM;
+
+       rc = kmb_ecc_point_mult(ecc_dev, nQ, pk, curve->n, curve);
+       if (rc)
+               goto exit;
+
+       if (!ecc_point_is_zero(nQ))
+               rc = -EINVAL;
+
+exit:
+       ecc_free_point(nQ);
+
+       return rc;
+}
+
+static int kmb_ecc_is_key_valid(const struct ecc_curve *curve,
+                               const u64 *private_key, size_t private_key_len)
+{
+       size_t ndigits = curve->g.ndigits;
+       u64 one[KMB_ECC_VLI_MAX_DIGITS] = {1};
+       u64 res[KMB_ECC_VLI_MAX_DIGITS];
+
+       if (private_key_len != digits_to_bytes(ndigits))
+               return -EINVAL;
+
+       if (!private_key)
+               return -EINVAL;
+
+       /* Make sure the private key is in the range [2, n-3]. */
+       if (vli_cmp(one, private_key, ndigits) != -1)
+               return -EINVAL;
+
+       vli_sub(res, curve->n, one, ndigits);
+       vli_sub(res, res, one, ndigits);
+       if (vli_cmp(res, private_key, ndigits) != 1)
+               return -EINVAL;
+
+       return 0;
+}
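+
+/*
+ * The two vli_cmp() calls above encode the [2, n-3] range as follows:
+ * vli_cmp(one, key) must be -1 (1 < key, hence key >= 2), and with
+ * res = n - 2, vli_cmp(res, key) must be 1 (key < n - 2, hence
+ * key <= n - 3).
+ */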
+
+/*
+ * ECC private keys are generated using the method of extra random bits,
+ * equivalent to that described in FIPS 186-4, Appendix B.4.1.
+ *
+ * d = (c mod (n-1)) + 1   where c is a string of random bits, 64 bits longer
+ *                         than requested
+ * 0 <= c mod (n-1) <= n-2, which implies that
+ * 1 <= d <= n-1
+ *
+ * This method generates a private key uniformly distributed in the range
+ * [1, n-1].
+ */
+static int kmb_ecc_gen_privkey(const struct ecc_curve *curve, u64 *privkey)
+{
+       size_t nbytes = digits_to_bytes(curve->g.ndigits);
+       u64 priv[KMB_ECC_VLI_MAX_DIGITS];
+       size_t nbits;
+       int rc;
+
+       nbits = vli_num_bits(curve->n, curve->g.ndigits);
+
+       /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
+       if (nbits < 160 || curve->g.ndigits > ARRAY_SIZE(priv))
+               return -EINVAL;
+
+       /*
+        * FIPS 186-4 recommends that the private key should be obtained from a
+        * RBG with a security strength equal to or greater than the security
+        * strength associated with N.
+        *
+        * The maximum security strength identified by NIST SP800-57pt1r4 for
+        * ECC is 256 (N >= 512).
+        *
+        * This condition is met by the default RNG because it selects a favored
+        * DRBG with a security strength of 256.
+        */
+       if (crypto_get_default_rng())
+               return -EFAULT;
+
+       rc = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
+       crypto_put_default_rng();
+       if (rc)
+               goto cleanup;
+
+       rc = kmb_ecc_is_key_valid(curve, priv, nbytes);
+       if (rc)
+               goto cleanup;
+
+       ecc_swap_digits(priv, privkey, curve->g.ndigits);
+
+cleanup:
+       memzero_explicit(&priv, sizeof(priv));
+
+       return rc;
+}
+
+static int kmb_ocs_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+                                  unsigned int len)
+{
+       struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+       struct ecdh params;
+       int rc = 0;
+
+       rc = crypto_ecdh_decode_key(buf, len, &params);
+       if (rc)
+               goto cleanup;
+
+       /* Ensure the key size is not larger than expected. */
+       if (params.key_size > digits_to_bytes(tctx->curve->g.ndigits)) {
+               rc = -EINVAL;
+               goto cleanup;
+       }
+
+       /* Auto-generate the private key if one was not provided. */
+       if (!params.key || !params.key_size) {
+               rc = kmb_ecc_gen_privkey(tctx->curve, tctx->private_key);
+               goto cleanup;
+       }
+
+       rc = kmb_ecc_is_key_valid(tctx->curve, (const u64 *)params.key,
+                                 params.key_size);
+       if (rc)
+               goto cleanup;
+
+       ecc_swap_digits((const u64 *)params.key, tctx->private_key,
+                       tctx->curve->g.ndigits);
+cleanup:
+       memzero_explicit(&params, sizeof(params));
+
+       if (rc)
+               tctx->curve = NULL;
+
+       return rc;
+}
+
+/* Compute shared secret. */
+static int kmb_ecc_do_shared_secret(struct ocs_ecc_ctx *tctx,
+                                   struct kpp_request *req)
+{
+       struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
+       const struct ecc_curve *curve = tctx->curve;
+       u64 shared_secret[KMB_ECC_VLI_MAX_DIGITS];
+       u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
+       size_t copied, nbytes, pubk_len;
+       struct ecc_point *pk, *result;
+       int rc;
+
+       nbytes = digits_to_bytes(curve->g.ndigits);
+
+       /* Public key is a point, thus it has two coordinates */
+       pubk_len = 2 * nbytes;
+
+       /* Copy public key from SG list to pubk_buf. */
+       copied = sg_copy_to_buffer(req->src,
+                                  sg_nents_for_len(req->src, pubk_len),
+                                  pubk_buf, pubk_len);
+       if (copied != pubk_len)
+               return -EINVAL;
+
+       /* Allocate and initialize public key point. */
+       pk = ecc_alloc_point(curve->g.ndigits);
+       if (!pk)
+               return -ENOMEM;
+
+       ecc_swap_digits(pubk_buf, pk->x, curve->g.ndigits);
+       ecc_swap_digits(&pubk_buf[curve->g.ndigits], pk->y, curve->g.ndigits);
+
+       /*
+        * Check the public key against the following:
+        * Check 1: Verify key is not the zero point.
+        * Check 2: Verify key is in the range [1, p-1].
+        * Check 3: Verify that y^2 == (x^3 + a·x + b) mod p
+        */
+       rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
+       if (rc)
+               goto exit_free_pk;
+
+       /* Allocate point for storing computed shared secret. */
+       result = ecc_alloc_point(pk->ndigits);
+       if (!result) {
+               rc = -ENOMEM;
+               goto exit_free_pk;
+       }
+
+       /* Calculate the shared secret.*/
+       rc = kmb_ecc_point_mult(ecc_dev, result, pk, tctx->private_key, curve);
+       if (rc)
+               goto exit_free_result;
+
+       if (ecc_point_is_zero(result)) {
+               rc = -EFAULT;
+               goto exit_free_result;
+       }
+
+       /* Copy shared secret from point to buffer. */
+       ecc_swap_digits(result->x, shared_secret, result->ndigits);
+
+       /* The request might ask for fewer bytes than we have. */
+       nbytes = min_t(size_t, nbytes, req->dst_len);
+
+       copied = sg_copy_from_buffer(req->dst,
+                                    sg_nents_for_len(req->dst, nbytes),
+                                    shared_secret, nbytes);
+
+       if (copied != nbytes)
+               rc = -EINVAL;
+
+       memzero_explicit(shared_secret, sizeof(shared_secret));
+
+exit_free_result:
+       ecc_free_point(result);
+
+exit_free_pk:
+       ecc_free_point(pk);
+
+       return rc;
+}
+
+/* Compute public key. */
+static int kmb_ecc_do_public_key(struct ocs_ecc_ctx *tctx,
+                                struct kpp_request *req)
+{
+       const struct ecc_curve *curve = tctx->curve;
+       u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
+       struct ecc_point *pk;
+       size_t pubk_len;
+       size_t copied;
+       int rc;
+
+       /* Public key is a point, so it has double the digits. */
+       pubk_len = 2 * digits_to_bytes(curve->g.ndigits);
+
+       pk = ecc_alloc_point(curve->g.ndigits);
+       if (!pk)
+               return -ENOMEM;
+
+       /* Public Key(pk) = priv * G. */
+       rc = kmb_ecc_point_mult(tctx->ecc_dev, pk, &curve->g, tctx->private_key,
+                               curve);
+       if (rc)
+               goto exit;
+
+       /* SP800-56A rev 3 5.6.2.1.3 key check */
+       if (kmb_ocs_ecc_is_pubkey_valid_full(tctx->ecc_dev, curve, pk)) {
+               rc = -EAGAIN;
+               goto exit;
+       }
+
+       /* Copy public key from point to buffer. */
+       ecc_swap_digits(pk->x, pubk_buf, pk->ndigits);
+       ecc_swap_digits(pk->y, &pubk_buf[pk->ndigits], pk->ndigits);
+
+       /* Copy public key to req->dst. */
+       copied = sg_copy_from_buffer(req->dst,
+                                    sg_nents_for_len(req->dst, pubk_len),
+                                    pubk_buf, pubk_len);
+
+       if (copied != pubk_len)
+               rc = -EINVAL;
+
+exit:
+       ecc_free_point(pk);
+
+       return rc;
+}
+
+static int kmb_ocs_ecc_do_one_request(struct crypto_engine *engine,
+                                     void *areq)
+{
+       struct kpp_request *req = container_of(areq, struct kpp_request, base);
+       struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+       struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
+       int rc;
+
+       if (req->src)
+               rc = kmb_ecc_do_shared_secret(tctx, req);
+       else
+               rc = kmb_ecc_do_public_key(tctx, req);
+
+       crypto_finalize_kpp_request(ecc_dev->engine, req, rc);
+
+       return 0;
+}
+
+static int kmb_ocs_ecdh_generate_public_key(struct kpp_request *req)
+{
+       struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+       const struct ecc_curve *curve = tctx->curve;
+
+       /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
+       if (!tctx->curve)
+               return -EINVAL;
+
+       /* Ensure dst is present. */
+       if (!req->dst)
+               return -EINVAL;
+
+       /* Check the request dst is big enough to hold the public key. */
+       if (req->dst_len < (2 * digits_to_bytes(curve->g.ndigits)))
+               return -EINVAL;
+
+       /* 'src' must not be present when generating the public key. */
+       if (req->src)
+               return -EINVAL;
+
+       return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
+                                                    req);
+}
+
+static int kmb_ocs_ecdh_compute_shared_secret(struct kpp_request *req)
+{
+       struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+       const struct ecc_curve *curve = tctx->curve;
+
+       /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
+       if (!tctx->curve)
+               return -EINVAL;
+
+       /* Ensure dst is present. */
+       if (!req->dst)
+               return -EINVAL;
+
+       /* Ensure src is present. */
+       if (!req->src)
+               return -EINVAL;
+
+       /*
+        * req->src is expected to be the peer's public key, so its length
+        * must be 2 * coordinate size (in bytes).
+        */
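+       /*
+        * E.g., on NIST P-256 each coordinate is 32 bytes, so req->src_len
+        * must be exactly 64.
+        */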
+       if (req->src_len != 2 * digits_to_bytes(curve->g.ndigits))
+               return -EINVAL;
+
+       return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
+                                                    req);
+}
+
+static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id)
+{
+       memset(tctx, 0, sizeof(*tctx));
+
+       tctx->ecc_dev = kmb_ocs_ecc_find_dev(tctx);
+
+       if (IS_ERR(tctx->ecc_dev)) {
+               pr_err("Failed to find the device : %ld\n",
+                      PTR_ERR(tctx->ecc_dev));
+               return PTR_ERR(tctx->ecc_dev);
+       }
+
+       tctx->curve = ecc_get_curve(curve_id);
+       if (!tctx->curve)
+               return -EOPNOTSUPP;
+
+       tctx->engine_ctx.op.prepare_request = NULL;
+       tctx->engine_ctx.op.do_one_request = kmb_ocs_ecc_do_one_request;
+       tctx->engine_ctx.op.unprepare_request = NULL;
+
+       return 0;
+}
+
+static int kmb_ocs_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+       struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+       return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P256);
+}
+
+static int kmb_ocs_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
+{
+       struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+       return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P384);
+}
+
+static void kmb_ocs_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+       struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+       /* Clear the whole private key, not just its first digit. */
+       memzero_explicit(tctx->private_key, sizeof(tctx->private_key));
+}
+
+static unsigned int kmb_ocs_ecdh_max_size(struct crypto_kpp *tfm)
+{
+       struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+       /* Public key is made of two coordinates, so double the digits. */
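+       /* E.g., 64 bytes for NIST P-256 and 96 bytes for NIST P-384. */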
+       return digits_to_bytes(tctx->curve->g.ndigits) * 2;
+}
+
+static struct kpp_alg ocs_ecdh_p256 = {
+       .set_secret = kmb_ocs_ecdh_set_secret,
+       .generate_public_key = kmb_ocs_ecdh_generate_public_key,
+       .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+       .init = kmb_ocs_ecdh_nist_p256_init_tfm,
+       .exit = kmb_ocs_ecdh_exit_tfm,
+       .max_size = kmb_ocs_ecdh_max_size,
+       .base = {
+               .cra_name = "ecdh-nist-p256",
+               .cra_driver_name = "ecdh-nist-p256-keembay-ocs",
+               .cra_priority = KMB_OCS_ECC_PRIORITY,
+               .cra_module = THIS_MODULE,
+               .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
+       },
+};
+
+static struct kpp_alg ocs_ecdh_p384 = {
+       .set_secret = kmb_ocs_ecdh_set_secret,
+       .generate_public_key = kmb_ocs_ecdh_generate_public_key,
+       .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+       .init = kmb_ocs_ecdh_nist_p384_init_tfm,
+       .exit = kmb_ocs_ecdh_exit_tfm,
+       .max_size = kmb_ocs_ecdh_max_size,
+       .base = {
+               .cra_name = "ecdh-nist-p384",
+               .cra_driver_name = "ecdh-nist-p384-keembay-ocs",
+               .cra_priority = KMB_OCS_ECC_PRIORITY,
+               .cra_module = THIS_MODULE,
+               .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
+       },
+};
+
+static irqreturn_t ocs_ecc_irq_handler(int irq, void *dev_id)
+{
+       struct ocs_ecc_dev *ecc_dev = dev_id;
+       u32 status;
+
+       /*
+        * Read the status register and write it back to clear the
+        * DONE_INT_STATUS bit.
+        */
+       status = ioread32(ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
+       iowrite32(status, ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
+
+       if (!(status & HW_OCS_ECC_ISR_INT_STATUS_DONE))
+               return IRQ_NONE;
+
+       complete(&ecc_dev->irq_done);
+
+       return IRQ_HANDLED;
+}
+
+static int kmb_ocs_ecc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ocs_ecc_dev *ecc_dev;
+       int rc;
+
+       ecc_dev = devm_kzalloc(dev, sizeof(*ecc_dev), GFP_KERNEL);
+       if (!ecc_dev)
+               return -ENOMEM;
+
+       ecc_dev->dev = dev;
+
+       platform_set_drvdata(pdev, ecc_dev);
+
+       INIT_LIST_HEAD(&ecc_dev->list);
+       init_completion(&ecc_dev->irq_done);
+
+       /* Get base register address. */
+       ecc_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(ecc_dev->base_reg)) {
+               dev_err(dev, "Failed to get base address\n");
+               rc = PTR_ERR(ecc_dev->base_reg);
+               goto list_del;
+       }
+
+       /* Get and request IRQ. */
+       ecc_dev->irq = platform_get_irq(pdev, 0);
+       if (ecc_dev->irq < 0) {
+               rc = ecc_dev->irq;
+               goto list_del;
+       }
+
+       rc = devm_request_threaded_irq(dev, ecc_dev->irq, ocs_ecc_irq_handler,
+                                      NULL, 0, "keembay-ocs-ecc", ecc_dev);
+       if (rc < 0) {
+               dev_err(dev, "Could not request IRQ\n");
+               goto list_del;
+       }
+
+       /* Add device to the list of OCS ECC devices. */
+       spin_lock(&ocs_ecc.lock);
+       list_add_tail(&ecc_dev->list, &ocs_ecc.dev_list);
+       spin_unlock(&ocs_ecc.lock);
+
+       /* Initialize crypto engine. */
+       ecc_dev->engine = crypto_engine_alloc_init(dev, 1);
+       if (!ecc_dev->engine) {
+               dev_err(dev, "Could not allocate crypto engine\n");
+               rc = -ENOMEM;
+               goto list_del;
+       }
+
+       rc = crypto_engine_start(ecc_dev->engine);
+       if (rc) {
+               dev_err(dev, "Could not start crypto engine\n");
+               goto cleanup;
+       }
+
+       /* Register the KPP algo. */
+       rc = crypto_register_kpp(&ocs_ecdh_p256);
+       if (rc) {
+               dev_err(dev,
+                       "Could not register OCS algorithms with Crypto API\n");
+               goto cleanup;
+       }
+
+       rc = crypto_register_kpp(&ocs_ecdh_p384);
+       if (rc) {
+               dev_err(dev,
+                       "Could not register OCS algorithms with Crypto API\n");
+               goto ocs_ecdh_p384_error;
+       }
+
+       return 0;
+
+ocs_ecdh_p384_error:
+       crypto_unregister_kpp(&ocs_ecdh_p256);
+
+cleanup:
+       crypto_engine_exit(ecc_dev->engine);
+
+list_del:
+       spin_lock(&ocs_ecc.lock);
+       list_del(&ecc_dev->list);
+       spin_unlock(&ocs_ecc.lock);
+
+       return rc;
+}
+
+static int kmb_ocs_ecc_remove(struct platform_device *pdev)
+{
+       struct ocs_ecc_dev *ecc_dev;
+
+       ecc_dev = platform_get_drvdata(pdev);
+
+       crypto_unregister_kpp(&ocs_ecdh_p384);
+       crypto_unregister_kpp(&ocs_ecdh_p256);
+
+       spin_lock(&ocs_ecc.lock);
+       list_del(&ecc_dev->list);
+       spin_unlock(&ocs_ecc.lock);
+
+       crypto_engine_exit(ecc_dev->engine);
+
+       return 0;
+}
+
+/* Device tree driver match. */
+static const struct of_device_id kmb_ocs_ecc_of_match[] = {
+       {
+               .compatible = "intel,keembay-ocs-ecc",
+       },
+       {}
+};
+
+/* The OCS driver is a platform device. */
+static struct platform_driver kmb_ocs_ecc_driver = {
+       .probe = kmb_ocs_ecc_probe,
+       .remove = kmb_ocs_ecc_remove,
+       .driver = {
+                       .name = DRV_NAME,
+                       .of_match_table = kmb_ocs_ecc_of_match,
+               },
+};
+module_platform_driver(kmb_ocs_ecc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel Keem Bay OCS ECC Driver");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p256");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p384");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p256-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p384-keembay-ocs");
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
new file mode 100644 (file)
index 0000000..d4bcbed
--- /dev/null
@@ -0,0 +1,1264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha2.h>
+#include <crypto/sm3.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+
+#include "ocs-hcu.h"
+
+#define DRV_NAME       "keembay-ocs-hcu"
+
+/* Flag marking a final request. */
+#define REQ_FINAL                      BIT(0)
+/* Flag marking a HMAC request. */
+#define REQ_FLAGS_HMAC                 BIT(1)
+/* Flag set when HW HMAC is being used. */
+#define REQ_FLAGS_HMAC_HW              BIT(2)
+/* Flag set when SW HMAC is being used. */
+#define REQ_FLAGS_HMAC_SW              BIT(3)
+
+/**
+ * struct ocs_hcu_ctx - OCS HCU transform context.
+ * @engine_ctx:  Crypto Engine context.
+ * @hcu_dev:     The OCS HCU device used by the transformation.
+ * @key:         The key (used only for HMAC transformations).
+ * @key_len:     The length of the key.
+ * @is_sm3_tfm:  Whether or not this is an SM3 transformation.
+ * @is_hmac_tfm: Whether or not this is a HMAC transformation.
+ */
+struct ocs_hcu_ctx {
+       struct crypto_engine_ctx engine_ctx;
+       struct ocs_hcu_dev *hcu_dev;
+       u8 key[SHA512_BLOCK_SIZE];
+       size_t key_len;
+       bool is_sm3_tfm;
+       bool is_hmac_tfm;
+};
+
+/**
+ * struct ocs_hcu_rctx - Context for the request.
+ * @hcu_dev:        OCS HCU device to be used to service the request.
+ * @flags:          Flags tracking request status.
+ * @algo:           Algorithm to use for the request.
+ * @blk_sz:         Block size of the transformation / request.
+ * @dig_sz:         Digest size of the transformation / request.
+ * @dma_list:       OCS DMA linked list.
+ * @hash_ctx:       OCS HCU hashing context.
+ * @buffer:         Buffer to store a partial block of data and the SW HMAC
+ *                  artifacts (ipad, opad, etc.).
+ * @buf_cnt:        Number of bytes currently stored in the buffer.
+ * @buf_dma_addr:   The DMA address of @buffer (when mapped).
+ * @buf_dma_count:  The number of bytes in @buffer currently DMA-mapped.
+ * @sg:             Head of the scatterlist entries containing data.
+ * @sg_data_total:  Total data in the SG list at any time.
+ * @sg_data_offset: Offset into the data of the current individual SG node.
+ * @sg_dma_nents:   Number of sg entries mapped in dma_list.
+ */
+struct ocs_hcu_rctx {
+       struct ocs_hcu_dev      *hcu_dev;
+       u32                     flags;
+       enum ocs_hcu_algo       algo;
+       size_t                  blk_sz;
+       size_t                  dig_sz;
+       struct ocs_hcu_dma_list *dma_list;
+       struct ocs_hcu_hash_ctx hash_ctx;
+       /*
+        * Buffer is double the block size because we need space for SW HMAC
+        * artifacts, i.e.:
+        * - ipad (1 block) + a possible partial block of data.
+        * - opad (1 block) + digest of H(k ^ ipad || m)
+        */
+       u8                      buffer[2 * SHA512_BLOCK_SIZE];
+       size_t                  buf_cnt;
+       dma_addr_t              buf_dma_addr;
+       size_t                  buf_dma_count;
+       struct scatterlist      *sg;
+       unsigned int            sg_data_total;
+       unsigned int            sg_data_offset;
+       unsigned int            sg_dma_nents;
+};
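+
+/*
+ * Buffer sizing example: with SHA-512 the block size is 128 bytes, so
+ * @buffer above is 256 bytes; the opad step needs at most one block
+ * (k ^ opad) plus the 64-byte inner digest, which fits.
+ */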
+
+/**
+ * struct ocs_hcu_drv - Driver data
+ * @dev_list:  The list of HCU devices.
+ * @lock:      The lock protecting dev_list.
+ */
+struct ocs_hcu_drv {
+       struct list_head dev_list;
+       spinlock_t lock; /* Protects dev_list. */
+};
+
+static struct ocs_hcu_drv ocs_hcu = {
+       .dev_list = LIST_HEAD_INIT(ocs_hcu.dev_list),
+       .lock = __SPIN_LOCK_UNLOCKED(ocs_hcu.lock),
+};
+
+/*
+ * Return the total amount of data in the request; that is: the data in the
+ * request buffer + the data in the sg list.
+ */
+static inline unsigned int kmb_get_total_data(struct ocs_hcu_rctx *rctx)
+{
+       return rctx->sg_data_total + rctx->buf_cnt;
+}
+
+/* Move remaining content of scatter-gather list to context buffer. */
+static int flush_sg_to_ocs_buffer(struct ocs_hcu_rctx *rctx)
+{
+       size_t count;
+
+       if (rctx->sg_data_total > (sizeof(rctx->buffer) - rctx->buf_cnt)) {
+               WARN(1, "%s: sg data does not fit in buffer\n", __func__);
+               return -EINVAL;
+       }
+
+       while (rctx->sg_data_total) {
+               if (!rctx->sg) {
+                       WARN(1, "%s: unexpected NULL sg\n", __func__);
+                       return -EINVAL;
+               }
+               /*
+                * If current sg has been fully processed, skip to the next
+                * one.
+                */
+               if (rctx->sg_data_offset == rctx->sg->length) {
+                       rctx->sg = sg_next(rctx->sg);
+                       rctx->sg_data_offset = 0;
+                       continue;
+               }
+               /*
+                * Determine the maximum amount of data to copy from the node:
+                * the minimum of the length left in the sg node and the total
+                * data remaining in the request.
+                */
+               count = min(rctx->sg->length - rctx->sg_data_offset,
+                           rctx->sg_data_total);
+               /* Copy from scatter-list entry to context buffer. */
+               scatterwalk_map_and_copy(&rctx->buffer[rctx->buf_cnt],
+                                        rctx->sg, rctx->sg_data_offset,
+                                        count, 0);
+
+               rctx->sg_data_offset += count;
+               rctx->sg_data_total -= count;
+               rctx->buf_cnt += count;
+       }
+
+       return 0;
+}
+
+static struct ocs_hcu_dev *kmb_ocs_hcu_find_dev(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
+
+       /* If the HCU device for the request was previously set, return it. */
+       if (tctx->hcu_dev)
+               return tctx->hcu_dev;
+
+       /*
+        * Otherwise, get the first HCU device available (there should be one
+        * and only one device).
+        */
+       spin_lock_bh(&ocs_hcu.lock);
+       tctx->hcu_dev = list_first_entry_or_null(&ocs_hcu.dev_list,
+                                                struct ocs_hcu_dev,
+                                                list);
+       spin_unlock_bh(&ocs_hcu.lock);
+
+       return tctx->hcu_dev;
+}
+
+/* Free OCS DMA linked list and DMA-able context buffer. */
+static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
+                                   struct ocs_hcu_rctx *rctx)
+{
+       struct ocs_hcu_dev *hcu_dev = rctx->hcu_dev;
+       struct device *dev = hcu_dev->dev;
+
+       /* Unmap rctx->buffer (if mapped). */
+       if (rctx->buf_dma_count) {
+               dma_unmap_single(dev, rctx->buf_dma_addr, rctx->buf_dma_count,
+                                DMA_TO_DEVICE);
+               rctx->buf_dma_count = 0;
+       }
+
+       /* Unmap req->src (if mapped). */
+       if (rctx->sg_dma_nents) {
+               dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
+               rctx->sg_dma_nents = 0;
+       }
+
+       /* Free dma_list (if allocated). */
+       if (rctx->dma_list) {
+               ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
+               rctx->dma_list = NULL;
+       }
+}
+
+/*
+ * Prepare for DMA operation:
+ * - DMA-map request context buffer (if needed)
+ * - DMA-map SG list (only the entries to be processed, see note below)
+ * - Allocate OCS HCU DMA linked list (number of elements = SG entries to
+ *   process + context buffer (if not empty)).
+ * - Add DMA-mapped request context buffer to OCS HCU DMA list.
+ * - Add SG entries to DMA list.
+ *
+ * Note: if this is a final request, we process all the data in the SG list,
+ * otherwise we can only process up to the maximum amount of block-aligned data
+ * (the remainder will be put into the context buffer and processed in the next
+ * request).
+ */
+static int kmb_ocs_dma_prepare(struct ahash_request *req)
+{
+       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+       struct device *dev = rctx->hcu_dev->dev;
+       unsigned int remainder = 0;
+       unsigned int total;
+       size_t nents;
+       size_t count;
+       int rc;
+       int i;
+
+       /* This function should be called only when there is data to process. */
+       total = kmb_get_total_data(rctx);
+       if (!total)
+               return -EINVAL;
+
+       /*
+        * If this is not a final DMA (terminated DMA), the data passed to the
+        * HCU must be aligned to the block size; compute the remainder data to
+        * be processed in the next request.
+        */
+       if (!(rctx->flags & REQ_FINAL))
+               remainder = total % rctx->blk_sz;
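+       /*
+        * E.g., with a 64-byte block size and 200 bytes of pending data on a
+        * non-final request, remainder = 200 % 64 = 8; those 8 bytes are not
+        * processed now but buffered for the next request.
+        */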
+
+       /* Determine the number of scatter gather list entries to process. */
+       nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
+
+       /* If there are entries to process, map them. */
+       if (nents) {
+               rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
+                                               DMA_TO_DEVICE);
+               if (!rctx->sg_dma_nents) {
+                       dev_err(dev, "Failed to MAP SG\n");
+                       rc = -ENOMEM;
+                       goto cleanup;
+               }
+               /*
+                * The value returned by dma_map_sg() can be < nents; so update
+                * nents accordingly.
+                */
+               nents = rctx->sg_dma_nents;
+       }
+
+       /*
+        * If context buffer is not empty, map it and add extra DMA entry for
+        * it.
+        */
+       if (rctx->buf_cnt) {
+               rctx->buf_dma_addr = dma_map_single(dev, rctx->buffer,
+                                                   rctx->buf_cnt,
+                                                   DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, rctx->buf_dma_addr)) {
+                       dev_err(dev, "Failed to map request context buffer\n");
+                       rc = -ENOMEM;
+                       goto cleanup;
+               }
+               rctx->buf_dma_count = rctx->buf_cnt;
+               /* Increase number of dma entries. */
+               nents++;
+       }
+
+       /* Allocate OCS HCU DMA list. */
+       rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);
+       if (!rctx->dma_list) {
+               rc = -ENOMEM;
+               goto cleanup;
+       }
+
+       /* Add the request context buffer (if previously DMA-mapped). */
+       if (rctx->buf_dma_count) {
+               rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,
+                                              rctx->buf_dma_addr,
+                                              rctx->buf_dma_count);
+               if (rc)
+                       goto cleanup;
+       }
+
+       /* Add the SG nodes to be processed to the DMA linked list. */
+       for_each_sg(req->src, rctx->sg, rctx->sg_dma_nents, i) {
+               /*
+                * The number of bytes to add to the list entry is the minimum
+                * between:
+                * - The DMA length of the SG entry.
+                * - The data left to be processed.
+                */
+               count = min(rctx->sg_data_total - remainder,
+                           sg_dma_len(rctx->sg) - rctx->sg_data_offset);
+               /*
+                * Do not create a zero length DMA descriptor. Check in case of
+                * zero length SG node.
+                */
+               if (count == 0)
+                       continue;
+               /* Add sg to HCU DMA list. */
+               rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev,
+                                              rctx->dma_list,
+                                              rctx->sg->dma_address,
+                                              count);
+               if (rc)
+                       goto cleanup;
+
+               /* Update amount of data remaining in SG list. */
+               rctx->sg_data_total -= count;
+
+               /*
+                * If the remaining data is equal to the remainder (note: the
+                * 'less than' case should never happen in practice), we are
+                * done: update the offset and exit the loop.
+                */
+               if (rctx->sg_data_total <= remainder) {
+                       WARN_ON(rctx->sg_data_total < remainder);
+                       rctx->sg_data_offset += count;
+                       break;
+               }
+
+               /*
+                * If we get here, it is because we need to process the next
+                * sg in the list; set the offset within the sg to 0.
+                */
+               rctx->sg_data_offset = 0;
+       }
+
+       return 0;
+cleanup:
+       dev_err(dev, "Failed to prepare DMA.\n");
+       kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+       return rc;
+}
+
+static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
+{
+       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+
+       /* Clear buffer of any data. */
+       memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
+}
+
+static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
+{
+       struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+
+       if (!hcu_dev)
+               return -ENOENT;
+
+       return crypto_transfer_hash_request_to_engine(hcu_dev->engine, req);
+}
+
+static int prepare_ipad(struct ahash_request *req)
+{
+       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+       int i;
+
+       WARN(rctx->buf_cnt, "%s: Context buffer is not empty\n", __func__);
+       WARN(!(rctx->flags & REQ_FLAGS_HMAC_SW),
+            "%s: HMAC_SW flag is not set\n", __func__);
+       /*
+        * Key length must be equal to the block size. If the key is shorter,
+        * we pad it with zeros (note: the key cannot be longer, since
+        * longer keys are hashed by kmb_ocs_hcu_setkey()).
+        */
+       if (ctx->key_len > rctx->blk_sz) {
+               WARN(1, "%s: Invalid key length in tfm context\n", __func__);
+               return -EINVAL;
+       }
+       memzero_explicit(&ctx->key[ctx->key_len],
+                        rctx->blk_sz - ctx->key_len);
+       ctx->key_len = rctx->blk_sz;
+       /*
+        * Prepare IPAD for HMAC. Only done for first block.
+        * HMAC(k,m) = H(k ^ opad || H(k ^ ipad || m))
+        * k ^ ipad will be first hashed block.
+        * k ^ opad will be calculated in the final request.
+        * Only needed if not using HW HMAC.
+        */
+       for (i = 0; i < rctx->blk_sz; i++)
+               rctx->buffer[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
+       rctx->buf_cnt = rctx->blk_sz;
+
+       return 0;
+}
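+
+/*
+ * For reference, the SW-assisted HMAC built from prepare_ipad() and the
+ * opad step in kmb_ocs_hcu_do_one_request() is the standard two-pass
+ * construction, with H the underlying hash and k the block-sized key:
+ *
+ *   inner = H((k ^ ipad) || message)   [ipad step, spread across updates]
+ *   hmac  = H((k ^ opad) || inner)     [opad step, done at finalization]
+ */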
+
+static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
+{
+       struct ahash_request *req = container_of(areq, struct ahash_request,
+                                                base);
+       struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+       struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
+       int rc;
+       int i;
+
+       if (!hcu_dev) {
+               rc = -ENOENT;
+               goto error;
+       }
+
+       /*
+        * If hardware HMAC flag is set, perform HMAC in hardware.
+        *
+        * NOTE: this flag implies REQ_FINAL && kmb_get_total_data(rctx)
+        */
+       if (rctx->flags & REQ_FLAGS_HMAC_HW) {
+               /* Map input data into the HCU DMA linked list. */
+               rc = kmb_ocs_dma_prepare(req);
+               if (rc)
+                       goto error;
+
+               rc = ocs_hcu_hmac(hcu_dev, rctx->algo, tctx->key, tctx->key_len,
+                                 rctx->dma_list, req->result, rctx->dig_sz);
+
+               /* Unmap data and free DMA list regardless of return code. */
+               kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+               /* Process previous return code. */
+               if (rc)
+                       goto error;
+
+               goto done;
+       }
+
+       /* Handle update request case. */
+       if (!(rctx->flags & REQ_FINAL)) {
+               /* Update should always have input data. */
+               if (!kmb_get_total_data(rctx))
+                       return -EINVAL;
+
+               /* Map input data into the HCU DMA linked list. */
+               rc = kmb_ocs_dma_prepare(req);
+               if (rc)
+                       goto error;
+
+               /* Do hashing step. */
+               rc = ocs_hcu_hash_update(hcu_dev, &rctx->hash_ctx,
+                                        rctx->dma_list);
+
+               /* Unmap data and free DMA list regardless of return code. */
+               kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+               /* Process previous return code. */
+               if (rc)
+                       goto error;
+
+               /*
+                * Reset request buffer count (data in the buffer was just
+                * processed).
+                */
+               rctx->buf_cnt = 0;
+               /*
+                * Move remaining sg data into the request buffer, so that it
+                * will be processed during the next request.
+                *
+                * NOTE: we have remaining data if kmb_get_total_data() was not
+                * a multiple of block size.
+                */
+               rc = flush_sg_to_ocs_buffer(rctx);
+               if (rc)
+                       goto error;
+
+               goto done;
+       }
+
+       /* If we get here, this is a final request. */
+
+       /* If there is data to process, use finup. */
+       if (kmb_get_total_data(rctx)) {
+               /* Map input data into the HCU DMA linked list. */
+               rc = kmb_ocs_dma_prepare(req);
+               if (rc)
+                       goto error;
+
+               /* Do hashing step. */
+               rc = ocs_hcu_hash_finup(hcu_dev, &rctx->hash_ctx,
+                                       rctx->dma_list,
+                                       req->result, rctx->dig_sz);
+               /* Free DMA list regardless of return code. */
+               kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+               /* Process previous return code. */
+               if (rc)
+                       goto error;
+
+       } else {  /* Otherwise (if we have no data), use final. */
+               rc = ocs_hcu_hash_final(hcu_dev, &rctx->hash_ctx, req->result,
+                                       rctx->dig_sz);
+               if (rc)
+                       goto error;
+       }
+
+       /*
+        * If we are finalizing a SW HMAC request, we just computed the result
+        * of: H(k ^ ipad || m).
+        *
+        * We now need to complete the HMAC calculation with the OPAD step,
+        * that is, we need to compute H(k ^ opad || digest), where digest is
+        * the digest we just obtained, i.e., H(k ^ ipad || m).
+        */
+       if (rctx->flags & REQ_FLAGS_HMAC_SW) {
+               /*
+                * Compute k ^ opad and store it in the request buffer (which
+                * is not used anymore at this point).
+                * Note: the key has already been padded / hashed (so
+                * keylen == blksz).
+                */
+               WARN_ON(tctx->key_len != rctx->blk_sz);
+               for (i = 0; i < rctx->blk_sz; i++)
+                       rctx->buffer[i] = tctx->key[i] ^ HMAC_OPAD_VALUE;
+               /* Now append the digest to the rest of the buffer. */
+               for (i = 0; i < rctx->dig_sz; i++)
+                       rctx->buffer[rctx->blk_sz + i] = req->result[i];
+
+               /* Now hash the buffer to obtain the final HMAC. */
+               rc = ocs_hcu_digest(hcu_dev, rctx->algo, rctx->buffer,
+                                   rctx->blk_sz + rctx->dig_sz, req->result,
+                                   rctx->dig_sz);
+               if (rc)
+                       goto error;
+       }
+
+       /* Perform secure clean-up. */
+       kmb_ocs_hcu_secure_cleanup(req);
+done:
+       crypto_finalize_hash_request(hcu_dev->engine, req, 0);
+
+       return 0;
+
+error:
+       kmb_ocs_hcu_secure_cleanup(req);
+       return rc;
+}
+
+static int kmb_ocs_hcu_init(struct ahash_request *req)
+{
+       struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+
+       if (!hcu_dev)
+               return -ENOENT;
+
+       /* Initialize entire request context to zero. */
+       memset(rctx, 0, sizeof(*rctx));
+
+       rctx->hcu_dev = hcu_dev;
+       rctx->dig_sz = crypto_ahash_digestsize(tfm);
+
+       switch (rctx->dig_sz) {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+       case SHA224_DIGEST_SIZE:
+               rctx->blk_sz = SHA224_BLOCK_SIZE;
+               rctx->algo = OCS_HCU_ALGO_SHA224;
+               break;
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+       case SHA256_DIGEST_SIZE:
+               rctx->blk_sz = SHA256_BLOCK_SIZE;
+               /*
+                * SHA256 and SM3 have the same digest size: use info from tfm
+                * context to find out which one we should use.
+                */
+               rctx->algo = ctx->is_sm3_tfm ? OCS_HCU_ALGO_SM3 :
+                                              OCS_HCU_ALGO_SHA256;
+               break;
+       case SHA384_DIGEST_SIZE:
+               rctx->blk_sz = SHA384_BLOCK_SIZE;
+               rctx->algo = OCS_HCU_ALGO_SHA384;
+               break;
+       case SHA512_DIGEST_SIZE:
+               rctx->blk_sz = SHA512_BLOCK_SIZE;
+               rctx->algo = OCS_HCU_ALGO_SHA512;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Initialize intermediate data. */
+       ocs_hcu_hash_init(&rctx->hash_ctx, rctx->algo);
+
+       /* If this is a HMAC request, set the HMAC flag. */
+       if (ctx->is_hmac_tfm)
+               rctx->flags |= REQ_FLAGS_HMAC;
+
+       return 0;
+}
+
+static int kmb_ocs_hcu_update(struct ahash_request *req)
+{
+       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+       int rc;
+
+       if (!req->nbytes)
+               return 0;
+
+       rctx->sg_data_total = req->nbytes;
+       rctx->sg_data_offset = 0;
+       rctx->sg = req->src;
+
+       /*
+        * If we are doing HMAC, then we must use SW-assisted HMAC, since HW
+        * HMAC does not support context switching (it can only be used
+        * with finup() or digest()).
+        */
+       if (rctx->flags & REQ_FLAGS_HMAC &&
+           !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
+               rctx->flags |= REQ_FLAGS_HMAC_SW;
+               rc = prepare_ipad(req);
+               if (rc)
+                       return rc;
+       }
+
+       /*
+        * If remaining sg_data fits into ctx buffer, just copy it there; we'll
+        * process it at the next update() or final().
+        */
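+       /*
+        * For instance, a stream of small update() calls is accumulated in
+        * the 256-byte context buffer and only dispatched to the engine once
+        * it no longer fits (or at finalization).
+        */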
+       if (rctx->sg_data_total <= (sizeof(rctx->buffer) - rctx->buf_cnt))
+               return flush_sg_to_ocs_buffer(rctx);
+
+       return kmb_ocs_hcu_handle_queue(req);
+}
+
+/* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
+static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
+{
+       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+       int rc;
+
+       rctx->flags |= REQ_FINAL;
+
+       /*
+        * If this is a HMAC request and, so far, we didn't have to switch to
+        * SW HMAC, check if we can use HW HMAC.
+        */
+       if (rctx->flags & REQ_FLAGS_HMAC &&
+           !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
+               /*
+                * If we are here, it means we never processed any data so
+                * far, so we can use HW HMAC, but only if there is some data
+                * to process (since OCS HW HMAC does not support zero-length
+                * messages) and the key length is supported by the hardware
+                * (OCS HCU HW only supports keys up to 64 bytes); if HW HMAC
+                * cannot be used, fall back to SW-assisted HMAC.
+                */
+               if (kmb_get_total_data(rctx) &&
+                   ctx->key_len <= OCS_HCU_HW_KEY_LEN) {
+                       rctx->flags |= REQ_FLAGS_HMAC_HW;
+               } else {
+                       rctx->flags |= REQ_FLAGS_HMAC_SW;
+                       rc = prepare_ipad(req);
+                       if (rc)
+                               return rc;
+               }
+       }
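+       /*
+        * For example, an hmac(sha256) digest() of a non-empty message with
+        * a 32-byte key takes the HW HMAC path, while the same key used via
+        * update() calls has already switched the request to the SW
+        * ipad/opad scheme above.
+        */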
+
+       return kmb_ocs_hcu_handle_queue(req);
+}
+
+static int kmb_ocs_hcu_final(struct ahash_request *req)
+{
+       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+
+       rctx->sg_data_total = 0;
+       rctx->sg_data_offset = 0;
+       rctx->sg = NULL;
+
+       return kmb_ocs_hcu_fin_common(req);
+}
+
+static int kmb_ocs_hcu_finup(struct ahash_request *req)
+{
+       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+
+       rctx->sg_data_total = req->nbytes;
+       rctx->sg_data_offset = 0;
+       rctx->sg = req->src;
+
+       return kmb_ocs_hcu_fin_common(req);
+}
+
+static int kmb_ocs_hcu_digest(struct ahash_request *req)
+{
+       struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+       int rc;
+
+       if (!hcu_dev)
+               return -ENOENT;
+
+       rc = kmb_ocs_hcu_init(req);
+       if (rc)
+               return rc;
+
+       return kmb_ocs_hcu_finup(req);
+}
+
+static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
+{
+       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+
+       /* Intermediate data is always stored and applied per request. */
+       memcpy(out, rctx, sizeof(*rctx));
+
+       return 0;
+}
+
+static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
+{
+       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+
+       /* Intermediate data is always stored and applied per request. */
+       memcpy(rctx, in, sizeof(*rctx));
+
+       return 0;
+}
+
+static int kmb_ocs_hcu_setkey(struct crypto_ahash *tfm, const u8 *key,
+                             unsigned int keylen)
+{
+       unsigned int digestsize = crypto_ahash_digestsize(tfm);
+       struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+       size_t blk_sz = crypto_ahash_blocksize(tfm);
+       struct crypto_ahash *ahash_tfm;
+       struct ahash_request *req;
+       struct crypto_wait wait;
+       struct scatterlist sg;
+       const char *alg_name;
+       int rc;
+
+       /*
+        * Key length must be equal to block size:
+        * - If key is shorter, we are done for now (the key will be padded
+        *   later on); this is to maximize the use of HW HMAC (which works
+        *   only for keys <= 64 bytes).
+        * - If key is longer, we hash it.
+        */
+       if (keylen <= blk_sz) {
+               memcpy(ctx->key, key, keylen);
+               ctx->key_len = keylen;
+               return 0;
+       }
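+
+       /*
+        * E.g., for hmac(sha256) (64-byte block size), a 20-byte key is kept
+        * as-is here and zero-padded later by prepare_ipad(), while a
+        * 100-byte key falls through and is hashed down to 32 bytes below.
+        */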
+
+       switch (digestsize) {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+       case SHA224_DIGEST_SIZE:
+               alg_name = "sha224-keembay-ocs";
+               break;
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+       case SHA256_DIGEST_SIZE:
+               alg_name = ctx->is_sm3_tfm ? "sm3-keembay-ocs" :
+                                            "sha256-keembay-ocs";
+               break;
+       case SHA384_DIGEST_SIZE:
+               alg_name = "sha384-keembay-ocs";
+               break;
+       case SHA512_DIGEST_SIZE:
+               alg_name = "sha512-keembay-ocs";
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
+       if (IS_ERR(ahash_tfm))
+               return PTR_ERR(ahash_tfm);
+
+       req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+       if (!req) {
+               rc = -ENOMEM;
+               goto err_free_ahash;
+       }
+
+       crypto_init_wait(&wait);
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                  crypto_req_done, &wait);
+       crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+       sg_init_one(&sg, key, keylen);
+       ahash_request_set_crypt(req, &sg, ctx->key, keylen);
+
+       rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
+       if (rc == 0)
+               ctx->key_len = digestsize;
+
+       ahash_request_free(req);
+err_free_ahash:
+       crypto_free_ahash(ahash_tfm);
+
+       return rc;
+}
+
+/* Set request size and initialize tfm context. */
+static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
+{
+       crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
+                                    sizeof(struct ocs_hcu_rctx));
+
+       /* Init context to 0. */
+       memzero_explicit(ctx, sizeof(*ctx));
+       /* Set engine ops. */
+       ctx->engine_ctx.op.do_one_request = kmb_ocs_hcu_do_one_request;
+}
+
+static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
+{
+       struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       __cra_init(tfm, ctx);
+
+       return 0;
+}
+
+static int kmb_ocs_hcu_sm3_cra_init(struct crypto_tfm *tfm)
+{
+       struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       __cra_init(tfm, ctx);
+
+       ctx->is_sm3_tfm = true;
+
+       return 0;
+}
+
+static int kmb_ocs_hcu_hmac_sm3_cra_init(struct crypto_tfm *tfm)
+{
+       struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       __cra_init(tfm, ctx);
+
+       ctx->is_sm3_tfm = true;
+       ctx->is_hmac_tfm = true;
+
+       return 0;
+}
+
+static int kmb_ocs_hcu_hmac_cra_init(struct crypto_tfm *tfm)
+{
+       struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       __cra_init(tfm, ctx);
+
+       ctx->is_hmac_tfm = true;
+
+       return 0;
+}
+
+/* Function called when 'tfm' is de-initialized. */
+static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+       struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       /* Clear the key. */
+       memzero_explicit(ctx->key, sizeof(ctx->key));
+}
+
+static struct ahash_alg ocs_hcu_algs[] = {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+{
+       .init           = kmb_ocs_hcu_init,
+       .update         = kmb_ocs_hcu_update,
+       .final          = kmb_ocs_hcu_final,
+       .finup          = kmb_ocs_hcu_finup,
+       .digest         = kmb_ocs_hcu_digest,
+       .export         = kmb_ocs_hcu_export,
+       .import         = kmb_ocs_hcu_import,
+       .halg = {
+               .digestsize     = SHA224_DIGEST_SIZE,
+               .statesize      = sizeof(struct ocs_hcu_rctx),
+               .base   = {
+                       .cra_name               = "sha224",
+                       .cra_driver_name        = "sha224-keembay-ocs",
+                       .cra_priority           = 255,
+                       .cra_flags              = CRYPTO_ALG_ASYNC,
+                       .cra_blocksize          = SHA224_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
+                       .cra_alignmask          = 0,
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = kmb_ocs_hcu_sha_cra_init,
+               }
+       }
+},
+{
+       .init           = kmb_ocs_hcu_init,
+       .update         = kmb_ocs_hcu_update,
+       .final          = kmb_ocs_hcu_final,
+       .finup          = kmb_ocs_hcu_finup,
+       .digest         = kmb_ocs_hcu_digest,
+       .export         = kmb_ocs_hcu_export,
+       .import         = kmb_ocs_hcu_import,
+       .setkey         = kmb_ocs_hcu_setkey,
+       .halg = {
+               .digestsize     = SHA224_DIGEST_SIZE,
+               .statesize      = sizeof(struct ocs_hcu_rctx),
+               .base   = {
+                       .cra_name               = "hmac(sha224)",
+                       .cra_driver_name        = "hmac-sha224-keembay-ocs",
+                       .cra_priority           = 255,
+                       .cra_flags              = CRYPTO_ALG_ASYNC,
+                       .cra_blocksize          = SHA224_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
+                       .cra_alignmask          = 0,
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = kmb_ocs_hcu_hmac_cra_init,
+                       .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
+               }
+       }
+},
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+{
+       .init           = kmb_ocs_hcu_init,
+       .update         = kmb_ocs_hcu_update,
+       .final          = kmb_ocs_hcu_final,
+       .finup          = kmb_ocs_hcu_finup,
+       .digest         = kmb_ocs_hcu_digest,
+       .export         = kmb_ocs_hcu_export,
+       .import         = kmb_ocs_hcu_import,
+       .halg = {
+               .digestsize     = SHA256_DIGEST_SIZE,
+               .statesize      = sizeof(struct ocs_hcu_rctx),
+               .base   = {
+                       .cra_name               = "sha256",
+                       .cra_driver_name        = "sha256-keembay-ocs",
+                       .cra_priority           = 255,
+                       .cra_flags              = CRYPTO_ALG_ASYNC,
+                       .cra_blocksize          = SHA256_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
+                       .cra_alignmask          = 0,
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = kmb_ocs_hcu_sha_cra_init,
+               }
+       }
+},
+{
+       .init           = kmb_ocs_hcu_init,
+       .update         = kmb_ocs_hcu_update,
+       .final          = kmb_ocs_hcu_final,
+       .finup          = kmb_ocs_hcu_finup,
+       .digest         = kmb_ocs_hcu_digest,
+       .export         = kmb_ocs_hcu_export,
+       .import         = kmb_ocs_hcu_import,
+       .setkey         = kmb_ocs_hcu_setkey,
+       .halg = {
+               .digestsize     = SHA256_DIGEST_SIZE,
+               .statesize      = sizeof(struct ocs_hcu_rctx),
+               .base   = {
+                       .cra_name               = "hmac(sha256)",
+                       .cra_driver_name        = "hmac-sha256-keembay-ocs",
+                       .cra_priority           = 255,
+                       .cra_flags              = CRYPTO_ALG_ASYNC,
+                       .cra_blocksize          = SHA256_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
+                       .cra_alignmask          = 0,
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = kmb_ocs_hcu_hmac_cra_init,
+                       .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
+               }
+       }
+},
+{
+       .init           = kmb_ocs_hcu_init,
+       .update         = kmb_ocs_hcu_update,
+       .final          = kmb_ocs_hcu_final,
+       .finup          = kmb_ocs_hcu_finup,
+       .digest         = kmb_ocs_hcu_digest,
+       .export         = kmb_ocs_hcu_export,
+       .import         = kmb_ocs_hcu_import,
+       .halg = {
+               .digestsize     = SM3_DIGEST_SIZE,
+               .statesize      = sizeof(struct ocs_hcu_rctx),
+               .base   = {
+                       .cra_name               = "sm3",
+                       .cra_driver_name        = "sm3-keembay-ocs",
+                       .cra_priority           = 255,
+                       .cra_flags              = CRYPTO_ALG_ASYNC,
+                       .cra_blocksize          = SM3_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
+                       .cra_alignmask          = 0,
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = kmb_ocs_hcu_sm3_cra_init,
+               }
+       }
+},
+{
+       .init           = kmb_ocs_hcu_init,
+       .update         = kmb_ocs_hcu_update,
+       .final          = kmb_ocs_hcu_final,
+       .finup          = kmb_ocs_hcu_finup,
+       .digest         = kmb_ocs_hcu_digest,
+       .export         = kmb_ocs_hcu_export,
+       .import         = kmb_ocs_hcu_import,
+       .setkey         = kmb_ocs_hcu_setkey,
+       .halg = {
+               .digestsize     = SM3_DIGEST_SIZE,
+               .statesize      = sizeof(struct ocs_hcu_rctx),
+               .base   = {
+                       .cra_name               = "hmac(sm3)",
+                       .cra_driver_name        = "hmac-sm3-keembay-ocs",
+                       .cra_priority           = 255,
+                       .cra_flags              = CRYPTO_ALG_ASYNC,
+                       .cra_blocksize          = SM3_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
+                       .cra_alignmask          = 0,
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = kmb_ocs_hcu_hmac_sm3_cra_init,
+                       .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
+               }
+       }
+},
+{
+       .init           = kmb_ocs_hcu_init,
+       .update         = kmb_ocs_hcu_update,
+       .final          = kmb_ocs_hcu_final,
+       .finup          = kmb_ocs_hcu_finup,
+       .digest         = kmb_ocs_hcu_digest,
+       .export         = kmb_ocs_hcu_export,
+       .import         = kmb_ocs_hcu_import,
+       .halg = {
+               .digestsize     = SHA384_DIGEST_SIZE,
+               .statesize      = sizeof(struct ocs_hcu_rctx),
+               .base   = {
+                       .cra_name               = "sha384",
+                       .cra_driver_name        = "sha384-keembay-ocs",
+                       .cra_priority           = 255,
+                       .cra_flags              = CRYPTO_ALG_ASYNC,
+                       .cra_blocksize          = SHA384_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
+                       .cra_alignmask          = 0,
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = kmb_ocs_hcu_sha_cra_init,
+               }
+       }
+},
+{
+       .init           = kmb_ocs_hcu_init,
+       .update         = kmb_ocs_hcu_update,
+       .final          = kmb_ocs_hcu_final,
+       .finup          = kmb_ocs_hcu_finup,
+       .digest         = kmb_ocs_hcu_digest,
+       .export         = kmb_ocs_hcu_export,
+       .import         = kmb_ocs_hcu_import,
+       .setkey         = kmb_ocs_hcu_setkey,
+       .halg = {
+               .digestsize     = SHA384_DIGEST_SIZE,
+               .statesize      = sizeof(struct ocs_hcu_rctx),
+               .base   = {
+                       .cra_name               = "hmac(sha384)",
+                       .cra_driver_name        = "hmac-sha384-keembay-ocs",
+                       .cra_priority           = 255,
+                       .cra_flags              = CRYPTO_ALG_ASYNC,
+                       .cra_blocksize          = SHA384_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
+                       .cra_alignmask          = 0,
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = kmb_ocs_hcu_hmac_cra_init,
+                       .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
+               }
+       }
+},
+{
+       .init           = kmb_ocs_hcu_init,
+       .update         = kmb_ocs_hcu_update,
+       .final          = kmb_ocs_hcu_final,
+       .finup          = kmb_ocs_hcu_finup,
+       .digest         = kmb_ocs_hcu_digest,
+       .export         = kmb_ocs_hcu_export,
+       .import         = kmb_ocs_hcu_import,
+       .halg = {
+               .digestsize     = SHA512_DIGEST_SIZE,
+               .statesize      = sizeof(struct ocs_hcu_rctx),
+               .base   = {
+                       .cra_name               = "sha512",
+                       .cra_driver_name        = "sha512-keembay-ocs",
+                       .cra_priority           = 255,
+                       .cra_flags              = CRYPTO_ALG_ASYNC,
+                       .cra_blocksize          = SHA512_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
+                       .cra_alignmask          = 0,
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = kmb_ocs_hcu_sha_cra_init,
+               }
+       }
+},
+{
+       .init           = kmb_ocs_hcu_init,
+       .update         = kmb_ocs_hcu_update,
+       .final          = kmb_ocs_hcu_final,
+       .finup          = kmb_ocs_hcu_finup,
+       .digest         = kmb_ocs_hcu_digest,
+       .export         = kmb_ocs_hcu_export,
+       .import         = kmb_ocs_hcu_import,
+       .setkey         = kmb_ocs_hcu_setkey,
+       .halg = {
+               .digestsize     = SHA512_DIGEST_SIZE,
+               .statesize      = sizeof(struct ocs_hcu_rctx),
+               .base   = {
+                       .cra_name               = "hmac(sha512)",
+                       .cra_driver_name        = "hmac-sha512-keembay-ocs",
+                       .cra_priority           = 255,
+                       .cra_flags              = CRYPTO_ALG_ASYNC,
+                       .cra_blocksize          = SHA512_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
+                       .cra_alignmask          = 0,
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = kmb_ocs_hcu_hmac_cra_init,
+                       .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
+               }
+       }
+},
+};
+
+/* Device tree driver match. */
+static const struct of_device_id kmb_ocs_hcu_of_match[] = {
+       {
+               .compatible = "intel,keembay-ocs-hcu",
+       },
+       {}
+};
+
+static int kmb_ocs_hcu_remove(struct platform_device *pdev)
+{
+       struct ocs_hcu_dev *hcu_dev;
+       int rc;
+
+       hcu_dev = platform_get_drvdata(pdev);
+       if (!hcu_dev)
+               return -ENODEV;
+
+       crypto_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+
+       rc = crypto_engine_exit(hcu_dev->engine);
+
+       spin_lock_bh(&ocs_hcu.lock);
+       list_del(&hcu_dev->list);
+       spin_unlock_bh(&ocs_hcu.lock);
+
+       return rc;
+}
+
+static int kmb_ocs_hcu_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ocs_hcu_dev *hcu_dev;
+       struct resource *hcu_mem;
+       int rc;
+
+       hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
+       if (!hcu_dev)
+               return -ENOMEM;
+
+       hcu_dev->dev = dev;
+
+       platform_set_drvdata(pdev, hcu_dev);
+       rc = dma_set_mask_and_coherent(&pdev->dev, OCS_HCU_DMA_BIT_MASK);
+       if (rc)
+               return rc;
+
+       /* Get the memory address and remap. */
+       hcu_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!hcu_mem) {
+               dev_err(dev, "Could not retrieve io mem resource.\n");
+               return -ENODEV;
+       }
+
+       hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
+       if (IS_ERR(hcu_dev->io_base))
+               return PTR_ERR(hcu_dev->io_base);
+
+       init_completion(&hcu_dev->irq_done);
+
+       /* Get and request IRQ. */
+       hcu_dev->irq = platform_get_irq(pdev, 0);
+       if (hcu_dev->irq < 0)
+               return hcu_dev->irq;
+
+       rc = devm_request_threaded_irq(&pdev->dev, hcu_dev->irq,
+                                      ocs_hcu_irq_handler, NULL, 0,
+                                      "keembay-ocs-hcu", hcu_dev);
+       if (rc < 0) {
+               dev_err(dev, "Could not request IRQ.\n");
+               return rc;
+       }
+
+       INIT_LIST_HEAD(&hcu_dev->list);
+
+       spin_lock_bh(&ocs_hcu.lock);
+       list_add_tail(&hcu_dev->list, &ocs_hcu.dev_list);
+       spin_unlock_bh(&ocs_hcu.lock);
+
+       /* Initialize crypto engine. */
+       hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
+       if (!hcu_dev->engine) {
+               rc = -ENOMEM;
+               goto list_del;
+       }
+
+       rc = crypto_engine_start(hcu_dev->engine);
+       if (rc) {
+               dev_err(dev, "Could not start engine.\n");
+               goto cleanup;
+       }
+
+       /* Security infrastructure guarantees OCS clock is enabled. */
+
+       rc = crypto_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+       if (rc) {
+               dev_err(dev, "Could not register algorithms.\n");
+               goto cleanup;
+       }
+
+       return 0;
+
+cleanup:
+       crypto_engine_exit(hcu_dev->engine);
+list_del:
+       spin_lock_bh(&ocs_hcu.lock);
+       list_del(&hcu_dev->list);
+       spin_unlock_bh(&ocs_hcu.lock);
+
+       return rc;
+}
+
+/* The OCS driver is a platform device. */
+static struct platform_driver kmb_ocs_hcu_driver = {
+       .probe = kmb_ocs_hcu_probe,
+       .remove = kmb_ocs_hcu_remove,
+       .driver = {
+               .name = DRV_NAME,
+               .of_match_table = kmb_ocs_hcu_of_match,
+       },
+};
+
+module_platform_driver(kmb_ocs_hcu_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/intel/keembay/ocs-aes.c b/drivers/crypto/intel/keembay/ocs-aes.c
new file mode 100644 (file)
index 0000000..be9f32f
--- /dev/null
@@ -0,0 +1,1489 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS AES Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+
+#include <asm/byteorder.h>
+#include <asm/errno.h>
+
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+
+#include "ocs-aes.h"
+
+#define AES_COMMAND_OFFSET                     0x0000
+#define AES_KEY_0_OFFSET                       0x0004
+#define AES_KEY_1_OFFSET                       0x0008
+#define AES_KEY_2_OFFSET                       0x000C
+#define AES_KEY_3_OFFSET                       0x0010
+#define AES_KEY_4_OFFSET                       0x0014
+#define AES_KEY_5_OFFSET                       0x0018
+#define AES_KEY_6_OFFSET                       0x001C
+#define AES_KEY_7_OFFSET                       0x0020
+#define AES_IV_0_OFFSET                                0x0024
+#define AES_IV_1_OFFSET                                0x0028
+#define AES_IV_2_OFFSET                                0x002C
+#define AES_IV_3_OFFSET                                0x0030
+#define AES_ACTIVE_OFFSET                      0x0034
+#define AES_STATUS_OFFSET                      0x0038
+#define AES_KEY_SIZE_OFFSET                    0x0044
+#define AES_IER_OFFSET                         0x0048
+#define AES_ISR_OFFSET                         0x005C
+#define AES_MULTIPURPOSE1_0_OFFSET             0x0200
+#define AES_MULTIPURPOSE1_1_OFFSET             0x0204
+#define AES_MULTIPURPOSE1_2_OFFSET             0x0208
+#define AES_MULTIPURPOSE1_3_OFFSET             0x020C
+#define AES_MULTIPURPOSE2_0_OFFSET             0x0220
+#define AES_MULTIPURPOSE2_1_OFFSET             0x0224
+#define AES_MULTIPURPOSE2_2_OFFSET             0x0228
+#define AES_MULTIPURPOSE2_3_OFFSET             0x022C
+#define AES_BYTE_ORDER_CFG_OFFSET              0x02C0
+#define AES_TLEN_OFFSET                                0x0300
+#define AES_T_MAC_0_OFFSET                     0x0304
+#define AES_T_MAC_1_OFFSET                     0x0308
+#define AES_T_MAC_2_OFFSET                     0x030C
+#define AES_T_MAC_3_OFFSET                     0x0310
+#define AES_PLEN_OFFSET                                0x0314
+#define AES_A_DMA_SRC_ADDR_OFFSET              0x0400
+#define AES_A_DMA_DST_ADDR_OFFSET              0x0404
+#define AES_A_DMA_SRC_SIZE_OFFSET              0x0408
+#define AES_A_DMA_DST_SIZE_OFFSET              0x040C
+#define AES_A_DMA_DMA_MODE_OFFSET              0x0410
+#define AES_A_DMA_NEXT_SRC_DESCR_OFFSET                0x0418
+#define AES_A_DMA_NEXT_DST_DESCR_OFFSET                0x041C
+#define AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET     0x0420
+#define AES_A_DMA_LOG_OFFSET                   0x0424
+#define AES_A_DMA_STATUS_OFFSET                        0x0428
+#define AES_A_DMA_PERF_CNTR_OFFSET             0x042C
+#define AES_A_DMA_MSI_ISR_OFFSET               0x0480
+#define AES_A_DMA_MSI_IER_OFFSET               0x0484
+#define AES_A_DMA_MSI_MASK_OFFSET              0x0488
+#define AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET   0x0600
+#define AES_A_DMA_OUTBUFFER_READ_FIFO_OFFSET   0x0700
+
+/*
+ * AES_A_DMA_DMA_MODE register.
+ * Default: 0x00000000.
+ * bit[31]     ACTIVE
+ *             This bit activates the DMA. When the DMA finishes, it resets
+ *             this bit to zero.
+ * bit[30:26]  Unused by this driver.
+ * bit[25]     SRC_LINK_LIST_EN
+ *             Source link list enable bit. When the linked list is terminated
+ *             this bit is reset by the DMA.
+ * bit[24]     DST_LINK_LIST_EN
+ *             Destination link list enable bit. When the linked list is
+ *             terminated this bit is reset by the DMA.
+ * bit[23:0]   Unused by this driver.
+ */
+#define AES_A_DMA_DMA_MODE_ACTIVE              BIT(31)
+#define AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN    BIT(25)
+#define AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN    BIT(24)
+
+/*
+ * AES_ACTIVE register
+ * default 0x00000000
+ * bit[31:10]  Reserved
+ * bit[9]      LAST_ADATA
+ * bit[8]      LAST_GCX
+ * bit[7:2]    Reserved
+ * bit[1]      TERMINATION
+ * bit[0]      TRIGGER
+ */
+#define AES_ACTIVE_LAST_ADATA                  BIT(9)
+#define AES_ACTIVE_LAST_CCM_GCM                        BIT(8)
+#define AES_ACTIVE_TERMINATION                 BIT(1)
+#define AES_ACTIVE_TRIGGER                     BIT(0)
+
+#define AES_DISABLE_INT                                0x00000000
+#define AES_DMA_CPD_ERR_INT                    BIT(8)
+#define AES_DMA_OUTBUF_RD_ERR_INT              BIT(7)
+#define AES_DMA_OUTBUF_WR_ERR_INT              BIT(6)
+#define AES_DMA_INBUF_RD_ERR_INT               BIT(5)
+#define AES_DMA_INBUF_WR_ERR_INT               BIT(4)
+#define AES_DMA_BAD_COMP_INT                   BIT(3)
+#define AES_DMA_SAI_INT                                BIT(2)
+#define AES_DMA_SRC_DONE_INT                   BIT(0)
+#define AES_COMPLETE_INT                       BIT(1)
+
+#define AES_DMA_MSI_MASK_CLEAR                 BIT(0)
+
+#define AES_128_BIT_KEY                                0x00000000
+#define AES_256_BIT_KEY                                BIT(0)
+
+#define AES_DEACTIVATE_PERF_CNTR               0x00000000
+#define AES_ACTIVATE_PERF_CNTR                 BIT(0)
+
+#define AES_MAX_TAG_SIZE_U32                   4
+
+#define OCS_LL_DMA_FLAG_TERMINATE              BIT(31)
+
+/*
+ * There is an inconsistency in the documentation: this field is documented
+ * as an 11-bit value, but it is actually 10 bits wide.
+ */
+#define AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK     0x3FF
+
+/*
+ * During CCM decrypt, the OCS block needs to finish processing the ciphertext
+ * before the tag is written. For 128-bit mode the required delay is 28 OCS
+ * clock cycles; for 256-bit mode it is 36 OCS clock cycles.
+ */
+#define CCM_DECRYPT_DELAY_TAG_CLK_COUNT                36UL
+
+/*
+ * During CCM decrypt there must be a delay of at least 42 OCS clock cycles
+ * between setting the TRIGGER bit in AES_ACTIVE and setting the LAST_CCM_GCM
+ * bit in the same register (as stated in the OCS databook).
+ */
+#define CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT   42UL
+
+/* See RFC3610 section 2.2 */
+#define L_PRIME_MIN (1)
+#define L_PRIME_MAX (7)
+/*
+ * CCM IV format from RFC 3610 section 2.3
+ *
+ *   Octet Number   Contents
+ *   ------------   ---------
+ *   0              Flags
+ *   1 ... 15-L     Nonce N
+ *   16-L ... 15    Counter i
+ *
+ * Flags = L' = L - 1
+ */
+#define L_PRIME_IDX            0
+#define COUNTER_START(lprime)  (16 - ((lprime) + 1))
+#define COUNTER_LEN(lprime)    ((lprime) + 1)
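+
+/*
+ * Worked example (illustrative): for a 4-byte counter, L = 4 and so
+ * L' = iv[0] = 3; the nonce then occupies iv[1]..iv[11] and the counter
+ * occupies iv[12]..iv[15], i.e. COUNTER_START(3) == 12 and
+ * COUNTER_LEN(3) == 4.
+ */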
+
+enum aes_counter_mode {
+       AES_CTR_M_NO_INC = 0,
+       AES_CTR_M_32_INC = 1,
+       AES_CTR_M_64_INC = 2,
+       AES_CTR_M_128_INC = 3,
+};
+
+/**
+ * struct ocs_dma_linked_list - OCS DMA linked list entry.
+ * @src_addr:   Source address of the data.
+ * @src_len:    Length of data to be fetched.
+ * @next:       Next dma_list to fetch.
+ * @ll_flags:   Flags (freeze / terminate) for the DMA engine.
+ */
+struct ocs_dma_linked_list {
+       u32 src_addr;
+       u32 src_len;
+       u32 next;
+       u32 ll_flags;
+} __packed;
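+
+/*
+ * Example layout (illustrative): in a two-entry list, entry 0's @next holds
+ * the DMA address of entry 1, while entry 1 terminates the list with
+ * @next == 0 and @ll_flags == OCS_LL_DMA_FLAG_TERMINATE, exactly as built by
+ * ocs_create_linked_list_from_sg() below.
+ */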
+
+/*
+ * Set endianness of inputs and outputs
+ * AES_BYTE_ORDER_CFG
+ * default 0x00000000
+ * bit [10] - KEY_HI_LO_SWAP
+ * bit [9] - KEY_HI_SWAP_DWORDS_IN_OCTWORD
+ * bit [8] - KEY_HI_SWAP_BYTES_IN_DWORD
+ * bit [7] - KEY_LO_SWAP_DWORDS_IN_OCTWORD
+ * bit [6] - KEY_LO_SWAP_BYTES_IN_DWORD
+ * bit [5] - IV_SWAP_DWORDS_IN_OCTWORD
+ * bit [4] - IV_SWAP_BYTES_IN_DWORD
+ * bit [3] - DOUT_SWAP_DWORDS_IN_OCTWORD
+ * bit [2] - DOUT_SWAP_BYTES_IN_DWORD
+ * bit [1] - DIN_SWAP_DWORDS_IN_OCTWORD
+ * bit [0] - DIN_SWAP_BYTES_IN_DWORD
+ */
+static inline void aes_a_set_endianness(const struct ocs_aes_dev *aes_dev)
+{
+       iowrite32(0x7FF, aes_dev->base_reg + AES_BYTE_ORDER_CFG_OFFSET);
+}
+
+/* Trigger AES process start. */
+static inline void aes_a_op_trigger(const struct ocs_aes_dev *aes_dev)
+{
+       iowrite32(AES_ACTIVE_TRIGGER, aes_dev->base_reg + AES_ACTIVE_OFFSET);
+}
+
+/* Indicate the last batch of data. */
+static inline void aes_a_op_termination(const struct ocs_aes_dev *aes_dev)
+{
+       iowrite32(AES_ACTIVE_TERMINATION,
+                 aes_dev->base_reg + AES_ACTIVE_OFFSET);
+}
+
+/*
+ * Set LAST_CCM_GCM in AES_ACTIVE register and clear all other bits.
+ *
+ * Called when DMA is programmed to fetch the last batch of data.
+ * - For AES-CCM it is called for the last batch of Payload data and Ciphertext
+ *   data.
+ * - For AES-GCM, it is called for the last batch of Plaintext data and
+ *   Ciphertext data.
+ */
+static inline void aes_a_set_last_gcx(const struct ocs_aes_dev *aes_dev)
+{
+       iowrite32(AES_ACTIVE_LAST_CCM_GCM,
+                 aes_dev->base_reg + AES_ACTIVE_OFFSET);
+}
+
+/* Wait for LAST_CCM_GCM bit to be unset. */
+static inline void aes_a_wait_last_gcx(const struct ocs_aes_dev *aes_dev)
+{
+       u32 aes_active_reg;
+
+       do {
+               aes_active_reg = ioread32(aes_dev->base_reg +
+                                         AES_ACTIVE_OFFSET);
+       } while (aes_active_reg & AES_ACTIVE_LAST_CCM_GCM);
+}
+
+/* Wait until the input buffer occupancy (a 10-bit field) drops to zero. */
+static void aes_a_dma_wait_input_buffer_occupancy(const struct ocs_aes_dev *aes_dev)
+{
+       u32 reg;
+
+       do {
+               reg = ioread32(aes_dev->base_reg + AES_A_DMA_STATUS_OFFSET);
+       } while (reg & AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK);
+}
+
+/*
+ * Set LAST_CCM_GCM and LAST_ADATA bits in AES_ACTIVE register (and clear all
+ * other bits).
+ *
+ * Called when DMA is programmed to fetch the last batch of Associated Data
+ * (CCM case) or Additional Authenticated Data (GCM case).
+ */
+static inline void aes_a_set_last_gcx_and_adata(const struct ocs_aes_dev *aes_dev)
+{
+       iowrite32(AES_ACTIVE_LAST_ADATA | AES_ACTIVE_LAST_CCM_GCM,
+                 aes_dev->base_reg + AES_ACTIVE_OFFSET);
+}
+
+/* Set DMA src and dst transfer size to 0 */
+static inline void aes_a_dma_set_xfer_size_zero(const struct ocs_aes_dev *aes_dev)
+{
+       iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
+       iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
+}
+
+/* Activate DMA for zero-byte transfer case. */
+static inline void aes_a_dma_active(const struct ocs_aes_dev *aes_dev)
+{
+       iowrite32(AES_A_DMA_DMA_MODE_ACTIVE,
+                 aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
+}
+
+/* Activate DMA and enable src linked list */
+static inline void aes_a_dma_active_src_ll_en(const struct ocs_aes_dev *aes_dev)
+{
+       iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
+                 AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN,
+                 aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
+}
+
+/* Activate DMA and enable dst linked list */
+static inline void aes_a_dma_active_dst_ll_en(const struct ocs_aes_dev *aes_dev)
+{
+       iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
+                 AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
+                 aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
+}
+
+/* Activate DMA and enable src and dst linked lists */
+static inline void aes_a_dma_active_src_dst_ll_en(const struct ocs_aes_dev *aes_dev)
+{
+       iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
+                 AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN |
+                 AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
+                 aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
+}
+
+/* Reset PERF_CNTR to 0 and activate it */
+static inline void aes_a_dma_reset_and_activate_perf_cntr(const struct ocs_aes_dev *aes_dev)
+{
+       iowrite32(0x00000000, aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET);
+       iowrite32(AES_ACTIVATE_PERF_CNTR,
+                 aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
+}
+
+/* Wait until PERF_CNTR is > delay, then deactivate it */
+static inline void aes_a_dma_wait_and_deactivate_perf_cntr(const struct ocs_aes_dev *aes_dev,
+                                                          int delay)
+{
+       while (ioread32(aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET) < delay)
+               ;
+       iowrite32(AES_DEACTIVATE_PERF_CNTR,
+                 aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
+}
+
+/* Disable AES and DMA IRQ. */
+static void aes_irq_disable(struct ocs_aes_dev *aes_dev)
+{
+       u32 isr_val = 0;
+
+       /* Disable interrupts */
+       iowrite32(AES_DISABLE_INT,
+                 aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
+       iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
+
+       /* Clear any pending interrupt */
+       isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
+       if (isr_val)
+               iowrite32(isr_val,
+                         aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
+
+       isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
+       if (isr_val)
+               iowrite32(isr_val,
+                         aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
+
+       isr_val = ioread32(aes_dev->base_reg + AES_ISR_OFFSET);
+       if (isr_val)
+               iowrite32(isr_val, aes_dev->base_reg + AES_ISR_OFFSET);
+}
+
+/* Enable AES or DMA IRQ. IRQ is disabled once fired. */
+static void aes_irq_enable(struct ocs_aes_dev *aes_dev, u8 irq)
+{
+       if (irq == AES_COMPLETE_INT) {
+               /* Ensure DMA error interrupts are enabled */
+               iowrite32(AES_DMA_CPD_ERR_INT |
+                         AES_DMA_OUTBUF_RD_ERR_INT |
+                         AES_DMA_OUTBUF_WR_ERR_INT |
+                         AES_DMA_INBUF_RD_ERR_INT |
+                         AES_DMA_INBUF_WR_ERR_INT |
+                         AES_DMA_BAD_COMP_INT |
+                         AES_DMA_SAI_INT,
+                         aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
+               /*
+                * AES_IER
+                * default 0x00000000
+                * bits [31:3] - reserved
+                * bit [2] - EN_SKS_ERR
+                * bit [1] - EN_AES_COMPLETE
+                * bit [0] - reserved
+                */
+               iowrite32(AES_COMPLETE_INT, aes_dev->base_reg + AES_IER_OFFSET);
+               return;
+       }
+       if (irq == AES_DMA_SRC_DONE_INT) {
+               /* Ensure AES interrupts are disabled */
+               iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
+               /*
+                * DMA_MSI_IER
+                * default 0x00000000
+                * bits [31:9] - reserved
+                * bit [8] - CPD_ERR_INT_EN
+                * bit [7] - OUTBUF_RD_ERR_INT_EN
+                * bit [6] - OUTBUF_WR_ERR_INT_EN
+                * bit [5] - INBUF_RD_ERR_INT_EN
+                * bit [4] - INBUF_WR_ERR_INT_EN
+                * bit [3] - BAD_COMP_INT_EN
+                * bit [2] - SAI_INT_EN
+                * bit [1] - DST_DONE_INT_EN
+                * bit [0] - SRC_DONE_INT_EN
+                */
+               iowrite32(AES_DMA_CPD_ERR_INT |
+                         AES_DMA_OUTBUF_RD_ERR_INT |
+                         AES_DMA_OUTBUF_WR_ERR_INT |
+                         AES_DMA_INBUF_RD_ERR_INT |
+                         AES_DMA_INBUF_WR_ERR_INT |
+                         AES_DMA_BAD_COMP_INT |
+                         AES_DMA_SAI_INT |
+                         AES_DMA_SRC_DONE_INT,
+                         aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
+       }
+}
+
+/* Enable and wait for IRQ (either from OCS AES engine or DMA) */
+static int ocs_aes_irq_enable_and_wait(struct ocs_aes_dev *aes_dev, u8 irq)
+{
+       int rc;
+
+       reinit_completion(&aes_dev->irq_completion);
+       aes_irq_enable(aes_dev, irq);
+       rc = wait_for_completion_interruptible(&aes_dev->irq_completion);
+       if (rc)
+               return rc;
+
+       return aes_dev->dma_err_mask ? -EIO : 0;
+}
+
+/* Configure DMA to OCS, linked list mode */
+static inline void dma_to_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
+                                    dma_addr_t dma_list)
+{
+       iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
+       iowrite32(dma_list,
+                 aes_dev->base_reg + AES_A_DMA_NEXT_SRC_DESCR_OFFSET);
+}
+
+/* Configure DMA from OCS, linked list mode */
+static inline void dma_from_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
+                                      dma_addr_t dma_list)
+{
+       iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
+       iowrite32(dma_list,
+                 aes_dev->base_reg + AES_A_DMA_NEXT_DST_DESCR_OFFSET);
+}
+
+irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id)
+{
+       struct ocs_aes_dev *aes_dev = dev_id;
+       u32 aes_dma_isr;
+
+       /* Read DMA ISR status. */
+       aes_dma_isr = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
+
+       /* Disable and clear interrupts. */
+       aes_irq_disable(aes_dev);
+
+       /* Save DMA error status. */
+       aes_dev->dma_err_mask = aes_dma_isr &
+                               (AES_DMA_CPD_ERR_INT |
+                                AES_DMA_OUTBUF_RD_ERR_INT |
+                                AES_DMA_OUTBUF_WR_ERR_INT |
+                                AES_DMA_INBUF_RD_ERR_INT |
+                                AES_DMA_INBUF_WR_ERR_INT |
+                                AES_DMA_BAD_COMP_INT |
+                                AES_DMA_SAI_INT);
+
+       /* Signal IRQ completion. */
+       complete(&aes_dev->irq_completion);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * ocs_aes_set_key() - Write key into OCS AES hardware.
+ * @aes_dev:   The OCS AES device to write the key to.
+ * @key_size:  The size of the key (in bytes).
+ * @key:       The key to write.
+ * @cipher:    The cipher the key is for.
+ *
+ * For AES @key_size must be either 16 or 32. For SM4 @key_size must be 16.
+ *
+ * Return:     0 on success, negative error code otherwise.
+ */
+int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, u32 key_size, const u8 *key,
+                   enum ocs_cipher cipher)
+{
+       const u32 *key_u32;
+       u32 val;
+       int i;
+
+       /* OCS AES supports 128-bit and 256-bit keys only. */
+       if (cipher == OCS_AES && !(key_size == 32 || key_size == 16)) {
+               dev_err(aes_dev->dev,
+                       "%d-bit keys not supported by AES cipher\n",
+                       key_size * 8);
+               return -EINVAL;
+       }
+       /* OCS SM4 supports 128-bit keys only. */
+       if (cipher == OCS_SM4 && key_size != 16) {
+               dev_err(aes_dev->dev,
+                       "%d-bit keys not supported for SM4 cipher\n",
+                       key_size * 8);
+               return -EINVAL;
+       }
+
+       if (!key)
+               return -EINVAL;
+
+       key_u32 = (const u32 *)key;
+
+       /* Write key to AES_KEY[0-7] registers */
+       for (i = 0; i < (key_size / sizeof(u32)); i++) {
+               iowrite32(key_u32[i],
+                         aes_dev->base_reg + AES_KEY_0_OFFSET +
+                         (i * sizeof(u32)));
+       }
+       /*
+        * Write key size
+        * bits [31:1] - reserved
+        * bit [0] - AES_KEY_SIZE
+        *           0 - 128 bit key
+        *           1 - 256 bit key
+        */
+       val = (key_size == 16) ? AES_128_BIT_KEY : AES_256_BIT_KEY;
+       iowrite32(val, aes_dev->base_reg + AES_KEY_SIZE_OFFSET);
+
+       return 0;
+}
+
+/* Write AES_COMMAND */
+static inline void set_ocs_aes_command(struct ocs_aes_dev *aes_dev,
+                                      enum ocs_cipher cipher,
+                                      enum ocs_mode mode,
+                                      enum ocs_instruction instruction)
+{
+       u32 val;
+
+       /* AES_COMMAND
+        * default 0x000000CC
+        * bit [14] - CIPHER_SELECT
+        *            0 - AES
+        *            1 - SM4
+        * bits [11:8] - OCS_AES_MODE
+        *               0000 - ECB
+        *               0001 - CBC
+        *               0010 - CTR
+        *               0110 - CCM
+        *               0111 - GCM
+        *               1001 - CTS
+        * bits [7:6] - AES_INSTRUCTION
+        *              00 - ENCRYPT
+        *              01 - DECRYPT
+        *              10 - EXPAND
+        *              11 - BYPASS
+        * bits [3:2] - CTR_M_BITS
+        *              00 - No increment
+        *              01 - Least significant 32 bits are incremented
+        *              10 - Least significant 64 bits are incremented
+        *              11 - Full 128 bits are incremented
+        */
+       val = (cipher << 14) | (mode << 8) | (instruction << 6) |
+             (AES_CTR_M_128_INC << 2);
+       iowrite32(val, aes_dev->base_reg + AES_COMMAND_OFFSET);
+}
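+
+/*
+ * For example (illustrative): an AES-GCM encrypt command is encoded as
+ * (OCS_AES << 14) | (OCS_MODE_GCM << 8) | (OCS_ENCRYPT << 6) |
+ * (AES_CTR_M_128_INC << 2) == 0x0000070C.
+ */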
+
+static void ocs_aes_init(struct ocs_aes_dev *aes_dev,
+                        enum ocs_mode mode,
+                        enum ocs_cipher cipher,
+                        enum ocs_instruction instruction)
+{
+       /* Ensure interrupts are disabled and pending interrupts cleared. */
+       aes_irq_disable(aes_dev);
+
+       /* Set endianness recommended by data-sheet. */
+       aes_a_set_endianness(aes_dev);
+
+       /* Set AES_COMMAND register. */
+       set_ocs_aes_command(aes_dev, cipher, mode, instruction);
+}
+
+/*
+ * Write the byte length of the last AES/SM4 block of Payload data (without
+ * zero padding and without the length of the MAC) in register AES_PLEN.
+ */
+static inline void ocs_aes_write_last_data_blk_len(struct ocs_aes_dev *aes_dev,
+                                                  u32 size)
+{
+       u32 val;
+
+       if (size == 0) {
+               val = 0;
+               goto exit;
+       }
+
+       val = size % AES_BLOCK_SIZE;
+       if (val == 0)
+               val = AES_BLOCK_SIZE;
+
+exit:
+       iowrite32(val, aes_dev->base_reg + AES_PLEN_OFFSET);
+}
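+
+/*
+ * For example (illustrative): size == 0 writes 0, size == 32 writes 16 (the
+ * last block is full), and size == 36 writes 4 (the last block holds only
+ * 4 bytes of payload).
+ */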
+
+/*
+ * Validate inputs according to mode.
+ * If OK return 0; else return -EINVAL.
+ */
+static int ocs_aes_validate_inputs(dma_addr_t src_dma_list, u32 src_size,
+                                  const u8 *iv, u32 iv_size,
+                                  dma_addr_t aad_dma_list, u32 aad_size,
+                                  const u8 *tag, u32 tag_size,
+                                  enum ocs_cipher cipher, enum ocs_mode mode,
+                                  enum ocs_instruction instruction,
+                                  dma_addr_t dst_dma_list)
+{
+       /* Ensure cipher, mode and instruction are valid. */
+       if (!(cipher == OCS_AES || cipher == OCS_SM4))
+               return -EINVAL;
+
+       if (mode != OCS_MODE_ECB && mode != OCS_MODE_CBC &&
+           mode != OCS_MODE_CTR && mode != OCS_MODE_CCM &&
+           mode != OCS_MODE_GCM && mode != OCS_MODE_CTS)
+               return -EINVAL;
+
+       if (instruction != OCS_ENCRYPT && instruction != OCS_DECRYPT &&
+           instruction != OCS_EXPAND  && instruction != OCS_BYPASS)
+               return -EINVAL;
+
+       /*
+        * When instruction is OCS_BYPASS, OCS simply copies data from source
+        * to destination using DMA.
+        *
+        * AES mode is irrelevant, but both source and destination DMA
+        * linked lists must be defined.
+        */
+       if (instruction == OCS_BYPASS) {
+               if (src_dma_list == DMA_MAPPING_ERROR ||
+                   dst_dma_list == DMA_MAPPING_ERROR)
+                       return -EINVAL;
+
+               return 0;
+       }
+
+       /*
+        * For performance reasons, switch based on mode to limit the
+        * unnecessary conditionals evaluated for each mode.
+        */
+       switch (mode) {
+       case OCS_MODE_ECB:
+               /* Ensure input length is multiple of block size */
+               if (src_size % AES_BLOCK_SIZE != 0)
+                       return -EINVAL;
+
+               /* Ensure source and destination linked lists are created */
+               if (src_dma_list == DMA_MAPPING_ERROR ||
+                   dst_dma_list == DMA_MAPPING_ERROR)
+                       return -EINVAL;
+
+               return 0;
+
+       case OCS_MODE_CBC:
+               /* Ensure input length is multiple of block size */
+               if (src_size % AES_BLOCK_SIZE != 0)
+                       return -EINVAL;
+
+               /* Ensure source and destination linked lists are created */
+               if (src_dma_list == DMA_MAPPING_ERROR ||
+                   dst_dma_list == DMA_MAPPING_ERROR)
+                       return -EINVAL;
+
+               /* Ensure IV is present and block size in length */
+               if (!iv || iv_size != AES_BLOCK_SIZE)
+                       return -EINVAL;
+
+               return 0;
+
+       case OCS_MODE_CTR:
+               /* Ensure input length is at least 1 byte */
+               if (src_size == 0)
+                       return -EINVAL;
+
+               /* Ensure source and destination linked lists are created */
+               if (src_dma_list == DMA_MAPPING_ERROR ||
+                   dst_dma_list == DMA_MAPPING_ERROR)
+                       return -EINVAL;
+
+               /* Ensure IV is present and block size in length */
+               if (!iv || iv_size != AES_BLOCK_SIZE)
+                       return -EINVAL;
+
+               return 0;
+
+       case OCS_MODE_CTS:
+               /* Ensure input length >= block size */
+               if (src_size < AES_BLOCK_SIZE)
+                       return -EINVAL;
+
+               /* Ensure source and destination linked lists are created */
+               if (src_dma_list == DMA_MAPPING_ERROR ||
+                   dst_dma_list == DMA_MAPPING_ERROR)
+                       return -EINVAL;
+
+               /* Ensure IV is present and block size in length */
+               if (!iv || iv_size != AES_BLOCK_SIZE)
+                       return -EINVAL;
+
+               return 0;
+
+       case OCS_MODE_GCM:
+               /* Ensure IV is present and GCM_AES_IV_SIZE in length */
+               if (!iv || iv_size != GCM_AES_IV_SIZE)
+                       return -EINVAL;
+
+               /*
+                * If input data present ensure source and destination linked
+                * lists are created
+                */
+               if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
+                                dst_dma_list == DMA_MAPPING_ERROR))
+                       return -EINVAL;
+
+               /* If aad present ensure aad linked list is created */
+               if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
+                       return -EINVAL;
+
+               /* Ensure tag destination is set */
+               if (!tag)
+                       return -EINVAL;
+
+               /* Just ensure that tag_size doesn't cause overflows. */
+               if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
+                       return -EINVAL;
+
+               return 0;
+
+       case OCS_MODE_CCM:
+               /* Ensure IV is present and block size in length */
+               if (!iv || iv_size != AES_BLOCK_SIZE)
+                       return -EINVAL;
+
+               /* 2 <= L <= 8, so 1 <= L' <= 7 */
+               if (iv[L_PRIME_IDX] < L_PRIME_MIN ||
+                   iv[L_PRIME_IDX] > L_PRIME_MAX)
+                       return -EINVAL;
+
+               /* If aad present ensure aad linked list is created */
+               if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
+                       return -EINVAL;
+
+               /* Just ensure that tag_size doesn't cause overflows. */
+               if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
+                       return -EINVAL;
+
+               if (instruction == OCS_DECRYPT) {
+                       /*
+                        * If input data present ensure source and destination
+                        * linked lists are created
+                        */
+                       if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
+                                        dst_dma_list == DMA_MAPPING_ERROR))
+                               return -EINVAL;
+
+                       /* Ensure input tag is present */
+                       if (!tag)
+                               return -EINVAL;
+
+                       return 0;
+               }
+
+               /* Instruction == OCS_ENCRYPT */
+
+               /*
+                * A destination linked list is always required (the tag is
+                * written even if there is no input data).
+                */
+               if (dst_dma_list == DMA_MAPPING_ERROR)
+                       return -EINVAL;
+
+               /* If input data present ensure src linked list is created */
+               if (src_size && src_dma_list == DMA_MAPPING_ERROR)
+                       return -EINVAL;
+
+               return 0;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+/**
+ * ocs_aes_op() - Perform AES/SM4 operation.
+ * @aes_dev:           The OCS AES device to use.
+ * @mode:              The mode to use (ECB, CBC, CTR, or CTS).
+ * @cipher:            The cipher to use (AES or SM4).
+ * @instruction:       The instruction to perform (encrypt or decrypt).
+ * @dst_dma_list:      The OCS DMA list mapping output memory.
+ * @src_dma_list:      The OCS DMA list mapping input payload data.
+ * @src_size:          The amount of data mapped by @src_dma_list.
+ * @iv:                        The IV vector.
+ * @iv_size:           The size (in bytes) of @iv.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ocs_aes_op(struct ocs_aes_dev *aes_dev,
+              enum ocs_mode mode,
+              enum ocs_cipher cipher,
+              enum ocs_instruction instruction,
+              dma_addr_t dst_dma_list,
+              dma_addr_t src_dma_list,
+              u32 src_size,
+              u8 *iv,
+              u32 iv_size)
+{
+       u32 *iv32;
+       int rc;
+
+       rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv, iv_size, 0, 0,
+                                    NULL, 0, cipher, mode, instruction,
+                                    dst_dma_list);
+       if (rc)
+               return rc;
+       /*
+        * ocs_aes_validate_inputs() is a generic check, now ensure mode is not
+        * GCM or CCM.
+        */
+       if (mode == OCS_MODE_GCM || mode == OCS_MODE_CCM)
+               return -EINVAL;
+
+       /* Cast IV to u32 array. */
+       iv32 = (u32 *)iv;
+
+       ocs_aes_init(aes_dev, mode, cipher, instruction);
+
+       if (mode == OCS_MODE_CTS) {
+               /* Write the byte length of the last data block to engine. */
+               ocs_aes_write_last_data_blk_len(aes_dev, src_size);
+       }
+
+       /* ECB is the only mode that doesn't use IV. */
+       if (mode != OCS_MODE_ECB) {
+               iowrite32(iv32[0], aes_dev->base_reg + AES_IV_0_OFFSET);
+               iowrite32(iv32[1], aes_dev->base_reg + AES_IV_1_OFFSET);
+               iowrite32(iv32[2], aes_dev->base_reg + AES_IV_2_OFFSET);
+               iowrite32(iv32[3], aes_dev->base_reg + AES_IV_3_OFFSET);
+       }
+
+       /* Set AES_ACTIVE.TRIGGER to start the operation. */
+       aes_a_op_trigger(aes_dev);
+
+       /* Configure and activate input / output DMA. */
+       dma_to_ocs_aes_ll(aes_dev, src_dma_list);
+       dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
+       aes_a_dma_active_src_dst_ll_en(aes_dev);
+
+       if (mode == OCS_MODE_CTS) {
+               /*
+                * For CTS mode, instruct engine to activate ciphertext
+                * stealing if last block of data is incomplete.
+                */
+               aes_a_set_last_gcx(aes_dev);
+       } else {
+               /* For all other modes, just write the 'termination' bit. */
+               aes_a_op_termination(aes_dev);
+       }
+
+       /* Wait for engine to complete processing. */
+       rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
+       if (rc)
+               return rc;
+
+       if (mode == OCS_MODE_CTR) {
+               /* Read back IV for streaming mode */
+               iv32[0] = ioread32(aes_dev->base_reg + AES_IV_0_OFFSET);
+               iv32[1] = ioread32(aes_dev->base_reg + AES_IV_1_OFFSET);
+               iv32[2] = ioread32(aes_dev->base_reg + AES_IV_2_OFFSET);
+               iv32[3] = ioread32(aes_dev->base_reg + AES_IV_3_OFFSET);
+       }
+
+       return 0;
+}
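+
+/*
+ * Typical call sequence (minimal sketch; error handling omitted and the
+ * src_dll/dst_dll descriptors are assumed to have been built with
+ * ocs_create_linked_list_from_sg()):
+ *
+ *     ocs_aes_set_key(aes_dev, 32, key, OCS_AES);
+ *     ocs_aes_op(aes_dev, OCS_MODE_CBC, OCS_AES, OCS_ENCRYPT,
+ *                dst_dll.dma_addr, src_dll.dma_addr, src_size, iv,
+ *                AES_BLOCK_SIZE);
+ */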
+
+/* Compute and write J0 to engine registers. */
+static void ocs_aes_gcm_write_j0(const struct ocs_aes_dev *aes_dev,
+                                const u8 *iv)
+{
+       const u32 *j0 = (u32 *)iv;
+
+       /*
+        * IV must be 12 bytes; other sizes are not supported, as the Linux
+        * crypto API only expects/allows a 12-byte IV for GCM.
+        */
+       iowrite32(0x00000001, aes_dev->base_reg + AES_IV_0_OFFSET);
+       iowrite32(__swab32(j0[2]), aes_dev->base_reg + AES_IV_1_OFFSET);
+       iowrite32(__swab32(j0[1]), aes_dev->base_reg + AES_IV_2_OFFSET);
+       iowrite32(__swab32(j0[0]), aes_dev->base_reg + AES_IV_3_OFFSET);
+}
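+
+/*
+ * For example (illustrative, little-endian CPU): with iv[] = 00 01 02 .. 0b,
+ * AES_IV_3 receives 0x00010203, AES_IV_2 0x04050607, AES_IV_1 0x08090a0b and
+ * AES_IV_0 the initial counter value 1, matching J0 = IV || 0^31 || 1 for a
+ * 96-bit IV (NIST SP 800-38D).
+ */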
+
+/* Read GCM tag from engine registers. */
+static inline void ocs_aes_gcm_read_tag(struct ocs_aes_dev *aes_dev,
+                                       u8 *tag, u32 tag_size)
+{
+       u32 tag_u32[AES_MAX_TAG_SIZE_U32];
+
+       /*
+        * The authentication tag T is stored in little-endian order in the
+        * registers, with the most significant bytes stored from AES_T_MAC[3]
+        * downward.
+        */
+       tag_u32[0] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_3_OFFSET));
+       tag_u32[1] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_2_OFFSET));
+       tag_u32[2] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_1_OFFSET));
+       tag_u32[3] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_0_OFFSET));
+
+       memcpy(tag, tag_u32, tag_size);
+}
+
+/**
+ * ocs_aes_gcm_op() - Perform GCM operation.
+ * @aes_dev:           The OCS AES device to use.
+ * @cipher:            The Cipher to use (AES or SM4).
+ * @instruction:       The instruction to perform (encrypt or decrypt).
+ * @dst_dma_list:      The OCS DMA list mapping output memory.
+ * @src_dma_list:      The OCS DMA list mapping input payload data.
+ * @src_size:          The amount of data mapped by @src_dma_list.
+ * @iv:                        The input IV vector.
+ * @aad_dma_list:      The OCS DMA list mapping input AAD data.
+ * @aad_size:          The amount of data mapped by @aad_dma_list.
+ * @out_tag:           Where to store computed tag.
+ * @tag_size:          The size (in bytes) of @out_tag.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
+                  enum ocs_cipher cipher,
+                  enum ocs_instruction instruction,
+                  dma_addr_t dst_dma_list,
+                  dma_addr_t src_dma_list,
+                  u32 src_size,
+                  const u8 *iv,
+                  dma_addr_t aad_dma_list,
+                  u32 aad_size,
+                  u8 *out_tag,
+                  u32 tag_size)
+{
+       u64 bit_len;
+       u32 val;
+       int rc;
+
+       rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
+                                    GCM_AES_IV_SIZE, aad_dma_list,
+                                    aad_size, out_tag, tag_size, cipher,
+                                    OCS_MODE_GCM, instruction,
+                                    dst_dma_list);
+       if (rc)
+               return rc;
+
+       ocs_aes_init(aes_dev, OCS_MODE_GCM, cipher, instruction);
+
+       /* Compute and write J0 to OCS HW. */
+       ocs_aes_gcm_write_j0(aes_dev, iv);
+
+       /* Write out_tag byte length */
+       iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
+
+       /* Write the byte length of the last plaintext / ciphertext block. */
+       ocs_aes_write_last_data_blk_len(aes_dev, src_size);
+
+       /* Write ciphertext bit length */
+       bit_len = (u64)src_size * 8;
+       val = bit_len & 0xFFFFFFFF;
+       iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_0_OFFSET);
+       val = bit_len >> 32;
+       iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_1_OFFSET);
+
+       /* Write aad bit length */
+       bit_len = (u64)aad_size * 8;
+       val = bit_len & 0xFFFFFFFF;
+       iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_2_OFFSET);
+       val = bit_len >> 32;
+       iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_3_OFFSET);
+
+       /* Set AES_ACTIVE.TRIGGER to start the operation. */
+       aes_a_op_trigger(aes_dev);
+
+       /* Process AAD. */
+       if (aad_size) {
+               /* If aad present, configure DMA to feed it to the engine. */
+               dma_to_ocs_aes_ll(aes_dev, aad_dma_list);
+               aes_a_dma_active_src_ll_en(aes_dev);
+
+               /* Instructs engine to pad last block of aad, if needed. */
+               aes_a_set_last_gcx_and_adata(aes_dev);
+
+               /* Wait for DMA transfer to complete. */
+               rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
+               if (rc)
+                       return rc;
+       } else {
+               aes_a_set_last_gcx_and_adata(aes_dev);
+       }
+
+       /* Wait until adata (if present) has been processed. */
+       aes_a_wait_last_gcx(aes_dev);
+       aes_a_dma_wait_input_buffer_occupancy(aes_dev);
+
+       /* Now process payload. */
+       if (src_size) {
+               /* Configure and activate DMA for both input and output data. */
+               dma_to_ocs_aes_ll(aes_dev, src_dma_list);
+               dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
+               aes_a_dma_active_src_dst_ll_en(aes_dev);
+       } else {
+               aes_a_dma_set_xfer_size_zero(aes_dev);
+               aes_a_dma_active(aes_dev);
+       }
+
+       /* Instruct the AES/SM4 engine that payload processing is over. */
+       aes_a_set_last_gcx(aes_dev);
+
+       /* Wait for OCS AES engine to complete processing. */
+       rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
+       if (rc)
+               return rc;
+
+       ocs_aes_gcm_read_tag(aes_dev, out_tag, tag_size);
+
+       return 0;
+}
+
+/* Write encrypted tag to AES/SM4 engine. */
+static void ocs_aes_ccm_write_encrypted_tag(struct ocs_aes_dev *aes_dev,
+                                           const u8 *in_tag, u32 tag_size)
+{
+       int i;
+
+       /* Ensure DMA input buffer is empty */
+       aes_a_dma_wait_input_buffer_occupancy(aes_dev);
+
+       /*
+        * During CCM decrypt, the OCS block needs to finish processing the
+        * ciphertext before the tag is written, so a delay is needed after
+        * the DMA has finished writing the ciphertext.
+        */
+       aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
+       aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
+                                               CCM_DECRYPT_DELAY_TAG_CLK_COUNT);
+
+       /* Write encrypted tag to AES/SM4 engine. */
+       for (i = 0; i < tag_size; i++) {
+               iowrite8(in_tag[i], aes_dev->base_reg +
+                                   AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
+       }
+}
+
+/*
+ * Write B0 CCM block to OCS AES HW.
+ *
+ * Note: B0 format is documented in NIST Special Publication 800-38C
+ * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
+ * (see Section A.2.1)
+ */
+static int ocs_aes_ccm_write_b0(const struct ocs_aes_dev *aes_dev,
+                               const u8 *iv, u32 adata_size, u32 tag_size,
+                               u32 cryptlen)
+{
+       u8 b0[16]; /* CCM B0 block is 16 bytes long. */
+       int i, q;
+
+       /* Initialize B0 to 0. */
+       memset(b0, 0, sizeof(b0));
+
+       /*
+        * B0[0] is the 'Flags Octet' and has the following structure:
+        *   bit 7: Reserved
+        *   bit 6: Adata flag
+        *   bit 5-3: t value encoded as (t-2)/2
+        *   bit 2-0: q value encoded as q - 1
+        */
+       /* If there is AAD data, set the Adata flag. */
+       if (adata_size)
+               b0[0] |= BIT(6);
+       /*
+        * t denotes the octet length of T.
+        * t can only be an element of {4, 6, 8, 10, 12, 14, 16} and is
+        * encoded as (t - 2) / 2.
+        */
+       b0[0] |= (((tag_size - 2) / 2) & 0x7) << 3;
+       /*
+        * q is the octet length of Q.
+        * q can only be an element of {2, 3, 4, 5, 6, 7, 8} and is encoded as
+        * q - 1 (i.e., iv[0] & 0x7).
+        */
+       b0[0] |= iv[0] & 0x7;
+       /*
+        * Copy the Nonce N from IV to B0; N is located in iv[1]..iv[15 - q]
+        * and must be copied to b0[1]..b0[15-q].
+        * q == (iv[0] & 0x7) + 1
+        */
+       q = (iv[0] & 0x7) + 1;
+       for (i = 1; i <= 15 - q; i++)
+               b0[i] = iv[i];
+       /*
+        * The rest of B0 must contain Q, i.e., the message length.
+        * Q is encoded in q octets, in big-endian order, so to write it, we
+        * start from the end of B0 and we move backward.
+        */
+       i = sizeof(b0) - 1;
+       while (q) {
+               b0[i] = cryptlen & 0xff;
+               cryptlen >>= 8;
+               i--;
+               q--;
+       }
+       /*
+        * If cryptlen is not zero at this point, it means that its original
+        * value was too big.
+        */
+       if (cryptlen)
+               return -EOVERFLOW;
+       /* Now write B0 to OCS AES input buffer. */
+       for (i = 0; i < sizeof(b0); i++)
+               iowrite8(b0[i], aes_dev->base_reg +
+                               AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
+       return 0;
+}
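+
+/*
+ * Worked example (illustrative): with adata present, tag_size == 16 and
+ * iv[0] == 3 (i.e. q == 4), the flags octet is
+ * BIT(6) | (((16 - 2) / 2) << 3) | 3 == 0x40 | 0x38 | 0x03 == 0x7B.
+ */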
+
+/*
+ * Write adata length to OCS AES HW.
+ *
+ * Note: adata len encoding is documented in NIST Special Publication 800-38C
+ * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
+ * (see Section A.2.2)
+ */
+static void ocs_aes_ccm_write_adata_len(const struct ocs_aes_dev *aes_dev,
+                                       u64 adata_len)
+{
+       u8 enc_a[10]; /* Maximum encoded size: 10 octets. */
+       int i, len;
+
+       /*
+        * adata_len ('a') is encoded as follows:
+        * If 0 < a < 2^16 - 2^8    ==> 'a' encoded as [a]16, i.e., two octets
+        *                              (big endian).
+        * If 2^16 - 2^8 ≤ a < 2^32 ==> 'a' encoded as 0xff || 0xfe || [a]32,
+        *                              i.e., six octets (big endian).
+        * If 2^32 ≤ a < 2^64       ==> 'a' encoded as 0xff || 0xff || [a]64,
+        *                              i.e., ten octets (big endian).
+        */
+       if (adata_len < 65280) {
+               len = 2;
+               *(__be16 *)enc_a = cpu_to_be16(adata_len);
+       } else if (adata_len <= 0xFFFFFFFF) {
+               len = 6;
+               *(__be16 *)enc_a = cpu_to_be16(0xfffe);
+               *(__be32 *)&enc_a[2] = cpu_to_be32(adata_len);
+       } else { /* adata_len >= 2^32 */
+               len = 10;
+               *(__be16 *)enc_a = cpu_to_be16(0xffff);
+               *(__be64 *)&enc_a[2] = cpu_to_be64(adata_len);
+       }
+       for (i = 0; i < len; i++)
+               iowrite8(enc_a[i],
+                        aes_dev->base_reg +
+                        AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
+}
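+
+/*
+ * For example (illustrative): adata_len == 20 is written as the two octets
+ * 0x00 0x14, while adata_len == 65536 is written as the six octets
+ * 0xff 0xfe 0x00 0x01 0x00 0x00.
+ */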
+
+static int ocs_aes_ccm_do_adata(struct ocs_aes_dev *aes_dev,
+                               dma_addr_t adata_dma_list, u32 adata_size)
+{
+       int rc;
+
+       if (!adata_size) {
+               /* Since there is no aad, the LAST_GCX bit can be set now. */
+               aes_a_set_last_gcx_and_adata(aes_dev);
+               goto exit;
+       }
+
+       /* Adata case. */
+
+       /*
+        * Form the encoding of the Associated data length and write it
+        * to the AES/SM4 input buffer.
+        */
+       ocs_aes_ccm_write_adata_len(aes_dev, adata_size);
+
+       /* Configure the AES/SM4 DMA to fetch the Associated Data */
+       dma_to_ocs_aes_ll(aes_dev, adata_dma_list);
+
+       /* Activate DMA to fetch Associated data. */
+       aes_a_dma_active_src_ll_en(aes_dev);
+
+       /* Set LAST_GCX and LAST_ADATA in AES ACTIVE register. */
+       aes_a_set_last_gcx_and_adata(aes_dev);
+
+       /* Wait for DMA transfer to complete. */
+       rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
+       if (rc)
+               return rc;
+
+exit:
+       /* Wait until adata (if present) has been processed. */
+       aes_a_wait_last_gcx(aes_dev);
+       aes_a_dma_wait_input_buffer_occupancy(aes_dev);
+
+       return 0;
+}
+
+static int ocs_aes_ccm_encrypt_do_payload(struct ocs_aes_dev *aes_dev,
+                                         dma_addr_t dst_dma_list,
+                                         dma_addr_t src_dma_list,
+                                         u32 src_size)
+{
+       if (src_size) {
+               /*
+                * Configure and activate DMA for both input and output
+                * data.
+                */
+               dma_to_ocs_aes_ll(aes_dev, src_dma_list);
+               dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
+               aes_a_dma_active_src_dst_ll_en(aes_dev);
+       } else {
+               /* Configure and activate DMA for output data only. */
+               dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
+               aes_a_dma_active_dst_ll_en(aes_dev);
+       }
+
+       /*
+        * Set the LAST GCX bit in AES_ACTIVE Register to instruct
+        * AES/SM4 engine to pad the last block of data.
+        */
+       aes_a_set_last_gcx(aes_dev);
+
+       /* We are done, wait for IRQ and return. */
+       return ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
+}
+
+static int ocs_aes_ccm_decrypt_do_payload(struct ocs_aes_dev *aes_dev,
+                                         dma_addr_t dst_dma_list,
+                                         dma_addr_t src_dma_list,
+                                         u32 src_size)
+{
+       if (!src_size) {
+               /* Let engine process 0-length input. */
+               aes_a_dma_set_xfer_size_zero(aes_dev);
+               aes_a_dma_active(aes_dev);
+               aes_a_set_last_gcx(aes_dev);
+
+               return 0;
+       }
+
+       /*
+        * Configure and activate DMA for both input and output
+        * data.
+        */
+       dma_to_ocs_aes_ll(aes_dev, src_dma_list);
+       dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
+       aes_a_dma_active_src_dst_ll_en(aes_dev);
+       /*
+        * Set the LAST GCX bit in AES_ACTIVE Register; this allows the
+        * AES/SM4 engine to differentiate between encrypted data and
+        * encrypted MAC.
+        */
+       aes_a_set_last_gcx(aes_dev);
+       /*
+        * Enable DMA DONE interrupt; once the DMA transfer is over, the
+        * interrupt handler will process the MAC/tag.
+        */
+       return ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
+}
+
+/*
+ * Compare Tag to Yr.
+ *
+ * Only used at the end of CCM decrypt. If tag == yr, message authentication
+ * has succeeded.
+ */
+static inline int ccm_compare_tag_to_yr(struct ocs_aes_dev *aes_dev,
+                                       u8 tag_size_bytes)
+{
+       u32 tag[AES_MAX_TAG_SIZE_U32];
+       u32 yr[AES_MAX_TAG_SIZE_U32];
+       u8 i;
+
+       /* Read Tag and Yr from AES registers. */
+       for (i = 0; i < AES_MAX_TAG_SIZE_U32; i++) {
+               tag[i] = ioread32(aes_dev->base_reg +
+                                 AES_T_MAC_0_OFFSET + (i * sizeof(u32)));
+               yr[i] = ioread32(aes_dev->base_reg +
+                                AES_MULTIPURPOSE2_0_OFFSET +
+                                (i * sizeof(u32)));
+       }
+
+       return memcmp(tag, yr, tag_size_bytes) ? -EBADMSG : 0;
+}
+
+/**
+ * ocs_aes_ccm_op() - Perform CCM operation.
+ * @aes_dev:           The OCS AES device to use.
+ * @cipher:            The Cipher to use (AES or SM4).
+ * @instruction:       The instruction to perform (encrypt or decrypt).
+ * @dst_dma_list:      The OCS DMA list mapping output memory.
+ * @src_dma_list:      The OCS DMA list mapping input payload data.
+ * @src_size:          The amount of data mapped by @src_dma_list.
+ * @iv:                        The input IV vector.
+ * @adata_dma_list:    The OCS DMA list mapping input A-data.
+ * @adata_size:                The amount of data mapped by @adata_dma_list.
+ * @in_tag:            Input tag.
+ * @tag_size:          The size (in bytes) of @in_tag.
+ *
+ * Note: for encrypt the tag is appended to the ciphertext (in the memory
+ *      mapped by @dst_dma_list).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,
+                  enum ocs_cipher cipher,
+                  enum ocs_instruction instruction,
+                  dma_addr_t dst_dma_list,
+                  dma_addr_t src_dma_list,
+                  u32 src_size,
+                  u8 *iv,
+                  dma_addr_t adata_dma_list,
+                  u32 adata_size,
+                  u8 *in_tag,
+                  u32 tag_size)
+{
+       u32 *iv_32;
+       u8 lprime;
+       int rc;
+
+       rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
+                                    AES_BLOCK_SIZE, adata_dma_list, adata_size,
+                                    in_tag, tag_size, cipher, OCS_MODE_CCM,
+                                    instruction, dst_dma_list);
+       if (rc)
+               return rc;
+
+       ocs_aes_init(aes_dev, OCS_MODE_CCM, cipher, instruction);
+
+       /*
+        * Note: RFC 3610 and NIST 800-38C require a counter of zero to encrypt
+        * the auth tag, so ensure this is the case.
+        */
+       lprime = iv[L_PRIME_IDX];
+       memset(&iv[COUNTER_START(lprime)], 0, COUNTER_LEN(lprime));
+
+       /*
+        * Nonce is already converted to ctr0 before being passed into this
+        * function as iv.
+        */
+       iv_32 = (u32 *)iv;
+       iowrite32(__swab32(iv_32[0]),
+                 aes_dev->base_reg + AES_MULTIPURPOSE1_3_OFFSET);
+       iowrite32(__swab32(iv_32[1]),
+                 aes_dev->base_reg + AES_MULTIPURPOSE1_2_OFFSET);
+       iowrite32(__swab32(iv_32[2]),
+                 aes_dev->base_reg + AES_MULTIPURPOSE1_1_OFFSET);
+       iowrite32(__swab32(iv_32[3]),
+                 aes_dev->base_reg + AES_MULTIPURPOSE1_0_OFFSET);
+
+       /* Write MAC/tag length in register AES_TLEN */
+       iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
+       /*
+        * Write the byte length of the last AES/SM4 block of Payload data
+        * (without zero padding and without the length of the MAC) in register
+        * AES_PLEN.
+        */
+       ocs_aes_write_last_data_blk_len(aes_dev, src_size);
+
+       /* Set AES_ACTIVE.TRIGGER to start the operation. */
+       aes_a_op_trigger(aes_dev);
+
+       aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
+
+       /* Form block B0 and write it to the AES/SM4 input buffer. */
+       rc = ocs_aes_ccm_write_b0(aes_dev, iv, adata_size, tag_size, src_size);
+       if (rc)
+               return rc;
+       /*
+        * Ensure there has been at least CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT
+        * clock cycles since TRIGGER bit was set
+        */
+       aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
+                                               CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT);
+
+       /* Process Adata. */
+       rc = ocs_aes_ccm_do_adata(aes_dev, adata_dma_list, adata_size);
+       if (rc)
+               return rc;
+
+       /* For Encrypt case we just process the payload and return. */
+       if (instruction == OCS_ENCRYPT) {
+               return ocs_aes_ccm_encrypt_do_payload(aes_dev, dst_dma_list,
+                                                     src_dma_list, src_size);
+       }
+       /* For Decrypt we need to process the payload and then the tag. */
+       rc = ocs_aes_ccm_decrypt_do_payload(aes_dev, dst_dma_list,
+                                           src_dma_list, src_size);
+       if (rc)
+               return rc;
+
+       /* Process MAC/tag directly: feed tag to engine and wait for IRQ. */
+       ocs_aes_ccm_write_encrypted_tag(aes_dev, in_tag, tag_size);
+       rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
+       if (rc)
+               return rc;
+
+       return ccm_compare_tag_to_yr(aes_dev, tag_size);
+}
+
+/**
+ * ocs_create_linked_list_from_sg() - Create OCS DMA linked list from SG list.
+ * @aes_dev:      The OCS AES device the list will be created for.
+ * @sg:           The SG list the OCS DMA linked list will be created from.
+ *                When passed to this function, @sg must have been already
+ *                mapped with dma_map_sg().
+ * @sg_dma_count: The number of DMA-mapped entries in @sg. This must be the
+ *                value returned by dma_map_sg() when @sg was mapped.
+ * @dll_desc:     The OCS DMA dma_list to use to store information about the
+ *                created linked list.
+ * @data_size:    The size of the data (from the SG list) to be mapped into
+ *                the OCS DMA linked list.
+ * @data_offset:  The offset (within the SG list) of the data to be mapped.
+ *
+ * Return:     0 on success, negative error code otherwise.
+ */
+int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
+                                  struct scatterlist *sg,
+                                  int sg_dma_count,
+                                  struct ocs_dll_desc *dll_desc,
+                                  size_t data_size, size_t data_offset)
+{
+       struct ocs_dma_linked_list *ll = NULL;
+       struct scatterlist *sg_tmp;
+       unsigned int tmp;
+       int dma_nents;
+       int i;
+
+       if (!dll_desc || !sg || !aes_dev)
+               return -EINVAL;
+
+       /* Default values for when no dll_desc is created. */
+       dll_desc->vaddr = NULL;
+       dll_desc->dma_addr = DMA_MAPPING_ERROR;
+       dll_desc->size = 0;
+
+       if (data_size == 0)
+               return 0;
+
+       /* Loop over sg_list until we reach entry at specified offset. */
+       while (data_offset >= sg_dma_len(sg)) {
+               data_offset -= sg_dma_len(sg);
+               sg_dma_count--;
+               sg = sg_next(sg);
+               /* If we reach the end of the list, offset was invalid. */
+               if (!sg || sg_dma_count == 0)
+                       return -EINVAL;
+       }
+
+       /* Compute number of DMA-mapped SG entries to add into OCS DMA list. */
+       dma_nents = 0;
+       tmp = 0;
+       sg_tmp = sg;
+       while (tmp < data_offset + data_size) {
+               /* If we reach the end of the list, data_size was invalid. */
+               if (!sg_tmp)
+                       return -EINVAL;
+               tmp += sg_dma_len(sg_tmp);
+               dma_nents++;
+               sg_tmp = sg_next(sg_tmp);
+       }
+       if (dma_nents > sg_dma_count)
+               return -EINVAL;
+
+       /* Allocate the DMA list, one entry for each SG entry. */
+       dll_desc->size = sizeof(struct ocs_dma_linked_list) * dma_nents;
+       dll_desc->vaddr = dma_alloc_coherent(aes_dev->dev, dll_desc->size,
+                                            &dll_desc->dma_addr, GFP_KERNEL);
+       if (!dll_desc->vaddr)
+               return -ENOMEM;
+
+       /* Populate DMA linked list entries. */
+       ll = dll_desc->vaddr;
+       for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
+               ll[i].src_addr = sg_dma_address(sg) + data_offset;
+               ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
+                               (sg_dma_len(sg) - data_offset) : data_size;
+               data_offset = 0;
+               data_size -= ll[i].src_len;
+               /* Current element points to the DMA address of the next one. */
+               ll[i].next = dll_desc->dma_addr + (sizeof(*ll) * (i + 1));
+               ll[i].ll_flags = 0;
+       }
+       /* Terminate last element. */
+       ll[i - 1].next = 0;
+       ll[i - 1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
+
+       return 0;
+}
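+
+/*
+ * Illustrative usage sketch for ocs_create_linked_list_from_sg(); 'aes_dev',
+ * 'sg', 'nents' and 'len' are assumed to be provided by the caller:
+ *
+ *     struct ocs_dll_desc dll;
+ *     int count, rc;
+ *
+ *     count = dma_map_sg(aes_dev->dev, sg, nents, DMA_TO_DEVICE);
+ *     if (count == 0)
+ *             return -ENOMEM;
+ *     rc = ocs_create_linked_list_from_sg(aes_dev, sg, count, &dll, len, 0);
+ *     if (!rc) {
+ *             // ... pass dll.dma_addr as the source list of an OCS op ...
+ *             dma_free_coherent(aes_dev->dev, dll.size, dll.vaddr,
+ *                               dll.dma_addr);
+ *     }
+ *     dma_unmap_sg(aes_dev->dev, sg, nents, DMA_TO_DEVICE);
+ */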
diff --git a/drivers/crypto/intel/keembay/ocs-aes.h b/drivers/crypto/intel/keembay/ocs-aes.h
new file mode 100644 (file)
index 0000000..c035fc4
--- /dev/null
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Keem Bay OCS AES Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#ifndef _CRYPTO_OCS_AES_H
+#define _CRYPTO_OCS_AES_H
+
+#include <linux/dma-mapping.h>
+
+enum ocs_cipher {
+       OCS_AES = 0,
+       OCS_SM4 = 1,
+};
+
+enum ocs_mode {
+       OCS_MODE_ECB = 0,
+       OCS_MODE_CBC = 1,
+       OCS_MODE_CTR = 2,
+       OCS_MODE_CCM = 6,
+       OCS_MODE_GCM = 7,
+       OCS_MODE_CTS = 9,
+};
+
+enum ocs_instruction {
+       OCS_ENCRYPT = 0,
+       OCS_DECRYPT = 1,
+       OCS_EXPAND  = 2,
+       OCS_BYPASS  = 3,
+};
+
+/**
+ * struct ocs_aes_dev - AES device context.
+ * @list:                      List head for insertion into the device list held
+ *                             by the driver.
+ * @dev:                       OCS AES device.
+ * @irq:                       IRQ number.
+ * @base_reg:                  IO base address of OCS AES.
+ * @irq_completion:            Completion to indicate IRQ has been triggered.
+ * @dma_err_mask:              Error reported by OCS DMA interrupts.
+ * @engine:                    Crypto engine for the device.
+ */
+struct ocs_aes_dev {
+       struct list_head list;
+       struct device *dev;
+       int irq;
+       void __iomem *base_reg;
+       struct completion irq_completion;
+       u32 dma_err_mask;
+       struct crypto_engine *engine;
+};
+
+/**
+ * struct ocs_dll_desc - Descriptor of an OCS DMA Linked List.
+ * @vaddr:     Virtual address of the linked list head.
+ * @dma_addr:  DMA address of the linked list head.
+ * @size:      Size (in bytes) of the linked list.
+ */
+struct ocs_dll_desc {
+       void            *vaddr;
+       dma_addr_t      dma_addr;
+       size_t          size;
+};
+
+int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, const u32 key_size,
+                   const u8 *key, const enum ocs_cipher cipher);
+
+int ocs_aes_op(struct ocs_aes_dev *aes_dev,
+              enum ocs_mode mode,
+              enum ocs_cipher cipher,
+              enum ocs_instruction instruction,
+              dma_addr_t dst_dma_list,
+              dma_addr_t src_dma_list,
+              u32 src_size,
+              u8 *iv,
+              u32 iv_size);
+
+/**
+ * ocs_aes_bypass_op() - Use OCS DMA to copy data.
+ * @aes_dev:            The OCS AES device to use.
+ * @dst_dma_list:      The OCS DMA list mapping the memory where input data
+ *                     will be copied to.
+ * @src_dma_list:      The OCS DMA list mapping input data.
+ * @src_size:          The amount of data to copy.
+ *
+ * Return:     0 on success, negative error code otherwise.
+ */
+static inline int ocs_aes_bypass_op(struct ocs_aes_dev *aes_dev,
+                                   dma_addr_t dst_dma_list,
+                                   dma_addr_t src_dma_list, u32 src_size)
+{
+       return ocs_aes_op(aes_dev, OCS_MODE_ECB, OCS_AES, OCS_BYPASS,
+                         dst_dma_list, src_dma_list, src_size, NULL, 0);
+}
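+
+/*
+ * A hypothetical sketch of a single AES-256-CBC encryption built from the
+ * declarations above; 'aes_dev', 'key', 'iv' (16 bytes) and the two OCS DMA
+ * lists are assumed to have been set up by the caller:
+ *
+ *     rc = ocs_aes_set_key(aes_dev, 32, key, OCS_AES);
+ *     if (!rc)
+ *             rc = ocs_aes_op(aes_dev, OCS_MODE_CBC, OCS_AES, OCS_ENCRYPT,
+ *                             dst_dma_list, src_dma_list, src_size, iv, 16);
+ */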
+
+int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
+                  enum ocs_cipher cipher,
+                  enum ocs_instruction instruction,
+                  dma_addr_t dst_dma_list,
+                  dma_addr_t src_dma_list,
+                  u32 src_size,
+                  const u8 *iv,
+                  dma_addr_t aad_dma_list,
+                  u32 aad_size,
+                  u8 *out_tag,
+                  u32 tag_size);
+
+int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,
+                  enum ocs_cipher cipher,
+                  enum ocs_instruction instruction,
+                  dma_addr_t dst_dma_list,
+                  dma_addr_t src_dma_list,
+                  u32 src_size,
+                  u8 *iv,
+                  dma_addr_t adata_dma_list,
+                  u32 adata_size,
+                  u8 *in_tag,
+                  u32 tag_size);
+
+int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
+                                  struct scatterlist *sg,
+                                  int sg_dma_count,
+                                  struct ocs_dll_desc *dll_desc,
+                                  size_t data_size,
+                                  size_t data_offset);
+
+irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id);
+
+#endif
diff --git a/drivers/crypto/intel/keembay/ocs-hcu.c b/drivers/crypto/intel/keembay/ocs-hcu.c
new file mode 100644 (file)
index 0000000..deb9bd4
--- /dev/null
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include <crypto/sha2.h>
+
+#include "ocs-hcu.h"
+
+/* Registers. */
+#define OCS_HCU_MODE                   0x00
+#define OCS_HCU_CHAIN                  0x04
+#define OCS_HCU_OPERATION              0x08
+#define OCS_HCU_KEY_0                  0x0C
+#define OCS_HCU_ISR                    0x50
+#define OCS_HCU_IER                    0x54
+#define OCS_HCU_STATUS                 0x58
+#define OCS_HCU_MSG_LEN_LO             0x60
+#define OCS_HCU_MSG_LEN_HI             0x64
+#define OCS_HCU_KEY_BYTE_ORDER_CFG     0x80
+#define OCS_HCU_DMA_SRC_ADDR           0x400
+#define OCS_HCU_DMA_SRC_SIZE           0x408
+#define OCS_HCU_DMA_DST_SIZE           0x40C
+#define OCS_HCU_DMA_DMA_MODE           0x410
+#define OCS_HCU_DMA_NEXT_SRC_DESCR     0x418
+#define OCS_HCU_DMA_MSI_ISR            0x480
+#define OCS_HCU_DMA_MSI_IER            0x484
+#define OCS_HCU_DMA_MSI_MASK           0x488
+
+/* Register bit definitions. */
+#define HCU_MODE_ALGO_SHIFT            16
+#define HCU_MODE_HMAC_SHIFT            22
+
+#define HCU_STATUS_BUSY                        BIT(0)
+
+#define HCU_BYTE_ORDER_SWAP            BIT(0)
+
+#define HCU_IRQ_HASH_DONE              BIT(2)
+#define HCU_IRQ_HASH_ERR_MASK          (BIT(3) | BIT(1) | BIT(0))
+
+#define HCU_DMA_IRQ_SRC_DONE           BIT(0)
+#define HCU_DMA_IRQ_SAI_ERR            BIT(2)
+#define HCU_DMA_IRQ_BAD_COMP_ERR       BIT(3)
+#define HCU_DMA_IRQ_INBUF_RD_ERR       BIT(4)
+#define HCU_DMA_IRQ_INBUF_WD_ERR       BIT(5)
+#define HCU_DMA_IRQ_OUTBUF_WR_ERR      BIT(6)
+#define HCU_DMA_IRQ_OUTBUF_RD_ERR      BIT(7)
+#define HCU_DMA_IRQ_CRD_ERR            BIT(8)
+#define HCU_DMA_IRQ_ERR_MASK           (HCU_DMA_IRQ_SAI_ERR | \
+                                        HCU_DMA_IRQ_BAD_COMP_ERR | \
+                                        HCU_DMA_IRQ_INBUF_RD_ERR | \
+                                        HCU_DMA_IRQ_INBUF_WD_ERR | \
+                                        HCU_DMA_IRQ_OUTBUF_WR_ERR | \
+                                        HCU_DMA_IRQ_OUTBUF_RD_ERR | \
+                                        HCU_DMA_IRQ_CRD_ERR)
+
+#define HCU_DMA_SNOOP_MASK             (0x7 << 28)
+#define HCU_DMA_SRC_LL_EN              BIT(25)
+#define HCU_DMA_EN                     BIT(31)
+
+#define OCS_HCU_ENDIANNESS_VALUE       0x2A
+
+#define HCU_DMA_MSI_UNMASK             BIT(0)
+#define HCU_DMA_MSI_DISABLE            0
+#define HCU_IRQ_DISABLE                        0
+
+#define OCS_HCU_START                  BIT(0)
+#define OCS_HCU_TERMINATE              BIT(1)
+
+#define OCS_LL_DMA_FLAG_TERMINATE      BIT(31)
+
+#define OCS_HCU_HW_KEY_LEN_U32         (OCS_HCU_HW_KEY_LEN / sizeof(u32))
+
+#define HCU_DATA_WRITE_ENDIANNESS_OFFSET       26
+
+#define OCS_HCU_NUM_CHAINS_SHA256_224_SM3      (SHA256_DIGEST_SIZE / sizeof(u32))
+#define OCS_HCU_NUM_CHAINS_SHA384_512          (SHA512_DIGEST_SIZE / sizeof(u32))
+
+/*
+ * While polling a busy HCU, wait at most 200us between one check and the
+ * next.
+ */
+#define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US       200
+/* Wait at most 1 second for a busy HCU to become free. */
+#define OCS_HCU_WAIT_BUSY_TIMEOUT_US           1000000
+
+/**
+ * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
+ * @src_addr:  Source address of the data.
+ * @src_len:   Length of data to be fetched.
+ * @nxt_desc:  Next descriptor to fetch.
+ * @ll_flags:  Flags (Freeze & terminate) for the DMA engine.
+ */
+struct ocs_hcu_dma_entry {
+       u32 src_addr;
+       u32 src_len;
+       u32 nxt_desc;
+       u32 ll_flags;
+};
+
+/**
+ * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
+ * @head:      The head of the list (points to the array backing the list).
+ * @tail:      The current tail of the list; NULL if the list is empty.
+ * @dma_addr:  The DMA address of @head (i.e., the DMA address of the backing
+ *             array).
+ * @max_nents: Maximum number of entries in the list (i.e., number of elements
+ *             in the backing array).
+ *
+ * The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
+ * backing the list is allocated with dma_alloc_coherent() and pointed to by
+ * @head.
+ */
+struct ocs_hcu_dma_list {
+       struct ocs_hcu_dma_entry        *head;
+       struct ocs_hcu_dma_entry        *tail;
+       dma_addr_t                      dma_addr;
+       size_t                          max_nents;
+};
+
+static inline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
+{
+       switch (algo) {
+       case OCS_HCU_ALGO_SHA224:
+       case OCS_HCU_ALGO_SHA256:
+       case OCS_HCU_ALGO_SM3:
+               return OCS_HCU_NUM_CHAINS_SHA256_224_SM3;
+       case OCS_HCU_ALGO_SHA384:
+       case OCS_HCU_ALGO_SHA512:
+               return OCS_HCU_NUM_CHAINS_SHA384_512;
+       default:
+               return 0;
+       }
+}
+
+static inline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
+{
+       switch (algo) {
+       case OCS_HCU_ALGO_SHA224:
+               return SHA224_DIGEST_SIZE;
+       case OCS_HCU_ALGO_SHA256:
+       case OCS_HCU_ALGO_SM3:
+               /* SM3 shares the same digest size. */
+               return SHA256_DIGEST_SIZE;
+       case OCS_HCU_ALGO_SHA384:
+               return SHA384_DIGEST_SIZE;
+       case OCS_HCU_ALGO_SHA512:
+               return SHA512_DIGEST_SIZE;
+       default:
+               return 0;
+       }
+}
+
+/**
+ * ocs_hcu_wait_busy() - Wait for the OCS HCU hardware to become usable.
+ * @hcu_dev:   OCS HCU device to wait for.
+ *
+ * Return: 0 if the device is free, -ETIMEDOUT if it is busy and the internal
+ *        timeout has expired.
+ */
+static int ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
+{
+       long val;
+
+       return readl_poll_timeout(hcu_dev->io_base + OCS_HCU_STATUS, val,
+                                 !(val & HCU_STATUS_BUSY),
+                                 OCS_HCU_WAIT_BUSY_RETRY_DELAY_US,
+                                 OCS_HCU_WAIT_BUSY_TIMEOUT_US);
+}
+
+static void ocs_hcu_done_irq_en(struct ocs_hcu_dev *hcu_dev)
+{
+       /* Clear any pending interrupts. */
+       writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_ISR);
+       hcu_dev->irq_err = false;
+       /* Enable error and HCU done interrupts. */
+       writel(HCU_IRQ_HASH_DONE | HCU_IRQ_HASH_ERR_MASK,
+              hcu_dev->io_base + OCS_HCU_IER);
+}
+
+static void ocs_hcu_dma_irq_en(struct ocs_hcu_dev *hcu_dev)
+{
+       /* Clear any pending interrupts. */
+       writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+       hcu_dev->irq_err = false;
+       /* Enable only the DMA source-done and error interrupts. */
+       writel(HCU_DMA_IRQ_ERR_MASK | HCU_DMA_IRQ_SRC_DONE,
+              hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
+       /* Unmask the DMA MSI interrupt. */
+       writel(HCU_DMA_MSI_UNMASK, hcu_dev->io_base + OCS_HCU_DMA_MSI_MASK);
+}
+
+static void ocs_hcu_irq_dis(struct ocs_hcu_dev *hcu_dev)
+{
+       writel(HCU_IRQ_DISABLE, hcu_dev->io_base + OCS_HCU_IER);
+       writel(HCU_DMA_MSI_DISABLE, hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
+}
+
+static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
+{
+       int rc;
+
+       rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
+       if (rc)
+               goto exit;
+
+       if (hcu_dev->irq_err) {
+               /* Unset flag and return error. */
+               hcu_dev->irq_err = false;
+               rc = -EIO;
+               goto exit;
+       }
+
+exit:
+       ocs_hcu_irq_dis(hcu_dev);
+
+       return rc;
+}
+
+/**
+ * ocs_hcu_get_intermediate_data() - Get intermediate data.
+ * @hcu_dev:   The target HCU device.
+ * @data:      Where to store the intermediate data.
+ * @algo:      The algorithm being used.
+ *
+ * This function is used to save the current hashing process state in order to
+ * continue it in the future.
+ *
+ * Note: once all data has been processed, the intermediate data actually
+ * contains the hashing result. So this function is also used to retrieve the
+ * final result of a hashing process.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
+                                        struct ocs_hcu_idata *data,
+                                        enum ocs_hcu_algo algo)
+{
+       const int n = ocs_hcu_num_chains(algo);
+       u32 *chain;
+       int rc;
+       int i;
+
+       /* A buffer for the intermediate data must be provided. */
+       if (!data)
+               return -EINVAL;
+
+       chain = (u32 *)data->digest;
+
+       /* Ensure that the OCS is no longer busy before reading the chains. */
+       rc = ocs_hcu_wait_busy(hcu_dev);
+       if (rc)
+               return rc;
+
+       /*
+        * This loop is safe because data->digest is an array of
+        * SHA512_DIGEST_SIZE bytes and the maximum value returned by
+        * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
+        * to SHA512_DIGEST_SIZE / sizeof(u32).
+        */
+       for (i = 0; i < n; i++)
+               chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
+
+       data->msg_len_lo = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
+       data->msg_len_hi = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
+
+       return 0;
+}
+
+/**
+ * ocs_hcu_set_intermediate_data() - Set intermediate data.
+ * @hcu_dev:   The target HCU device.
+ * @data:      The intermediate data to be set.
+ * @algo:      The algorithm being used.
+ *
+ * This function is used to continue a previous hashing process.
+ */
+static void ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev,
+                                         const struct ocs_hcu_idata *data,
+                                         enum ocs_hcu_algo algo)
+{
+       const int n = ocs_hcu_num_chains(algo);
+       u32 *chain = (u32 *)data->digest;
+       int i;
+
+       /*
+        * This loop is safe because data->digest is an array of
+        * SHA512_DIGEST_SIZE bytes and the maximum value returned by
+        * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
+        * to SHA512_DIGEST_SIZE / sizeof(u32).
+        */
+       for (i = 0; i < n; i++)
+               writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);
+
+       writel(data->msg_len_lo, hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
+       writel(data->msg_len_hi, hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
+}
+
+static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
+                             enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
+{
+       u32 *chain;
+       int rc;
+       int i;
+
+       if (!dgst)
+               return -EINVAL;
+
+       /* Length of the output buffer must match the algo digest size. */
+       if (dgst_len != ocs_hcu_digest_size(algo))
+               return -EINVAL;
+
+       /* Ensure that the OCS is no longer busy before reading the chains. */
+       rc = ocs_hcu_wait_busy(hcu_dev);
+       if (rc)
+               return rc;
+
+       chain = (u32 *)dgst;
+       for (i = 0; i < dgst_len / sizeof(u32); i++)
+               chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
+
+       return 0;
+}
+
+/**
+ * ocs_hcu_hw_cfg() - Configure the HCU hardware.
+ * @hcu_dev:   The HCU device to configure.
+ * @algo:      The algorithm to be used by the HCU device.
+ * @use_hmac:  Whether or not HW HMAC should be used.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+                         bool use_hmac)
+{
+       u32 cfg;
+       int rc;
+
+       if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
+           algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
+           algo != OCS_HCU_ALGO_SM3)
+               return -EINVAL;
+
+       rc = ocs_hcu_wait_busy(hcu_dev);
+       if (rc)
+               return rc;
+
+       /* Ensure interrupts are disabled. */
+       ocs_hcu_irq_dis(hcu_dev);
+
+       /* Configure endianness, hashing algorithm and HW HMAC (if needed) */
+       cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
+       cfg |= algo << HCU_MODE_ALGO_SHIFT;
+       if (use_hmac)
+               cfg |= BIT(HCU_MODE_HMAC_SHIFT);
+
+       writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);
+
+       return 0;
+}
+
+/**
+ * ocs_hcu_clear_key() - Clear key stored in OCS HMAC KEY registers.
+ * @hcu_dev:   The OCS HCU device whose key registers should be cleared.
+ */
+static void ocs_hcu_clear_key(struct ocs_hcu_dev *hcu_dev)
+{
+       int reg_off;
+
+       /* Clear OCS_HCU_KEY_[0..15] */
+       for (reg_off = 0; reg_off < OCS_HCU_HW_KEY_LEN; reg_off += sizeof(u32))
+               writel(0, hcu_dev->io_base + OCS_HCU_KEY_0 + reg_off);
+}
+
+/**
+ * ocs_hcu_write_key() - Write key to OCS HMAC KEY registers.
+ * @hcu_dev:   The OCS HCU device the key should be written to.
+ * @key:       The key to be written.
+ * @len:       The size of the key to write. It must be at most
+ *             OCS_HCU_HW_KEY_LEN; shorter keys are zero-padded.
+ *
+ * Return:     0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key, size_t len)
+{
+       u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
+       int i;
+
+       if (len > OCS_HCU_HW_KEY_LEN)
+               return -EINVAL;
+
+       /* Copy key into temporary u32 array. */
+       memcpy(key_u32, key, len);
+
+       /*
+        * Hardware requires all the bytes of the HW Key vector to be
+        * written. So pad with zero until we reach OCS_HCU_HW_KEY_LEN.
+        */
+       memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);
+
+       /*
+        * OCS hardware expects the MSB of the key to be written at the highest
+        * address of the HCU Key vector; in other words, the key must be
+        * written in reverse order.
+        *
+        * Therefore, we first enable byte swapping for the HCU key vector,
+        * so that the bytes of each 32-bit word written to OCS_HCU_KEY_[0..15]
+        * will be
+        * swapped:
+        * 3 <---> 0, 2 <---> 1.
+        */
+       writel(HCU_BYTE_ORDER_SWAP,
+              hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);
+       /*
+        * And then we write the 32-bit words composing the key starting from
+        * the end of the key.
+        */
+       for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
+               writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
+                      hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));
+
+       memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);
+
+       return 0;
+}
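+
+/*
+ * Worked example of the reversal above (illustrative, little-endian CPU):
+ * key_u32[15] holds key bytes 60..63 and is written first, to OCS_HCU_KEY_0;
+ * the byte swap then places key byte 63 at the lowest byte address of the
+ * vector and, at the other end, key byte 0 (the MSB) at the highest address.
+ * The net effect is that the key is stored fully byte-reversed, as the
+ * hardware expects.
+ */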
+
+/**
+ * ocs_hcu_ll_dma_start() - Start OCS HCU hashing via DMA.
+ * @hcu_dev:   The OCS HCU device to use.
+ * @dma_list:  The OCS DMA list mapping the data to hash.
+ * @finalize:  Whether or not this is the last hashing operation and therefore
+ *             the final hash should be computed even if the data is not
+ *             block-aligned.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
+                               const struct ocs_hcu_dma_list *dma_list,
+                               bool finalize)
+{
+       u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;
+       int rc;
+
+       if (!dma_list)
+               return -EINVAL;
+
+       /*
+        * For final requests we use HCU_DONE IRQ to be notified when all input
+        * data has been processed by the HCU; however, we cannot do so for
+        * non-final requests, because we don't get a HCU_DONE IRQ when we
+        * don't terminate the operation.
+        *
+        * Therefore, for non-final requests, we use the DMA IRQ, which
+        * triggers when the DMA has finished feeding all the input data to the
+        * HCU, but the HCU may still be processing it. This is fine, since we
+        * will wait for the HCU processing to be completed when we try to read
+        * intermediate results, in ocs_hcu_get_intermediate_data().
+        */
+       if (finalize)
+               ocs_hcu_done_irq_en(hcu_dev);
+       else
+               ocs_hcu_dma_irq_en(hcu_dev);
+
+       reinit_completion(&hcu_dev->irq_done);
+       writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
+       writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
+       writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);
+
+       writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+       writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
+
+       if (finalize)
+               writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+       rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
+                                               int max_nents)
+{
+       struct ocs_hcu_dma_list *dma_list;
+
+       dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
+       if (!dma_list)
+               return NULL;
+
+       /* Allocate the array backing the DMA list. */
+       dma_list->head = dma_alloc_coherent(hcu_dev->dev,
+                                           sizeof(*dma_list->head) * max_nents,
+                                           &dma_list->dma_addr, GFP_KERNEL);
+       if (!dma_list->head) {
+               kfree(dma_list);
+               return NULL;
+       }
+       dma_list->max_nents = max_nents;
+       dma_list->tail = NULL;
+
+       return dma_list;
+}
+
+void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
+                          struct ocs_hcu_dma_list *dma_list)
+{
+       if (!dma_list)
+               return;
+
+       dma_free_coherent(hcu_dev->dev,
+                         sizeof(*dma_list->head) * dma_list->max_nents,
+                         dma_list->head, dma_list->dma_addr);
+
+       kfree(dma_list);
+}
+
+/* Add a new DMA entry at the end of the OCS DMA list. */
+int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
+                             struct ocs_hcu_dma_list *dma_list,
+                             dma_addr_t addr, u32 len)
+{
+       struct device *dev = hcu_dev->dev;
+       struct ocs_hcu_dma_entry *old_tail;
+       struct ocs_hcu_dma_entry *new_tail;
+
+       if (!len)
+               return 0;
+
+       if (!dma_list)
+               return -EINVAL;
+
+       if (addr & ~OCS_HCU_DMA_BIT_MASK) {
+               dev_err(dev,
+                       "Unexpected error: Invalid DMA address for OCS HCU\n");
+               return -EINVAL;
+       }
+
+       old_tail = dma_list->tail;
+       new_tail = old_tail ? old_tail + 1 : dma_list->head;
+
+       /* Check if list is full. */
+       if (new_tail - dma_list->head >= dma_list->max_nents)
+               return -ENOMEM;
+
+       /*
+        * If there was an old tail (i.e., this is not the first element we are
+        * adding), un-terminate the old tail and make it point to the new one.
+        */
+       if (old_tail) {
+               old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
+               /*
+                * The old tail 'nxt_desc' must point to the DMA address of the
+                * new tail.
+                */
+               old_tail->nxt_desc = dma_list->dma_addr +
+                                    sizeof(*dma_list->tail) * (new_tail -
+                                                               dma_list->head);
+       }
+
+       new_tail->src_addr = (u32)addr;
+       new_tail->src_len = (u32)len;
+       new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
+       new_tail->nxt_desc = 0;
+
+       /* Update list tail with new tail. */
+       dma_list->tail = new_tail;
+
+       return 0;
+}
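+
+/*
+ * A minimal sketch of how the DMA-list helpers above combine; 'hcu_dev',
+ * 'ctx', 'addr' and 'len' are assumed to be provided by the caller, with
+ * 'addr' a DMA address within OCS_HCU_DMA_BIT_MASK:
+ *
+ *     struct ocs_hcu_dma_list *list;
+ *     int rc;
+ *
+ *     list = ocs_hcu_dma_list_alloc(hcu_dev, 8);
+ *     if (!list)
+ *             return -ENOMEM;
+ *     rc = ocs_hcu_dma_list_add_tail(hcu_dev, list, addr, len);
+ *     if (!rc)
+ *             rc = ocs_hcu_hash_update(hcu_dev, ctx, list);
+ *     ocs_hcu_dma_list_free(hcu_dev, list);
+ */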
+
+/**
+ * ocs_hcu_hash_init() - Initialize hash operation context.
+ * @ctx:       The context to initialize.
+ * @algo:      The hashing algorithm to use.
+ *
+ * Return:     0 on success, negative error code otherwise.
+ */
+int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
+{
+       if (!ctx)
+               return -EINVAL;
+
+       ctx->algo = algo;
+       ctx->idata.msg_len_lo = 0;
+       ctx->idata.msg_len_hi = 0;
+       /* No need to set idata.digest to 0. */
+
+       return 0;
+}
+
+/**
+ * ocs_hcu_hash_update() - Perform a hashing iteration.
+ * @hcu_dev:   The OCS HCU device to use.
+ * @ctx:       The OCS HCU hashing context.
+ * @dma_list:  The OCS DMA list mapping the input data to process.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
+                       struct ocs_hcu_hash_ctx *ctx,
+                       const struct ocs_hcu_dma_list *dma_list)
+{
+       int rc;
+
+       if (!hcu_dev || !ctx)
+               return -EINVAL;
+
+       /* Configure the hardware for the current request. */
+       rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+       if (rc)
+               return rc;
+
+       /* If we already processed some data, idata needs to be set. */
+       if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+               ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+       /* Start linked-list DMA hashing. */
+       rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, false);
+       if (rc)
+               return rc;
+
+       /* Update idata and return. */
+       return ocs_hcu_get_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+}
+
+/**
+ * ocs_hcu_hash_finup() - Update and finalize hash computation.
+ * @hcu_dev:   The OCS HCU device to use.
+ * @ctx:       The OCS HCU hashing context.
+ * @dma_list:  The OCS DMA list mapping the input data to process.
+ * @dgst:      The buffer where to save the computed digest.
+ * @dgst_len:  The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
+                      const struct ocs_hcu_hash_ctx *ctx,
+                      const struct ocs_hcu_dma_list *dma_list,
+                      u8 *dgst, size_t dgst_len)
+{
+       int rc;
+
+       if (!hcu_dev || !ctx)
+               return -EINVAL;
+
+       /* Configure the hardware for the current request. */
+       rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+       if (rc)
+               return rc;
+
+       /* If we already processed some data, idata needs to be set. */
+       if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+               ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+       /* Start linked-list DMA hashing. */
+       rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
+       if (rc)
+               return rc;
+
+       /* Get digest and return. */
+       return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
+}
+
+/**
+ * ocs_hcu_hash_final() - Finalize hash computation.
+ * @hcu_dev:           The OCS HCU device to use.
+ * @ctx:               The OCS HCU hashing context.
+ * @dgst:              The buffer where to save the computed digest.
+ * @dgst_len:          The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
+                      const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
+                      size_t dgst_len)
+{
+       int rc;
+
+       if (!hcu_dev || !ctx)
+               return -EINVAL;
+
+       /* Configure the hardware for the current request. */
+       rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+       if (rc)
+               return rc;
+
+       /* If we already processed some data, idata needs to be set. */
+       if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+               ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+       /*
+        * Enable HCU interrupts, so that HCU_DONE will be triggered once the
+        * final hash is computed.
+        */
+       ocs_hcu_done_irq_en(hcu_dev);
+       reinit_completion(&hcu_dev->irq_done);
+       writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+       rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+       if (rc)
+               return rc;
+
+       /* Get digest and return. */
+       return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
+}
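+
+/*
+ * The init/update/finup functions above compose as follows (an illustrative
+ * sketch; 'hcu_dev', 'dma_list1' and 'dma_list2' are assumed to exist):
+ *
+ *     struct ocs_hcu_hash_ctx ctx;
+ *     u8 dgst[SHA256_DIGEST_SIZE];
+ *     int rc;
+ *
+ *     rc = ocs_hcu_hash_init(&ctx, OCS_HCU_ALGO_SHA256);
+ *     if (!rc)
+ *             rc = ocs_hcu_hash_update(hcu_dev, &ctx, dma_list1);
+ *     if (!rc)
+ *             rc = ocs_hcu_hash_finup(hcu_dev, &ctx, dma_list2, dgst,
+ *                                     sizeof(dgst));
+ */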
+
+/**
+ * ocs_hcu_digest() - Compute hash digest.
+ * @hcu_dev:           The OCS HCU device to use.
+ * @algo:              The hash algorithm to use.
+ * @data:              The input data to process.
+ * @data_len:          The length of @data.
+ * @dgst:              The buffer where to save the computed digest.
+ * @dgst_len:          The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+                  void *data, size_t data_len, u8 *dgst, size_t dgst_len)
+{
+       struct device *dev = hcu_dev->dev;
+       dma_addr_t dma_handle;
+       u32 reg;
+       int rc;
+
+       /* Configure the hardware for the current request. */
+       rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
+       if (rc)
+               return rc;
+
+       dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, dma_handle))
+               return -EIO;
+
+       reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;
+
+       ocs_hcu_done_irq_en(hcu_dev);
+
+       reinit_completion(&hcu_dev->irq_done);
+
+       writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
+       writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
+       writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
+       writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
+
+       writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+       rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+       if (rc)
+               return rc;
+
+       dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);
+
+       return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
+}
+
+/**
+ * ocs_hcu_hmac() - Compute HMAC.
+ * @hcu_dev:           The OCS HCU device to use.
+ * @algo:              The hash algorithm to use with HMAC.
+ * @key:               The key to use.
+ * @key_len:           The length of @key.
+ * @dma_list:          The OCS DMA list mapping the input data to process.
+ * @dgst:              The buffer where to save the computed HMAC.
+ * @dgst_len:          The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+                const u8 *key, size_t key_len,
+                const struct ocs_hcu_dma_list *dma_list,
+                u8 *dgst, size_t dgst_len)
+{
+       int rc;
+
+       /* Ensure a non-empty key has been provided. */
+       if (!key || key_len == 0)
+               return -EINVAL;
+
+       /* Configure the hardware for the current request. */
+       rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
+       if (rc)
+               return rc;
+
+       rc = ocs_hcu_write_key(hcu_dev, key, key_len);
+       if (rc)
+               return rc;
+
+       rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
+
+       /* Clear HW key before processing return code. */
+       ocs_hcu_clear_key(hcu_dev);
+
+       if (rc)
+               return rc;
+
+       return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
+}
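+
+/*
+ * Illustrative HMAC usage (a sketch; 'hcu_dev', 'key', 'key_len' and
+ * 'dma_list' are assumed to be provided, with key_len <= OCS_HCU_HW_KEY_LEN):
+ *
+ *     u8 mac[SHA256_DIGEST_SIZE];
+ *     int rc;
+ *
+ *     rc = ocs_hcu_hmac(hcu_dev, OCS_HCU_ALGO_SHA256, key, key_len,
+ *                       dma_list, mac, sizeof(mac));
+ */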
+
+irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
+{
+       struct ocs_hcu_dev *hcu_dev = dev_id;
+       u32 hcu_irq;
+       u32 dma_irq;
+
+       /* Read and clear the HCU interrupt. */
+       hcu_irq = readl(hcu_dev->io_base + OCS_HCU_ISR);
+       writel(hcu_irq, hcu_dev->io_base + OCS_HCU_ISR);
+
+       /* Read and clear the HCU DMA interrupt. */
+       dma_irq = readl(hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+       writel(dma_irq, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+
+       /* Check for errors. */
+       if (hcu_irq & HCU_IRQ_HASH_ERR_MASK || dma_irq & HCU_DMA_IRQ_ERR_MASK) {
+               hcu_dev->irq_err = true;
+               goto complete;
+       }
+
+       /* Check for DONE IRQs. */
+       if (hcu_irq & HCU_IRQ_HASH_DONE || dma_irq & HCU_DMA_IRQ_SRC_DONE)
+               goto complete;
+
+       return IRQ_NONE;
+
+complete:
+       complete(&hcu_dev->irq_done);
+
+       return IRQ_HANDLED;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/intel/keembay/ocs-hcu.h b/drivers/crypto/intel/keembay/ocs-hcu.h
new file mode 100644 (file)
index 0000000..fbbbb92
--- /dev/null
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/dma-mapping.h>
+
+#ifndef _CRYPTO_OCS_HCU_H
+#define _CRYPTO_OCS_HCU_H
+
+#define OCS_HCU_DMA_BIT_MASK           DMA_BIT_MASK(32)
+
+#define OCS_HCU_HW_KEY_LEN             64
+
+struct ocs_hcu_dma_list;
+
+enum ocs_hcu_algo {
+       OCS_HCU_ALGO_SHA256 = 2,
+       OCS_HCU_ALGO_SHA224 = 3,
+       OCS_HCU_ALGO_SHA384 = 4,
+       OCS_HCU_ALGO_SHA512 = 5,
+       OCS_HCU_ALGO_SM3    = 6,
+};
+
+/**
+ * struct ocs_hcu_dev - OCS HCU device context.
+ * @list:      List of device contexts.
+ * @dev:       OCS HCU device.
+ * @io_base:   Base address of OCS HCU registers.
+ * @engine:    Crypto engine for the device.
+ * @irq:       IRQ number.
+ * @irq_done:  Completion for IRQ.
+ * @irq_err:   Flag indicating an IRQ error has happened.
+ */
+struct ocs_hcu_dev {
+       struct list_head list;
+       struct device *dev;
+       void __iomem *io_base;
+       struct crypto_engine *engine;
+       int irq;
+       struct completion irq_done;
+       bool irq_err;
+};
+
+/**
+ * struct ocs_hcu_idata - Intermediate data generated by the HCU.
+ * @msg_len_lo: Length of data the HCU has operated on in bits, low 32b.
+ * @msg_len_hi: Length of data the HCU has operated on in bits, high 32b.
+ * @digest: The digest read from the HCU. If the HCU is terminated, it will
+ *         contain the actual hash digest. Otherwise it is the intermediate
+ *         state.
+ */
+struct ocs_hcu_idata {
+       u32 msg_len_lo;
+       u32 msg_len_hi;
+       u8  digest[SHA512_DIGEST_SIZE];
+};
+
+/**
+ * struct ocs_hcu_hash_ctx - Context for OCS HCU hashing operation.
+ * @algo:      The hashing algorithm being used.
+ * @idata:     The current intermediate data.
+ */
+struct ocs_hcu_hash_ctx {
+       enum ocs_hcu_algo       algo;
+       struct ocs_hcu_idata    idata;
+};
+
+irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id);
+
+struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
+                                               int max_nents);
+
+void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
+                          struct ocs_hcu_dma_list *dma_list);
+
+int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
+                             struct ocs_hcu_dma_list *dma_list,
+                             dma_addr_t addr, u32 len);
+
+int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo);
+
+int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
+                       struct ocs_hcu_hash_ctx *ctx,
+                       const struct ocs_hcu_dma_list *dma_list);
+
+int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
+                      const struct ocs_hcu_hash_ctx *ctx,
+                      const struct ocs_hcu_dma_list *dma_list,
+                      u8 *dgst, size_t dgst_len);
+
+int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
+                      const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
+                      size_t dgst_len);
+
+int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+                  void *data, size_t data_len, u8 *dgst, size_t dgst_len);
+
+int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+                const u8 *key, size_t key_len,
+                const struct ocs_hcu_dma_list *dma_list,
+                u8 *dgst, size_t dgst_len);
+
+#endif /* _CRYPTO_OCS_HCU_H */
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
new file mode 100644 (file)
index 0000000..1220cc8
--- /dev/null
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CRYPTO_DEV_QAT
+       tristate
+       select CRYPTO_AEAD
+       select CRYPTO_AUTHENC
+       select CRYPTO_SKCIPHER
+       select CRYPTO_AKCIPHER
+       select CRYPTO_DH
+       select CRYPTO_HMAC
+       select CRYPTO_RSA
+       select CRYPTO_SHA1
+       select CRYPTO_SHA256
+       select CRYPTO_SHA512
+       select CRYPTO_LIB_AES
+       select FW_LOADER
+       select CRC8
+
+config CRYPTO_DEV_QAT_DH895xCC
+       tristate "Support for Intel(R) DH895xCC"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+         for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_C3XXX
+       tristate "Support for Intel(R) C3XXX"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+         for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_c3xxx.
+
+config CRYPTO_DEV_QAT_C62X
+       tristate "Support for Intel(R) C62X"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+         for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_c62x.
+
+config CRYPTO_DEV_QAT_4XXX
+       tristate "Support for Intel(R) QAT_4XXX"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) QuickAssist Technology QAT_4xxx
+         for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_4xxx.
+
+config CRYPTO_DEV_QAT_DH895xCCVF
+       tristate "Support for Intel(R) DH895xCC Virtual Function"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select PCI_IOV
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+         Virtual Function for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_dh895xccvf.
+
+config CRYPTO_DEV_QAT_C3XXXVF
+       tristate "Support for Intel(R) C3XXX Virtual Function"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select PCI_IOV
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+         Virtual Function for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_c3xxxvf.
+
+config CRYPTO_DEV_QAT_C62XVF
+       tristate "Support for Intel(R) C62X Virtual Function"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select PCI_IOV
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+         Virtual Function for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_c62xvf.
diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile
new file mode 100644 (file)
index 0000000..258c8a6
--- /dev/null
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
+obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
new file mode 100644 (file)
index 0000000..ff9c8b5
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
+qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
new file mode 100644 (file)
index 0000000..7324b86
--- /dev/null
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 - 2021 Intel Corporation */
+#include <linux/iopoll.h>
+#include <adf_accel_devices.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_data.h>
+#include <adf_gen4_pfvf.h>
+#include <adf_gen4_pm.h>
+#include "adf_4xxx_hw_data.h"
+#include "icp_qat_hw.h"
+
+struct adf_fw_config {
+       u32 ae_mask;
+       char *obj_name;
+};
+
+static struct adf_fw_config adf_4xxx_fw_cy_config[] = {
+       {0xF0, ADF_4XXX_SYM_OBJ},
+       {0xF, ADF_4XXX_ASYM_OBJ},
+       {0x100, ADF_4XXX_ADMIN_OBJ},
+};
+
+static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
+       {0xF0, ADF_4XXX_DC_OBJ},
+       {0xF, ADF_4XXX_DC_OBJ},
+       {0x100, ADF_4XXX_ADMIN_OBJ},
+};
+
+static struct adf_fw_config adf_402xx_fw_cy_config[] = {
+       {0xF0, ADF_402XX_SYM_OBJ},
+       {0xF, ADF_402XX_ASYM_OBJ},
+       {0x100, ADF_402XX_ADMIN_OBJ},
+};
+
+static struct adf_fw_config adf_402xx_fw_dc_config[] = {
+       {0xF0, ADF_402XX_DC_OBJ},
+       {0xF, ADF_402XX_DC_OBJ},
+       {0x100, ADF_402XX_ADMIN_OBJ},
+};
+
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map_cy[ADF_4XXX_MAX_ACCELENGINES] = {
+       0x5555555, 0x5555555, 0x5555555, 0x5555555,
+       0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
+       0x0
+};
+
+static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
+       0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
+       0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
+       0x0
+};
+
+static struct adf_hw_device_class adf_4xxx_class = {
+       .name = ADF_4XXX_DEVICE_NAME,
+       .type = DEV_4XXX,
+       .instances = 0,
+};
+
+enum dev_services {
+       SVC_CY = 0,
+       SVC_DC,
+};
+
+static const char *const dev_cfg_services[] = {
+       [SVC_CY] = ADF_CFG_CY,
+       [SVC_DC] = ADF_CFG_DC,
+};
+
+static int get_service_enabled(struct adf_accel_dev *accel_dev)
+{
+       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+       int ret;
+
+       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+                                     ADF_SERVICES_ENABLED, services);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev),
+                       ADF_SERVICES_ENABLED " param not found\n");
+               return ret;
+       }
+
+       ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
+                          services);
+       if (ret < 0)
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
+                       services);
+
+       return ret;
+}
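+
+/*
+ * For example (assuming the usual QAT config strings, where ADF_CFG_CY is
+ * "cy" and ADF_CFG_DC is "dc"): a device whose ADF_SERVICES_ENABLED key is
+ * set to "cy" resolves to SVC_CY here, while "dc" resolves to SVC_DC.
+ */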
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       return ADF_4XXX_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       u32 me_disable = self->fuses;
+
+       return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+       return ADF_4XXX_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+       if (!self || !self->ae_mask)
+               return 0;
+
+       return hweight32(self->ae_mask);
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_4XXX_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_4XXX_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_4XXX_SRAM_BAR;
+}
+
+/*
+ * The vector routing table is used to select the MSI-X entry to use for each
+ * interrupt source.
+ * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
+ * The final entry corresponds to VF2PF or error interrupts.
+ * This vector table could be used to configure one MSI-X entry to be shared
+ * between multiple interrupt sources.
+ *
+ * The default routing is set to have a one to one correspondence between the
+ * interrupt source and the MSI-X entry used.
+ */
+static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *csr;
+       int i;
+
+       csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
+       for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
+               ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
+}
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+       u32 capabilities_cy, capabilities_dc;
+       u32 fusectl1;
+
+       /* Read accelerator capabilities mask */
+       pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
+
+       capabilities_cy = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+                         ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+                         ICP_ACCEL_CAPABILITIES_CIPHER |
+                         ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+                         ICP_ACCEL_CAPABILITIES_SHA3 |
+                         ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+                         ICP_ACCEL_CAPABILITIES_HKDF |
+                         ICP_ACCEL_CAPABILITIES_ECEDMONT |
+                         ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+                         ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+                         ICP_ACCEL_CAPABILITIES_AES_V2;
+
+       /* A set bit in fusectl1 means the feature is OFF in this SKU */
+       if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+       }
+
+       capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+                         ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+                         ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+                         ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+       if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
+               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+       }
+
+       switch (get_service_enabled(accel_dev)) {
+       case SVC_CY:
+               return capabilities_cy;
+       case SVC_DC:
+               return capabilities_dc;
+       }
+
+       return 0;
+}
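+
+/*
+ * For example (illustrative): on a SKU whose fusectl1 has
+ * ICP_ACCEL_4XXX_MASK_PKE_SLICE set, the function above strips
+ * ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC and
+ * ICP_ACCEL_CAPABILITIES_ECEDMONT from the crypto capabilities, so such a
+ * device reports no asymmetric acceleration.
+ */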
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       return DEV_SKU_1;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+       switch (get_service_enabled(accel_dev)) {
+       case SVC_CY:
+               return thrd_to_arb_map_cy;
+       case SVC_DC:
+               return thrd_to_arb_map_dc;
+       }
+
+       return NULL;
+}
+
+static void get_arb_info(struct arb_info *arb_info)
+{
+       arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
+       arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
+       arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
+}
+
+static void get_admin_info(struct admin_info *admin_csrs_info)
+{
+       admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
+       admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
+       admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+       struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
+       void __iomem *csr = misc_bar->virt_addr;
+
+       /* Enable all in errsou3 except VFLR notification on host */
+       ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
+}
+
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *addr;
+
+       addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
+
+       /* Enable bundle interrupts */
+       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
+       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);
+
+       /* Enable misc interrupts */
+       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
+}
+
+static int adf_init_device(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *addr;
+       u32 status;
+       u32 csr;
+       int ret;
+
+       addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
+
+       /* Temporarily mask PM interrupt */
+       csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
+       csr |= ADF_GEN4_PM_SOU;
+       ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
+
+       /* Set DRV_ACTIVE bit to power up the device */
+       ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
+
+       /* Poll status register to make sure the device is powered up */
+       ret = read_poll_timeout(ADF_CSR_RD, status,
+                               status & ADF_GEN4_PM_INIT_STATE,
+                               ADF_GEN4_PM_POLL_DELAY_US,
+                               ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
+                               ADF_GEN4_PM_STATUS);
+       if (ret)
+               dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+
+       return ret;
+}
+
+static u32 uof_get_num_objs(void)
+{
+       BUILD_BUG_ON_MSG(ARRAY_SIZE(adf_4xxx_fw_cy_config) !=
+                        ARRAY_SIZE(adf_4xxx_fw_dc_config),
+                        "Size mismatch between adf_4xxx_fw_*_config arrays");
+
+       return ARRAY_SIZE(adf_4xxx_fw_cy_config);
+}
+
+static char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+       switch (get_service_enabled(accel_dev)) {
+       case SVC_CY:
+               return adf_4xxx_fw_cy_config[obj_num].obj_name;
+       case SVC_DC:
+               return adf_4xxx_fw_dc_config[obj_num].obj_name;
+       }
+
+       return NULL;
+}
+
+static char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+       switch (get_service_enabled(accel_dev)) {
+       case SVC_CY:
+               return adf_402xx_fw_cy_config[obj_num].obj_name;
+       case SVC_DC:
+               return adf_402xx_fw_dc_config[obj_num].obj_name;
+       }
+
+       return NULL;
+}
+
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+       switch (get_service_enabled(accel_dev)) {
+       case SVC_CY:
+               return adf_4xxx_fw_cy_config[obj_num].ae_mask;
+       case SVC_DC:
+               return adf_4xxx_fw_dc_config[obj_num].ae_mask;
+       }
+
+       return 0;
+}
+
+void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
+{
+       hw_data->dev_class = &adf_4xxx_class;
+       hw_data->instance_id = adf_4xxx_class.instances++;
+       hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
+       hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
+       hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
+       hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
+       hw_data->num_logical_accel = 1;
+       hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_isr_resource_alloc;
+       hw_data->free_irq = adf_isr_resource_free;
+       hw_data->enable_error_correction = adf_enable_error_correction;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_num_accels = get_num_accels;
+       hw_data->get_num_aes = get_num_aes;
+       hw_data->get_sram_bar_id = get_sram_bar_id;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_arb_info = get_arb_info;
+       hw_data->get_admin_info = get_admin_info;
+       hw_data->get_accel_cap = get_accel_cap;
+       hw_data->get_sku = get_sku;
+       hw_data->init_admin_comms = adf_init_admin_comms;
+       hw_data->exit_admin_comms = adf_exit_admin_comms;
+       hw_data->send_admin_init = adf_send_admin_init;
+       hw_data->init_arb = adf_init_arb;
+       hw_data->exit_arb = adf_exit_arb;
+       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+       hw_data->enable_ints = adf_enable_ints;
+       hw_data->init_device = adf_init_device;
+       hw_data->reset_device = adf_reset_flr;
+       hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
+       switch (dev_id) {
+       case ADF_402XX_PCI_DEVICE_ID:
+               hw_data->fw_name = ADF_402XX_FW;
+               hw_data->fw_mmp_name = ADF_402XX_MMP;
+               hw_data->uof_get_name = uof_get_name_402xx;
+               break;
+
+       default:
+               hw_data->fw_name = ADF_4XXX_FW;
+               hw_data->fw_mmp_name = ADF_4XXX_MMP;
+               hw_data->uof_get_name = uof_get_name_4xxx;
+       }
+       hw_data->uof_get_num_objs = uof_get_num_objs;
+       hw_data->uof_get_ae_mask = uof_get_ae_mask;
+       hw_data->set_msix_rttable = set_msix_default_rttable;
+       hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+       hw_data->enable_pm = adf_gen4_enable_pm;
+       hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
+       hw_data->dev_config = adf_gen4_dev_config;
+
+       adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen4_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
new file mode 100644 (file)
index 0000000..085e259
--- /dev/null
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_4XXX_HW_DATA_H_
+#define ADF_4XXX_HW_DATA_H_
+
+#include <adf_accel_devices.h>
+
+/* PCIe configuration space */
+#define ADF_4XXX_SRAM_BAR              0
+#define ADF_4XXX_PMISC_BAR             1
+#define ADF_4XXX_ETR_BAR               2
+#define ADF_4XXX_RX_RINGS_OFFSET       1
+#define ADF_4XXX_TX_RINGS_MASK         0x1
+#define ADF_4XXX_MAX_ACCELERATORS      1
+#define ADF_4XXX_MAX_ACCELENGINES      9
+#define ADF_4XXX_BAR_MASK              (BIT(0) | BIT(2) | BIT(4))
+
+/* Physical function fuses */
+#define ADF_4XXX_FUSECTL0_OFFSET       (0x2C8)
+#define ADF_4XXX_FUSECTL1_OFFSET       (0x2CC)
+#define ADF_4XXX_FUSECTL2_OFFSET       (0x2D0)
+#define ADF_4XXX_FUSECTL3_OFFSET       (0x2D4)
+#define ADF_4XXX_FUSECTL4_OFFSET       (0x2D8)
+#define ADF_4XXX_FUSECTL5_OFFSET       (0x2DC)
+
+#define ADF_4XXX_ACCELERATORS_MASK     (0x1)
+#define ADF_4XXX_ACCELENGINES_MASK     (0x1FF)
+#define ADF_4XXX_ADMIN_AE_MASK         (0x100)
+
+#define ADF_4XXX_ETR_MAX_BANKS         64
+
+/* MSIX interrupt */
+#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET      (0x41A040)
+#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET      (0x41A044)
+#define ADF_4XXX_SMIAPF_MASK_OFFSET            (0x41A084)
+#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i)                (0x409000 + ((i) * 0x04))
+
+/* Bank and ring configuration */
+#define ADF_4XXX_NUM_RINGS_PER_BANK    2
+#define ADF_4XXX_NUM_BANKS_PER_VF      4
+
+/* Arbiter configuration */
+#define ADF_4XXX_ARB_CONFIG                    (BIT(31) | BIT(6) | BIT(0))
+#define ADF_4XXX_ARB_OFFSET                    (0x0)
+#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET      (0x400)
+
+/* Admin Interface Reg Offset */
+#define ADF_4XXX_ADMINMSGUR_OFFSET     (0x500574)
+#define ADF_4XXX_ADMINMSGLR_OFFSET     (0x500578)
+#define ADF_4XXX_MAILBOX_BASE_OFFSET   (0x600970)
+
+/* Firmware Binaries */
+#define ADF_4XXX_FW            "qat_4xxx.bin"
+#define ADF_4XXX_MMP           "qat_4xxx_mmp.bin"
+#define ADF_4XXX_SYM_OBJ       "qat_4xxx_sym.bin"
+#define ADF_4XXX_DC_OBJ                "qat_4xxx_dc.bin"
+#define ADF_4XXX_ASYM_OBJ      "qat_4xxx_asym.bin"
+#define ADF_4XXX_ADMIN_OBJ     "qat_4xxx_admin.bin"
+/* Firmware for 402XX */
+#define ADF_402XX_FW           "qat_402xx.bin"
+#define ADF_402XX_MMP          "qat_402xx_mmp.bin"
+#define ADF_402XX_SYM_OBJ      "qat_402xx_sym.bin"
+#define ADF_402XX_DC_OBJ       "qat_402xx_dc.bin"
+#define ADF_402XX_ASYM_OBJ     "qat_402xx_asym.bin"
+#define ADF_402XX_ADMIN_OBJ    "qat_402xx_admin.bin"
+
+/* qat_4xxx fuse bits differ from older generations, so redefine them */
+enum icp_qat_4xxx_slice_mask {
+       ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
+       ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1),
+       ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2),
+       ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
+       ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
+       ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
+       ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6),
+};
+
+void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
+void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
+
+#endif
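
A hedged sketch of how the slice-mask enum above could be consumed; the real consumer is get_accel_cap(), which is not part of this hunk, and whether a set FUSECTL bit means "present" or "fused off" is an assumption here:

#include <stdio.h>

#define BIT(n) (1u << (n))

enum icp_qat_4xxx_slice_mask {
	ICP_ACCEL_4XXX_MASK_CIPHER_SLICE   = BIT(0),
	ICP_ACCEL_4XXX_MASK_PKE_SLICE      = BIT(2),
	ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
};

int main(void)
{
	/* Hypothetical fuse value; here a set bit is read as "fused off". */
	unsigned int fuses = ICP_ACCEL_4XXX_MASK_PKE_SLICE;

	if (fuses & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE)
		printf("cipher slice fused off\n");
	if (fuses & ICP_ACCEL_4XXX_MASK_PKE_SLICE)
		printf("PKE slice fused off -> drop asymmetric crypto\n");
	if (fuses & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE)
		printf("compression slice fused off\n");
	return 0;
}
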
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
new file mode 100644 (file)
index 0000000..ceb8732
--- /dev/null
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 Intel Corporation */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <adf_accel_devices.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+
+#include "adf_4xxx_hw_data.h"
+#include "qat_compression.h"
+#include "qat_crypto.h"
+#include "adf_transport_access_macros.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
+       { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
+       { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+enum configs {
+       DEV_CFG_CY = 0,
+       DEV_CFG_DC,
+};
+
+static const char * const services_operations[] = {
+       ADF_CFG_CY,
+       ADF_CFG_DC,
+};
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       if (accel_dev->hw_device) {
+               adf_clean_hw_data_4xxx(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+       const char *config;
+       int ret;
+
+       config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
+
+       ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+       if (ret)
+               return ret;
+
+       /* Default configuration is crypto only for even devices
+        * and compression for odd devices
+        */
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+                                         ADF_SERVICES_ENABLED, config,
+                                         ADF_STR);
+       if (ret)
+               return ret;
+
+       return 0;
+}
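
The parity rule in adf_cfg_dev_init() alternates the default service across devices in the system. A runnable model of the selection, with "cy" and "dc" standing in for the literal strings behind ADF_CFG_CY and ADF_CFG_DC (which are defined elsewhere):

#include <stdio.h>

int main(void)
{
	/* Mirrors adf_cfg_dev_init(): even accel_ids default to crypto,
	 * odd ones to compression. */
	for (unsigned int accel_id = 0; accel_id < 4; accel_id++)
		printf("qat_dev%u -> %s\n", accel_id,
		       accel_id % 2 ? "dc" : "cy");
	return 0;
}
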
+
+static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       int banks = GET_MAX_BANKS(accel_dev);
+       int cpus = num_online_cpus();
+       unsigned long bank, val;
+       int instances;
+       int ret;
+       int i;
+
+       if (adf_hw_dev_has_crypto(accel_dev))
+               instances = min(cpus, banks / 2);
+       else
+               instances = 0;
+
+       for (i = 0; i < instances; i++) {
+               val = i;
+               bank = i * 2;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &bank, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               bank += 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &bank, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+                        i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+               val = 128;
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 512;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 0;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 0;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = ADF_COALESCING_DEF_TIME;
+               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+       }
+
+       val = i;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+                                         &val, ADF_DEC);
+       if (ret)
+               goto err;
+
+       val = 0;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+                                         &val, ADF_DEC);
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+       return ret;
+}
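
adf_crypto_dev_config() gives each crypto instance a pair of ring banks, asym traffic on bank 2*i and sym on bank 2*i+1, with the instance count capped by both online CPUs and available bank pairs. A runnable model of the layout (the "Cy%u" labels abbreviate the real configuration keys built from the ADF_CY/ADF_RING_* macros):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int banks = 64, cpus = 8;	/* example values */
	unsigned int instances = min_u(cpus, banks / 2);

	for (unsigned int i = 0; i < instances; i++)
		printf("Cy%u: asym bank %u, sym bank %u, affinity cpu %u\n",
		       i, 2 * i, 2 * i + 1, i);
	return 0;
}
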
+
+static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       int banks = GET_MAX_BANKS(accel_dev);
+       int cpus = num_online_cpus();
+       unsigned long val;
+       int instances;
+       int ret;
+       int i;
+
+       if (adf_hw_dev_has_compression(accel_dev))
+               instances = min(cpus, banks);
+       else
+               instances = 0;
+
+       for (i = 0; i < instances; i++) {
+               val = i;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 512;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 0;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 1;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = ADF_COALESCING_DEF_TIME;
+               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+       }
+
+       val = i;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+                                         &val, ADF_DEC);
+       if (ret)
+               goto err;
+
+       val = 0;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+                                         &val, ADF_DEC);
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+       return ret;
+}
+
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
+{
+       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+       int ret;
+
+       ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+       if (ret)
+               goto err;
+
+       ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+       if (ret)
+               goto err;
+
+       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+                                     ADF_SERVICES_ENABLED, services);
+       if (ret)
+               goto err;
+
+       ret = sysfs_match_string(services_operations, services);
+       if (ret < 0)
+               goto err;
+
+       switch (ret) {
+       case DEV_CFG_CY:
+               ret = adf_crypto_dev_config(accel_dev);
+               break;
+       case DEV_CFG_DC:
+               ret = adf_comp_dev_config(accel_dev);
+               break;
+       }
+
+       if (ret)
+               goto err;
+
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+       return ret;
+
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
+       return ret;
+}
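
adf_gen4_dev_config() dispatches on the index returned by sysfs_match_string(), which matches the stored ServicesEnabled value against the services_operations table and returns a negative errno when nothing matches. A userspace stand-in for the same lookup (the literal service strings are assumptions; the real values come from ADF_CFG_CY and ADF_CFG_DC):

#include <stdio.h>
#include <string.h>

/* Stand-in for sysfs_match_string(): index of the match, or -1
 * where the kernel helper would return -EINVAL. */
static int match_string(const char * const *arr, int n, const char *s)
{
	for (int i = 0; i < n; i++)
		if (!strcmp(arr[i], s))
			return i;
	return -1;
}

int main(void)
{
	const char * const services[] = { "sym;asym", "dc" };

	printf("%d\n", match_string(services, 2, "dc"));	/* 1 -> DEV_CFG_DC */
	printf("%d\n", match_string(services, 2, "bogus"));	/* -1 -> error path */
	return 0;
}
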
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       struct adf_bar *bar;
+       int ret;
+
+       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+               /*
+                * If the accelerator is connected to a node with no memory
+                * there is no point in using the accelerator since the remote
+                * memory transaction will be very slow.
+                */
+               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+               return -EINVAL;
+       }
+
+       accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
+       if (!accel_dev)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /*
+        * Add accel device to accel table
+        * This should be called before adf_cleanup_accel is called
+        */
+       if (adf_devmgr_add_dev(accel_dev, NULL)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               return -EFAULT;
+       }
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and initialise device hardware meta-data structure */
+       hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);
+
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+       pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);
+
+       /* Get the accelerator and accelerator engine masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+       /* If the device has no acceleration engines then ignore it */
+       if (!hw_data->accel_mask || !hw_data->ae_mask ||
+           (~hw_data->ae_mask & 0x01)) {
+               dev_err(&pdev->dev, "No acceleration units found.\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* Enable PCI device */
+       ret = pcim_enable_device(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't enable PCI device.\n");
+               goto out_err;
+       }
+
+       /* Set DMA identifier */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration.\n");
+               goto out_err;
+       }
+
+       ret = adf_cfg_dev_init(accel_dev);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to initialize configuration.\n");
+               goto out_err;
+       }
+
+       /* Get accelerator capabilities mask */
+       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+       if (!hw_data->accel_capabilities_mask) {
+               dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       /* Find and map all the device's BARs */
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;
+
+       ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to map pci regions.\n");
+               goto out_err;
+       }
+
+       i = 0;
+       for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
+               bar = &accel_pci_dev->pci_bars[i++];
+               bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
+       }
+
+       pci_set_master(pdev);
+
+       if (pci_save_state(pdev)) {
+               dev_err(&pdev->dev, "Failed to save pci state.\n");
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       ret = adf_dev_up(accel_dev, true);
+       if (ret)
+               goto out_err_dev_stop;
+
+       ret = adf_sysfs_init(accel_dev);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       return ret;
+}
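
In adf_probe() the BAR walk is sparse-to-dense: ADF_4XXX_BAR_MASK keeps only BARs 0, 2 and 4, and the for_each_set_bit() loop packs them into consecutive pci_bars[] slots. A runnable model of that packing:

#include <stdio.h>

#define BIT(n) (1u << (n))
#define ADF_4XXX_BAR_MASK (BIT(0) | BIT(2) | BIT(4))

int main(void)
{
	unsigned long bar_mask = ADF_4XXX_BAR_MASK;
	unsigned int i = 0;

	/* Same shape as the for_each_set_bit() walk in adf_probe(). */
	for (unsigned int bar_nr = 0; bar_nr < 6; bar_nr++)
		if (bar_mask & (1u << bar_nr))
			printf("pci_bars[%u] <- BAR %u\n", i++, bar_nr);
	return 0;
}
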
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+}
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_4XXX_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+       .sriov_configure = adf_sriov_configure,
+       .err_handler = &adf_err_handler,
+};
+
+module_pci_driver(adf_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_4XXX_FW);
+MODULE_FIRMWARE(ADF_4XXX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_SOFTDEP("pre: crypto-intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
new file mode 100644 (file)
index 0000000..92ef416
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
+qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
new file mode 100644 (file)
index 0000000..4756436
--- /dev/null
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include "adf_c3xxx_hw_data.h"
+#include "icp_qat_hw.h"
+
+/* Per-AE worker-thread to service-arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
+       0x12222AAA, 0x11222AAA, 0x12222AAA,
+       0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static struct adf_hw_device_class c3xxx_class = {
+       .name = ADF_C3XXX_DEVICE_NAME,
+       .type = DEV_C3XXX,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       u32 straps = self->straps;
+       u32 fuses = self->fuses;
+       u32 accel;
+
+       accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET;
+       accel &= ADF_C3XXX_ACCELERATORS_MASK;
+
+       return accel;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       u32 straps = self->straps;
+       u32 fuses = self->fuses;
+       unsigned long disabled;
+       u32 ae_disable;
+       int accel;
+
+       /* If an accel is disabled, then disable the corresponding two AEs */
+       disabled = ~get_accel_mask(self) & ADF_C3XXX_ACCELERATORS_MASK;
+       ae_disable = BIT(1) | BIT(0);
+       for_each_set_bit(accel, &disabled, ADF_C3XXX_MAX_ACCELERATORS)
+               straps |= ae_disable << (accel << 1);
+
+       return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
+}
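
A worked example of the mask arithmetic in get_accel_mask()/get_ae_mask(), using a hypothetical strap value that fuses off accelerator 1, which then takes AEs 2 and 3 with it:

#include <stdio.h>

#define BIT(n) (1u << (n))
#define ACCEL_OFF   16		/* ADF_C3XXX_ACCELERATORS_REG_OFFSET */
#define ACCEL_MASK  0x7		/* ADF_C3XXX_ACCELERATORS_MASK */
#define AE_MASK     0x3F	/* ADF_C3XXX_ACCELENGINES_MASK */

int main(void)
{
	/* Hypothetical values: strap bit 17 disables accelerator 1. */
	unsigned int fuses = 0, straps = BIT(17);

	unsigned int accel = (~(fuses | straps) >> ACCEL_OFF) & ACCEL_MASK;

	/* Each disabled accel takes its two AEs with it. */
	unsigned int disabled = ~accel & ACCEL_MASK;
	unsigned int ae_straps = straps;
	for (int a = 0; a < 3; a++)
		if (disabled & BIT(a))
			ae_straps |= (BIT(1) | BIT(0)) << (a << 1);

	unsigned int ae = ~(fuses | ae_straps) & AE_MASK;

	printf("accel mask 0x%x, AE mask 0x%x\n", accel, ae);	/* 0x5, 0x33 */
	return 0;
}
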
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXX_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXX_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXX_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       int aes = self->get_num_aes(self);
+
+       if (aes == 6)
+               return DEV_SKU_4;
+
+       return DEV_SKU_UNKNOWN;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+       return thrd_to_arb_map;
+}
+
+static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
+{
+       adf_gen2_cfg_iov_thds(accel_dev, enable,
+                             ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS,
+                             ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS);
+}
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &c3xxx_class;
+       hw_data->instance_id = c3xxx_class.instances++;
+       hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_isr_resource_alloc;
+       hw_data->free_irq = adf_isr_resource_free;
+       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_accel_cap = adf_gen2_get_accel_cap;
+       hw_data->get_num_accels = adf_gen2_get_num_accels;
+       hw_data->get_num_aes = adf_gen2_get_num_aes;
+       hw_data->get_sram_bar_id = get_sram_bar_id;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_admin_info = adf_gen2_get_admin_info;
+       hw_data->get_arb_info = adf_gen2_get_arb_info;
+       hw_data->get_sku = get_sku;
+       hw_data->fw_name = ADF_C3XXX_FW;
+       hw_data->fw_mmp_name = ADF_C3XXX_MMP;
+       hw_data->init_admin_comms = adf_init_admin_comms;
+       hw_data->exit_admin_comms = adf_exit_admin_comms;
+       hw_data->configure_iov_threads = configure_iov_threads;
+       hw_data->send_admin_init = adf_send_admin_init;
+       hw_data->init_arb = adf_init_arb;
+       hw_data->exit_arb = adf_exit_arb;
+       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+       hw_data->enable_ints = adf_gen2_enable_ints;
+       hw_data->reset_device = adf_reset_flr;
+       hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->dev_config = adf_gen2_dev_config;
+
+       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
new file mode 100644 (file)
index 0000000..336a06f
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_C3XXX_HW_DATA_H_
+#define ADF_C3XXX_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_C3XXX_PMISC_BAR 0
+#define ADF_C3XXX_ETR_BAR 1
+#define ADF_C3XXX_SRAM_BAR 0
+#define ADF_C3XXX_MAX_ACCELERATORS 3
+#define ADF_C3XXX_MAX_ACCELENGINES 6
+#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
+#define ADF_C3XXX_ACCELERATORS_MASK 0x7
+#define ADF_C3XXX_ACCELENGINES_MASK 0x3F
+#define ADF_C3XXX_ETR_MAX_BANKS 16
+#define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC
+
+/* AE to function mapping */
+#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
+#define ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS 6
+
+/* Firmware Binary */
+#define ADF_C3XXX_FW "qat_c3xxx.bin"
+#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
new file mode 100644 (file)
index 0000000..bb4dca7
--- /dev/null
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c3xxx_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_C3XXX_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+       .sriov_configure = adf_sriov_configure,
+       .err_handler = &adf_err_handler,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+                       adf_clean_hw_data_c3xxx(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+               /*
+                * If the accelerator is connected to a node with no memory
+                * there is no point in using the accelerator since the remote
+                * memory transaction will be very slow.
+                */
+               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+               return -EINVAL;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /*
+        * Add accel device to accel table.
+        * This should be called before adf_cleanup_accel is called.
+        */
+       if (adf_devmgr_add_dev(accel_dev, NULL)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_c3xxx(accel_dev->hw_device);
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+                             &hw_data->fuses);
+       pci_read_config_dword(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET,
+                             &hw_data->straps);
+
+       /* Get the accelerator and accelerator engine masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+       /* If the device has no acceleration engines then ignore it. */
+       if (!hw_data->accel_mask || !hw_data->ae_mask ||
+           ((~hw_data->ae_mask) & 0x01)) {
+               dev_err(&pdev->dev, "No acceleration units found");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* Enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Set DMA identifier */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Get accelerator capabilities mask */
+       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+
+       /* Find and map all the device's BARs */
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+
+       if (pci_save_state(pdev)) {
+               dev_err(&pdev->dev, "Failed to save pci state\n");
+               ret = -ENOMEM;
+               goto out_err_free_reg;
+       }
+
+       ret = adf_dev_up(accel_dev, true);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
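Unlike qat_4xxx, which relies on module_pci_driver() plus MODULE_SOFTDEP("pre: crypto-intel_qat"), this driver open-codes its init and exit so that it can request_module() the intel_qat core before registering. Roughly what module_pci_driver() would have generated instead, as a sketch and minus the request_module() call:

/* Equivalent boilerplate that module_pci_driver(adf_driver) expands to. */
static int __init adf_driver_init(void)
{
	return pci_register_driver(&adf_driver);
}
module_init(adf_driver_init);

static void __exit adf_driver_exit(void)
{
	pci_unregister_driver(&adf_driver);
}
module_exit(adf_driver_exit);
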
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_C3XXX_FW);
+MODULE_FIRMWARE(ADF_C3XXX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
new file mode 100644 (file)
index 0000000..b6d7682
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
+qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
new file mode 100644 (file)
index 0000000..84d9486
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+static struct adf_hw_device_class c3xxxiov_class = {
+       .name = ADF_C3XXXVF_DEVICE_NAME,
+       .type = DEV_C3XXXVF,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       return DEV_SKU_VF;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &c3xxxiov_class;
+       hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+       hw_data->free_irq = adf_vf_isr_resource_free;
+       hw_data->enable_error_correction = adf_vf_void_noop;
+       hw_data->init_admin_comms = adf_vf_int_noop;
+       hw_data->exit_admin_comms = adf_vf_void_noop;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
+       hw_data->init_arb = adf_vf_int_noop;
+       hw_data->exit_arb = adf_vf_void_noop;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_num_accels = get_num_accels;
+       hw_data->get_num_aes = get_num_aes;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_sku = get_sku;
+       hw_data->enable_ints = adf_vf_void_noop;
+       hw_data->dev_class->instances++;
+       hw_data->dev_config = adf_gen2_dev_config;
+       adf_devmgr_update_class_index(hw_data);
+       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+       adf_devmgr_update_class_index(hw_data);
+}
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
new file mode 100644 (file)
index 0000000..6b4bf18
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
+#ifndef ADF_C3XXXVF_HW_DATA_H_
+#define ADF_C3XXXVF_HW_DATA_H_
+
+#define ADF_C3XXXIOV_PMISC_BAR 1
+#define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1
+#define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1
+#define ADF_C3XXXIOV_MAX_ACCELERATORS 1
+#define ADF_C3XXXIOV_MAX_ACCELENGINES 1
+#define ADF_C3XXXIOV_RX_RINGS_OFFSET 8
+#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF
+#define ADF_C3XXXIOV_ETR_BAR 0
+#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
+
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
new file mode 100644 (file)
index 0000000..e8cc10f
--- /dev/null
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_C3XXXVF_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       struct adf_accel_dev *pf;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
+                       adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+       adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_dev *pf;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       accel_dev->is_vf = true;
+       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /* Add accel device to accel table */
+       if (adf_devmgr_add_dev(accel_dev, pf)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_c3xxxiov(accel_dev->hw_device);
+
+       /* Get the accelerator and accelerator engine masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* Enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Set DMA identifier */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Find and map all the device's BARS */
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+       /* Completion for VF2PF request/response message exchange */
+       init_completion(&accel_dev->vf.msg_received);
+
+       ret = adf_dev_up(accel_dev, false);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
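
The init_completion() above sets up the VF side of the VF2PF handshake; the actual send/wait logic lives in the shared PFVF code, not in this hunk. A hedged sketch of what the wait side of such an exchange typically looks like with this completion (the helper name and the 100 ms timeout are assumptions):

/* Sketch only: waits for the PF's answer signalled via
 * complete(&accel_dev->vf.msg_received) in the interrupt path. */
static int vf_wait_for_pf_response(struct adf_accel_dev *accel_dev)
{
	reinit_completion(&accel_dev->vf.msg_received);

	/* ... send the VF2PF request here ... */

	if (!wait_for_completion_timeout(&accel_dev->vf.msg_received,
					 msecs_to_jiffies(100)))
		return -ETIMEDOUT;	/* PF never answered */

	return 0;
}
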
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_flush_vf_wq(accel_dev);
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+       adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
new file mode 100644 (file)
index 0000000..d581f7c
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
+qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
new file mode 100644 (file)
index 0000000..e142707
--- /dev/null
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include "adf_c62x_hw_data.h"
+#include "icp_qat_hw.h"
+
+/* Per-AE worker-thread to service-arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
+       0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+       0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static struct adf_hw_device_class c62x_class = {
+       .name = ADF_C62X_DEVICE_NAME,
+       .type = DEV_C62X,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       u32 straps = self->straps;
+       u32 fuses = self->fuses;
+       u32 accel;
+
+       accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET;
+       accel &= ADF_C62X_ACCELERATORS_MASK;
+
+       return accel;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       u32 straps = self->straps;
+       u32 fuses = self->fuses;
+       unsigned long disabled;
+       u32 ae_disable;
+       int accel;
+
+       /* If an accel is disabled, then disable the corresponding two AEs */
+       disabled = ~get_accel_mask(self) & ADF_C62X_ACCELERATORS_MASK;
+       ae_disable = BIT(1) | BIT(0);
+       for_each_set_bit(accel, &disabled, ADF_C62X_MAX_ACCELERATORS)
+               straps |= ae_disable << (accel << 1);
+
+       return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C62X_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C62X_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C62X_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       int aes = self->get_num_aes(self);
+
+       if (aes == 8)
+               return DEV_SKU_2;
+       else if (aes == 10)
+               return DEV_SKU_4;
+
+       return DEV_SKU_UNKNOWN;
+}
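
get_sku() distinguishes c62x SKUs purely by the number of enabled AEs; adf_gen2_get_num_aes() presumably derives that count from the population count of ae_mask. A runnable model:

#include <stdio.h>

static int popcount(unsigned int m)
{
	int n = 0;
	for (; m; m &= m - 1)
		n++;
	return n;
}

int main(void)
{
	/* Fully enabled c62x part: ae_mask = 0x3FF -> 10 AEs -> DEV_SKU_4;
	 * a part fused down to 8 AEs maps to DEV_SKU_2. */
	unsigned int ae_mask = 0x3FF;
	int aes = popcount(ae_mask);

	printf("%d AEs -> %s\n", aes,
	       aes == 10 ? "DEV_SKU_4" :
	       aes == 8  ? "DEV_SKU_2" : "DEV_SKU_UNKNOWN");
	return 0;
}
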
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+       return thrd_to_arb_map;
+}
+
+static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
+{
+       adf_gen2_cfg_iov_thds(accel_dev, enable,
+                             ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS,
+                             ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS);
+}
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &c62x_class;
+       hw_data->instance_id = c62x_class.instances++;
+       hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_isr_resource_alloc;
+       hw_data->free_irq = adf_isr_resource_free;
+       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_accel_cap = adf_gen2_get_accel_cap;
+       hw_data->get_num_accels = adf_gen2_get_num_accels;
+       hw_data->get_num_aes = adf_gen2_get_num_aes;
+       hw_data->get_sram_bar_id = get_sram_bar_id;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_admin_info = adf_gen2_get_admin_info;
+       hw_data->get_arb_info = adf_gen2_get_arb_info;
+       hw_data->get_sku = get_sku;
+       hw_data->fw_name = ADF_C62X_FW;
+       hw_data->fw_mmp_name = ADF_C62X_MMP;
+       hw_data->init_admin_comms = adf_init_admin_comms;
+       hw_data->exit_admin_comms = adf_exit_admin_comms;
+       hw_data->configure_iov_threads = configure_iov_threads;
+       hw_data->send_admin_init = adf_send_admin_init;
+       hw_data->init_arb = adf_init_arb;
+       hw_data->exit_arb = adf_exit_arb;
+       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+       hw_data->enable_ints = adf_gen2_enable_ints;
+       hw_data->reset_device = adf_reset_flr;
+       hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->dev_config = adf_gen2_dev_config;
+
+       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
new file mode 100644 (file)
index 0000000..008c0a3
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_C62X_HW_DATA_H_
+#define ADF_C62X_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_C62X_SRAM_BAR 0
+#define ADF_C62X_PMISC_BAR 1
+#define ADF_C62X_ETR_BAR 2
+#define ADF_C62X_MAX_ACCELERATORS 5
+#define ADF_C62X_MAX_ACCELENGINES 10
+#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
+#define ADF_C62X_ACCELERATORS_MASK 0x1F
+#define ADF_C62X_ACCELENGINES_MASK 0x3FF
+#define ADF_C62X_ETR_MAX_BANKS 16
+#define ADF_C62X_SOFTSTRAP_CSR_OFFSET 0x2EC
+
+/* AE to function mapping */
+#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
+#define ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS 10
+
+/* Firmware Binary */
+#define ADF_C62X_FW "qat_c62x.bin"
+#define ADF_C62X_MMP "qat_c62x_mmp.bin"
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
new file mode 100644 (file)
index 0000000..ca18ae1
--- /dev/null
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c62x_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_C62X_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+       .sriov_configure = adf_sriov_configure,
+       .err_handler = &adf_err_handler,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_C62X:
+                       adf_clean_hw_data_c62x(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_C62X:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+               /* If the accelerator is connected to a node with no memory
+                * there is no point in using the accelerator since the remote
+                * memory transactions will be very slow. */
+               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+               return -EINVAL;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /* Add accel device to accel table.
+        * This must be done before adf_cleanup_accel is called. */
+       if (adf_devmgr_add_dev(accel_dev, NULL)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_c62x(accel_dev->hw_device);
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+                             &hw_data->fuses);
+       pci_read_config_dword(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET,
+                             &hw_data->straps);
+
+       /* Get accelerator and accelerator engine masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+       /* If the device has no acceleration engines then ignore it. */
+       if (!hw_data->accel_mask || !hw_data->ae_mask ||
+           ((~hw_data->ae_mask) & 0x01)) {
+               dev_err(&pdev->dev, "No acceleration units found\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* Enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Set the DMA mask; the device supports 48-bit DMA addressing */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Get accelerator capabilities mask */
+       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+
+       /* Find and map all the device's BARs */
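+       /* When the fuse control bit is set, leave pci_bars[0] unused and
+        * start filling the table at index 1. Each 64-bit memory BAR
+        * occupies two PCI BAR registers, hence the ADF_PCI_MAX_BARS * 2
+        * scan limit below.
+        */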
+       i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+
+       if (pci_save_state(pdev)) {
+               dev_err(&pdev->dev, "Failed to save PCI state\n");
+               ret = -ENOMEM;
+               goto out_err_free_reg;
+       }
+
+       ret = adf_dev_up(accel_dev, true);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_C62X_FW);
+MODULE_FIRMWARE(ADF_C62X_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
new file mode 100644 (file)
index 0000000..446c3d6
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
+qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
new file mode 100644 (file)
index 0000000..751d7aa
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
+#include "adf_c62xvf_hw_data.h"
+
+static struct adf_hw_device_class c62xiov_class = {
+       .name = ADF_C62XVF_DEVICE_NAME,
+       .type = DEV_C62XVF,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       return DEV_SKU_VF;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &c62xiov_class;
+       hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+       hw_data->free_irq = adf_vf_isr_resource_free;
+       hw_data->enable_error_correction = adf_vf_void_noop;
+       hw_data->init_admin_comms = adf_vf_int_noop;
+       hw_data->exit_admin_comms = adf_vf_void_noop;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
+       hw_data->init_arb = adf_vf_int_noop;
+       hw_data->exit_arb = adf_vf_void_noop;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_num_accels = get_num_accels;
+       hw_data->get_num_aes = get_num_aes;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_sku = get_sku;
+       hw_data->enable_ints = adf_vf_void_noop;
+       hw_data->dev_class->instances++;
+       hw_data->dev_config = adf_gen2_dev_config;
+       adf_devmgr_update_class_index(hw_data);
+       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+       adf_devmgr_update_class_index(hw_data);
+}
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h
new file mode 100644 (file)
index 0000000..a1a62c0
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
+#ifndef ADF_C62XVF_HW_DATA_H_
+#define ADF_C62XVF_HW_DATA_H_
+
+#define ADF_C62XIOV_PMISC_BAR 1
+#define ADF_C62XIOV_ACCELERATORS_MASK 0x1
+#define ADF_C62XIOV_ACCELENGINES_MASK 0x1
+#define ADF_C62XIOV_MAX_ACCELERATORS 1
+#define ADF_C62XIOV_MAX_ACCELENGINES 1
+#define ADF_C62XIOV_RX_RINGS_OFFSET 8
+#define ADF_C62XIOV_TX_RINGS_MASK 0xFF
+#define ADF_C62XIOV_ETR_BAR 0
+#define ADF_C62XIOV_ETR_MAX_BANKS 1
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
new file mode 100644 (file)
index 0000000..3756630
--- /dev/null
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c62xvf_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X_VF), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_C62XVF_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       struct adf_accel_dev *pf;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+                       adf_clean_hw_data_c62xiov(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+       adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_dev *pf;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       accel_dev->is_vf = true;
+       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /* Add accel device to accel table */
+       if (adf_devmgr_add_dev(accel_dev, pf)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_c62xiov(accel_dev->hw_device);
+
+       /* Get accelerator and accelerator engine masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* Enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Set the DMA mask; the device supports 48-bit DMA addressing */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Find and map all the device's BARs */
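+       /* Unlike the PF probe, the table is always filled from
+        * pci_bars[0] here.
+        */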
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+       /* Completion for VF2PF request/response message exchange */
+       init_completion(&accel_dev->vf.msg_received);
+
+       ret = adf_dev_up(accel_dev, false);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_flush_vf_wq(accel_dev);
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+       adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
new file mode 100644 (file)
index 0000000..1fb8d50
--- /dev/null
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+intel_qat-objs := adf_cfg.o \
+       adf_isr.o \
+       adf_ctl_drv.o \
+       adf_dev_mgr.o \
+       adf_init.o \
+       adf_accel_engine.o \
+       adf_aer.o \
+       adf_transport.o \
+       adf_admin.o \
+       adf_hw_arbiter.o \
+       adf_sysfs.o \
+       adf_gen2_hw_data.o \
+       adf_gen2_config.o \
+       adf_gen4_hw_data.o \
+       adf_gen4_pm.o \
+       adf_gen2_dc.o \
+       adf_gen4_dc.o \
+       qat_crypto.o \
+       qat_compression.o \
+       qat_comp_algs.o \
+       qat_algs.o \
+       qat_asym_algs.o \
+       qat_algs_send.o \
+       qat_uclo.o \
+       qat_hal.o \
+       qat_bl.o
+
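+# The objects below are linked into intel_qat.o only when the matching
+# Kconfig option is enabled.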
+intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
+                              adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
+                              adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
+                              adf_gen2_pfvf.o adf_gen4_pfvf.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
new file mode 100644 (file)
index 0000000..bd19e64
--- /dev/null
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_ACCEL_DEVICES_H_
+#define ADF_ACCEL_DEVICES_H_
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include "adf_cfg_common.h"
+#include "adf_pfvf_msg.h"
+
+#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
+#define ADF_C62X_DEVICE_NAME "c6xx"
+#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
+#define ADF_C3XXX_DEVICE_NAME "c3xxx"
+#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
+#define ADF_4XXX_DEVICE_NAME "4xxx"
+#define ADF_4XXX_PCI_DEVICE_ID 0x4940
+#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
+#define ADF_401XX_PCI_DEVICE_ID 0x4942
+#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
+#define ADF_402XX_PCI_DEVICE_ID 0x4944
+#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
+#define ADF_DEVICE_FUSECTL_OFFSET 0x40
+#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
+#define ADF_PCI_MAX_BARS 3
+#define ADF_DEVICE_NAME_LENGTH 32
+#define ADF_ETR_MAX_RINGS_PER_BANK 16
+#define ADF_MAX_MSIX_VECTOR_NAME 16
+#define ADF_DEVICE_NAME_PREFIX "qat_"
+
+enum adf_accel_capabilities {
+       ADF_ACCEL_CAPABILITIES_NULL = 0,
+       ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
+       ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
+       ADF_ACCEL_CAPABILITIES_CIPHER = 4,
+       ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
+       ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
+       ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
+       ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
+};
+
+struct adf_bar {
+       resource_size_t base_addr;
+       void __iomem *virt_addr;
+       resource_size_t size;
+};
+
+struct adf_irq {
+       bool enabled;
+       char name[ADF_MAX_MSIX_VECTOR_NAME];
+};
+
+struct adf_accel_msix {
+       struct adf_irq *irqs;
+       u32 num_entries;
+};
+
+struct adf_accel_pci {
+       struct pci_dev *pci_dev;
+       struct adf_accel_msix msix_entries;
+       struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
+       u8 revid;
+       u8 sku;
+};
+
+enum dev_state {
+       DEV_DOWN = 0,
+       DEV_UP
+};
+
+enum dev_sku_info {
+       DEV_SKU_1 = 0,
+       DEV_SKU_2,
+       DEV_SKU_3,
+       DEV_SKU_4,
+       DEV_SKU_VF,
+       DEV_SKU_UNKNOWN,
+};
+
+static inline const char *get_sku_info(enum dev_sku_info info)
+{
+       switch (info) {
+       case DEV_SKU_1:
+               return "SKU1";
+       case DEV_SKU_2:
+               return "SKU2";
+       case DEV_SKU_3:
+               return "SKU3";
+       case DEV_SKU_4:
+               return "SKU4";
+       case DEV_SKU_VF:
+               return "SKUVF";
+       case DEV_SKU_UNKNOWN:
+       default:
+               break;
+       }
+       return "Unknown SKU";
+}
+
+struct adf_hw_device_class {
+       const char *name;
+       const enum adf_device_type type;
+       u32 instances;
+};
+
+struct arb_info {
+       u32 arb_cfg;
+       u32 arb_offset;
+       u32 wt2sam_offset;
+};
+
+struct admin_info {
+       u32 admin_msg_ur;
+       u32 admin_msg_lr;
+       u32 mailbox_offset;
+};
+
+struct adf_hw_csr_ops {
+       u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
+       u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
+                                 u32 ring);
+       void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
+                                   u32 ring, u32 value);
+       u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
+                                 u32 ring);
+       void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
+                                   u32 ring, u32 value);
+       u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
+       void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
+                                     u32 ring, u32 value);
+       void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
+                                   u32 ring, dma_addr_t addr);
+       void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
+                                  u32 value);
+       void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
+       void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
+                                    u32 value);
+       void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
+                                     u32 value);
+       void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
+                                          u32 bank, u32 value);
+       void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
+                                         u32 value);
+};
+
+struct adf_cfg_device_data;
+struct adf_accel_dev;
+struct adf_etr_data;
+struct adf_etr_ring_data;
+
+struct adf_pfvf_ops {
+       int (*enable_comms)(struct adf_accel_dev *accel_dev);
+       u32 (*get_pf2vf_offset)(u32 i);
+       u32 (*get_vf2pf_offset)(u32 i);
+       void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
+       void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
+       u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
+       int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                       u32 pfvf_offset, struct mutex *csr_lock);
+       struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
+                                       u32 pfvf_offset, u8 compat_ver);
+};
+
+struct adf_dc_ops {
+       void (*build_deflate_ctx)(void *ctx);
+};
+
+struct adf_hw_device_data {
+       struct adf_hw_device_class *dev_class;
+       u32 (*get_accel_mask)(struct adf_hw_device_data *self);
+       u32 (*get_ae_mask)(struct adf_hw_device_data *self);
+       u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
+       u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
+       u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
+       u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
+       u32 (*get_num_aes)(struct adf_hw_device_data *self);
+       u32 (*get_num_accels)(struct adf_hw_device_data *self);
+       void (*get_arb_info)(struct arb_info *arb_csrs_info);
+       void (*get_admin_info)(struct admin_info *admin_csrs_info);
+       enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
+       int (*alloc_irq)(struct adf_accel_dev *accel_dev);
+       void (*free_irq)(struct adf_accel_dev *accel_dev);
+       void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+       int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
+       void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+       int (*send_admin_init)(struct adf_accel_dev *accel_dev);
+       int (*init_arb)(struct adf_accel_dev *accel_dev);
+       void (*exit_arb)(struct adf_accel_dev *accel_dev);
+       const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
+       int (*init_device)(struct adf_accel_dev *accel_dev);
+       int (*enable_pm)(struct adf_accel_dev *accel_dev);
+       bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
+       void (*disable_iov)(struct adf_accel_dev *accel_dev);
+       void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
+                                     bool enable);
+       void (*enable_ints)(struct adf_accel_dev *accel_dev);
+       void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
+       int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
+       void (*reset_device)(struct adf_accel_dev *accel_dev);
+       void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
+       char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
+       u32 (*uof_get_num_objs)(void);
+       u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
+       int (*dev_config)(struct adf_accel_dev *accel_dev);
+       struct adf_pfvf_ops pfvf_ops;
+       struct adf_hw_csr_ops csr_ops;
+       struct adf_dc_ops dc_ops;
+       const char *fw_name;
+       const char *fw_mmp_name;
+       u32 fuses;
+       u32 straps;
+       u32 accel_capabilities_mask;
+       u32 extended_dc_capabilities;
+       u32 clock_frequency;
+       u32 instance_id;
+       u16 accel_mask;
+       u32 ae_mask;
+       u32 admin_ae_mask;
+       u16 tx_rings_mask;
+       u16 ring_to_svc_map;
+       u8 tx_rx_gap;
+       u8 num_banks;
+       u16 num_banks_per_vf;
+       u8 num_rings_per_bank;
+       u8 num_accel;
+       u8 num_logical_accel;
+       u8 num_engines;
+};
+
+/* CSR write macro */
+#define ADF_CSR_WR(csr_base, csr_offset, val) \
+       __raw_writel(val, csr_base + csr_offset)
+
+/* CSR read macro */
+#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
+
+#define ADF_CFG_NUM_SERVICES   4
+#define ADF_SRV_TYPE_BIT_LEN   3
+#define ADF_SRV_TYPE_MASK      0x7
+
+#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
+#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
+#define GET_HW_DATA(accel_dev) ((accel_dev)->hw_device)
+#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
+#define GET_NUM_RINGS_PER_BANK(accel_dev) \
+       (GET_HW_DATA(accel_dev)->num_rings_per_bank)
+#define GET_SRV_TYPE(accel_dev, idx) \
+       (((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
+       & ADF_SRV_TYPE_MASK)
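+/* E.g. GET_SRV_TYPE(accel_dev, 2) shifts ring_to_svc_map right by
+ * 2 * ADF_SRV_TYPE_BIT_LEN = 6 bits and masks off the low 3 bits,
+ * yielding the service type assigned to the third ring-pair group.
+ */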
+#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
+#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
+#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
+#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
+#define accel_to_pci_dev(accel_ptr) ((accel_ptr)->accel_pci_dev.pci_dev)
+
+struct adf_admin_comms;
+struct icp_qat_fw_loader_handle;
+struct adf_fw_loader_data {
+       struct icp_qat_fw_loader_handle *fw_loader;
+       const struct firmware *uof_fw;
+       const struct firmware *mmp_fw;
+};
+
+struct adf_accel_vf_info {
+       struct adf_accel_dev *accel_dev;
+       struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+       struct ratelimit_state vf2pf_ratelimit;
+       u32 vf_nr;
+       bool init;
+       u8 vf_compat_ver;
+};
+
+struct adf_dc_data {
+       u8 *ovf_buff;
+       size_t ovf_buff_sz;
+       dma_addr_t ovf_buff_p;
+};
+
+struct adf_accel_dev {
+       struct adf_etr_data *transport;
+       struct adf_hw_device_data *hw_device;
+       struct adf_cfg_device_data *cfg;
+       struct adf_fw_loader_data *fw_loader;
+       struct adf_admin_comms *admin;
+       struct adf_dc_data *dc_data;
+       struct list_head crypto_list;
+       struct list_head compression_list;
+       unsigned long status;
+       atomic_t ref_count;
+       struct dentry *debugfs_dir;
+       struct list_head list;
+       struct module *owner;
+       struct adf_accel_pci accel_pci_dev;
+       union {
+               struct {
+                       /* protects VF2PF interrupts access */
+                       spinlock_t vf2pf_ints_lock;
+                       /* vf_info is non-NULL when SR-IOV is initialized */
+                       struct adf_accel_vf_info *vf_info;
+               } pf;
+               struct {
+                       bool irq_enabled;
+                       char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
+                       struct tasklet_struct pf2vf_bh_tasklet;
+                       struct mutex vf2pf_lock; /* protect CSR access */
+                       struct completion msg_received;
+                       struct pfvf_message response; /* temp field holding pf2vf response */
+                       u8 pf_compat_ver;
+               } vf;
+       };
+       struct mutex state_lock; /* protect state of the device */
+       bool is_vf;
+       u32 accel_id;
+};
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
new file mode 100644 (file)
index 0000000..4ce2b66
--- /dev/null
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include "adf_cfg.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+
+static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
+                                u32 fw_size)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct icp_qat_fw_loader_handle *loader;
+       char *obj_name;
+       u32 num_objs;
+       u32 ae_mask;
+       int i;
+
+       loader = loader_data->fw_loader;
+       num_objs = hw_device->uof_get_num_objs();
+
+       for (i = 0; i < num_objs; i++) {
+               obj_name = hw_device->uof_get_name(accel_dev, i);
+               ae_mask = hw_device->uof_get_ae_mask(accel_dev, i);
+               if (!obj_name || !ae_mask) {
+                       dev_err(&GET_DEV(accel_dev), "Invalid UOF image\n");
+                       goto out_err;
+               }
+
+               if (qat_uclo_set_cfg_ae_mask(loader, ae_mask)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Invalid mask for UOF image\n");
+                       goto out_err;
+               }
+               if (qat_uclo_map_obj(loader, fw_addr, fw_size, obj_name)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to map UOF firmware\n");
+                       goto out_err;
+               }
+               if (qat_uclo_wr_all_uimage(loader)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to load UOF firmware\n");
+                       goto out_err;
+               }
+               qat_uclo_del_obj(loader);
+       }
+
+       return 0;
+
+out_err:
+       adf_ae_fw_release(accel_dev);
+       return -EFAULT;
+}
+
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       void *fw_addr, *mmp_addr;
+       u32 fw_size, mmp_size;
+
+       if (!hw_device->fw_name)
+               return 0;
+
+       if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
+                            &accel_dev->accel_pci_dev.pci_dev->dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
+                       hw_device->fw_mmp_name);
+               return -EFAULT;
+       }
+       if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
+                            &accel_dev->accel_pci_dev.pci_dev->dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
+                       hw_device->fw_name);
+               goto out_err;
+       }
+
+       fw_size = loader_data->uof_fw->size;
+       fw_addr = (void *)loader_data->uof_fw->data;
+       mmp_size = loader_data->mmp_fw->size;
+       mmp_addr = (void *)loader_data->mmp_fw->data;
+
+       if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n");
+               goto out_err;
+       }
+
+       if (hw_device->uof_get_num_objs)
+               return adf_ae_fw_load_images(accel_dev, fw_addr, fw_size);
+
+       if (qat_uclo_map_obj(loader_data->fw_loader, fw_addr, fw_size, NULL)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to map FW\n");
+               goto out_err;
+       }
+       if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
+               goto out_err;
+       }
+       return 0;
+
+out_err:
+       adf_ae_fw_release(accel_dev);
+       return -EFAULT;
+}
+
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+       if (!hw_device->fw_name)
+               return;
+
+       qat_uclo_del_obj(loader_data->fw_loader);
+       qat_hal_deinit(loader_data->fw_loader);
+       release_firmware(loader_data->uof_fw);
+       release_firmware(loader_data->mmp_fw);
+       loader_data->uof_fw = NULL;
+       loader_data->mmp_fw = NULL;
+       loader_data->fw_loader = NULL;
+}
+
+int adf_ae_start(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 ae_ctr;
+
+       if (!hw_data->fw_name)
+               return 0;
+
+       ae_ctr = qat_hal_start(loader_data->fw_loader);
+       dev_info(&GET_DEV(accel_dev),
+                "qat_dev%d started %d acceleration engines\n",
+                accel_dev->accel_id, ae_ctr);
+       return 0;
+}
+
+int adf_ae_stop(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+       if (!hw_data->fw_name)
+               return 0;
+
+       for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+               if (hw_data->ae_mask & (1 << ae)) {
+                       qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
+                       ae_ctr++;
+               }
+       }
+       dev_info(&GET_DEV(accel_dev),
+                "qat_dev%d stopped %d acceleration engines\n",
+                accel_dev->accel_id, ae_ctr);
+       return 0;
+}
+
+static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+       qat_hal_reset(loader_data->fw_loader);
+       if (qat_hal_clr_reset(loader_data->fw_loader))
+               return -EFAULT;
+
+       return 0;
+}
+
+int adf_ae_init(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+       if (!hw_device->fw_name)
+               return 0;
+
+       loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
+       if (!loader_data)
+               return -ENOMEM;
+
+       accel_dev->fw_loader = loader_data;
+       if (qat_hal_init(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n");
+               kfree(loader_data);
+               return -EFAULT;
+       }
+       if (adf_ae_reset(accel_dev, 0)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n");
+               qat_hal_deinit(loader_data->fw_loader);
+               kfree(loader_data);
+               return -EFAULT;
+       }
+       return 0;
+}
+
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+       if (!hw_device->fw_name)
+               return 0;
+
+       qat_hal_deinit(loader_data->fw_loader);
+       kfree(accel_dev->fw_loader);
+       accel_dev->fw_loader = NULL;
+       return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
new file mode 100644 (file)
index 0000000..3b6184c
--- /dev/null
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_ADMIN_MAILBOX_STRIDE 0x1000
+#define ADF_ADMINMSG_LEN 32
+#define ADF_CONST_TABLE_SIZE 1024
+#define ADF_ADMIN_POLL_DELAY_US 20
+#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+
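+/* Constant table shared with the firmware. Among other parameters it
+ * embeds the standard initial hash values for SHA-1 (0x67452301 ...),
+ * SHA-224/256 (0xc1059ed8 ..., 0x6a09e667 ...) and SHA-384/512.
+ */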
+static const u8 const_tab[1024] __aligned(1024) = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x18,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x01, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x15, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x02, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x14, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x02,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25,
+0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x01, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x2B, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+struct adf_admin_comms {
+       dma_addr_t phy_addr;
+       dma_addr_t const_tbl_addr;
+       void *virt_addr;
+       void *virt_tbl_addr;
+       void __iomem *mailbox_addr;
+       struct mutex lock;      /* protects adf_admin_comms struct */
+};
+
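+/* Each AE owns a pair of ADF_ADMINMSG_LEN-byte slots in the shared DMA
+ * page: the request at ae * 2 * ADF_ADMINMSG_LEN and the response right
+ * after it. Writing 1 to the AE's mailbox CSR hands the request to the
+ * firmware, which clears the CSR once the response has been written
+ * back.
+ */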
+static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
+                                 void *in, void *out)
+{
+       int ret;
+       u32 status;
+       struct adf_admin_comms *admin = accel_dev->admin;
+       int offset = ae * ADF_ADMINMSG_LEN * 2;
+       void __iomem *mailbox = admin->mailbox_addr;
+       int mb_offset = ae * ADF_ADMIN_MAILBOX_STRIDE;
+       struct icp_qat_fw_init_admin_req *request = in;
+
+       mutex_lock(&admin->lock);
+
+       if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+               mutex_unlock(&admin->lock);
+               return -EAGAIN;
+       }
+
+       memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+       ADF_CSR_WR(mailbox, mb_offset, 1);
+
+       ret = read_poll_timeout(ADF_CSR_RD, status, status == 0,
+                               ADF_ADMIN_POLL_DELAY_US,
+                               ADF_ADMIN_POLL_TIMEOUT_US, true,
+                               mailbox, mb_offset);
+       if (ret < 0) {
+               /* Response timeout */
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send admin msg %d to accelerator %d\n",
+                       request->cmd_id, ae);
+       } else {
+               /* Response received from the firmware; make the response
+                * data available in the "out" parameter.
+                */
+               memcpy(out, admin->virt_addr + offset +
+                      ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+       }
+
+       mutex_unlock(&admin->lock);
+       return ret;
+}
+
+static int adf_send_admin(struct adf_accel_dev *accel_dev,
+                         struct icp_qat_fw_init_admin_req *req,
+                         struct icp_qat_fw_init_admin_resp *resp,
+                         const unsigned long ae_mask)
+{
+       u32 ae;
+
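+       /* Send the request to every AE in the mask and fail fast on the
+        * first AE that does not respond or reports a non-zero status.
+        */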
+       for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER)
+               if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) ||
+                   resp->status)
+                       return -EFAULT;
+
+       return 0;
+}
+
+static int adf_init_ae(struct adf_accel_dev *accel_dev)
+{
+       struct icp_qat_fw_init_admin_req req;
+       struct icp_qat_fw_init_admin_resp resp;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       u32 ae_mask = hw_device->ae_mask;
+
+       memset(&req, 0, sizeof(req));
+       memset(&resp, 0, sizeof(resp));
+       req.cmd_id = ICP_QAT_FW_INIT_AE;
+
+       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
+{
+       struct icp_qat_fw_init_admin_req req;
+       struct icp_qat_fw_init_admin_resp resp;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
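+       /* Use the dedicated admin AE mask when set, otherwise all AEs */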
+       u32 ae_mask = hw_device->admin_ae_mask ?: hw_device->ae_mask;
+
+       memset(&req, 0, sizeof(req));
+       memset(&resp, 0, sizeof(resp));
+       req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
+
+       req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
+       req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+
+       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
+                                  u32 *capabilities)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct icp_qat_fw_init_admin_resp resp;
+       struct icp_qat_fw_init_admin_req req;
+       unsigned long ae_mask;
+       unsigned long ae;
+       int ret;
+
+       /* Target only service accelerator engines */
+       ae_mask = hw_device->ae_mask & ~hw_device->admin_ae_mask;
+
+       memset(&req, 0, sizeof(req));
+       memset(&resp, 0, sizeof(resp));
+       req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;
+
+       *capabilities = 0;
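+       /* OR together the extended feature bits reported by each AE */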
+       for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
+               ret = adf_send_admin(accel_dev, &req, &resp, 1ULL << ae);
+               if (ret)
+                       return ret;
+
+               *capabilities |= resp.extended_features;
+       }
+
+       return 0;
+}
+
+/**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function sends admin init message to the FW
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+{
+       u32 dc_capabilities = 0;
+       int ret;
+
+       ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
+               return ret;
+       }
+       accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
+
+       ret = adf_set_fw_constants(accel_dev);
+       if (ret)
+               return ret;
+
+       return adf_init_ae(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_send_admin_init);
+
+/**
+ * adf_init_admin_pm() - Function sends PM init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ * @idle_delay: QAT HW idle time before power gating is initiated.
+ *             000 - 64us
+ *             001 - 128us
+ *             010 - 256us
+ *             011 - 512us
+ *             100 - 1ms
+ *             101 - 2ms
+ *             110 - 4ms
+ *             111 - 8ms
+ *
+ * Function sends the admin init message for PM state configuration to
+ * the FW.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct icp_qat_fw_init_admin_resp resp = {0};
+       struct icp_qat_fw_init_admin_req req = {0};
+       u32 ae_mask = hw_data->admin_ae_mask;
+
+       if (!accel_dev->admin) {
+               dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
+               return -EFAULT;
+       }
+
+       req.cmd_id = ICP_QAT_FW_PM_STATE_CONFIG;
+       req.idle_filter = idle_delay;
+
+       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_pm);
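+
+/* Example: adf_init_admin_pm(accel_dev, 0x5) requests a 2 ms idle
+ * window (encoding 101 in the table above) before power gating.
+ */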
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+       struct adf_admin_comms *admin;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       struct admin_info admin_csrs_info;
+       u32 mailbox_offset, adminmsg_u, adminmsg_l;
+       void __iomem *mailbox;
+       u64 reg_val;
+
+       admin = kzalloc_node(sizeof(*admin), GFP_KERNEL,
+                            dev_to_node(&GET_DEV(accel_dev)));
+       if (!admin)
+               return -ENOMEM;
+       admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                             &admin->phy_addr, GFP_KERNEL);
+       if (!admin->virt_addr) {
+               dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+               kfree(admin);
+               return -ENOMEM;
+       }
+
+       admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+                                                 PAGE_SIZE,
+                                                 &admin->const_tbl_addr,
+                                                 GFP_KERNEL);
+       if (!admin->virt_tbl_addr) {
+               dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
+               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                 admin->virt_addr, admin->phy_addr);
+               kfree(admin);
+               return -ENOMEM;
+       }
+
+       memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));
+       hw_data->get_admin_info(&admin_csrs_info);
+
+       mailbox_offset = admin_csrs_info.mailbox_offset;
+       mailbox = pmisc_addr + mailbox_offset;
+       adminmsg_u = admin_csrs_info.admin_msg_ur;
+       adminmsg_l = admin_csrs_info.admin_msg_lr;
+
+       reg_val = (u64)admin->phy_addr;
+       ADF_CSR_WR(pmisc_addr, adminmsg_u, upper_32_bits(reg_val));
+       ADF_CSR_WR(pmisc_addr, adminmsg_l, lower_32_bits(reg_val));
+
+       mutex_init(&admin->lock);
+       admin->mailbox_addr = mailbox;
+       accel_dev->admin = admin;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_comms);
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+       struct adf_admin_comms *admin = accel_dev->admin;
+
+       if (!admin)
+               return;
+
+       if (admin->virt_addr)
+               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                 admin->virt_addr, admin->phy_addr);
+       if (admin->virt_tbl_addr)
+               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                 admin->virt_tbl_addr, admin->const_tbl_addr);
+
+       mutex_destroy(&admin->lock);
+       kfree(admin);
+       accel_dev->admin = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
new file mode 100644 (file)
index 0000000..04af32a
--- /dev/null
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+static struct workqueue_struct *device_reset_wq;
+
+static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
+                                          pci_channel_state_t state)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       dev_info(&pdev->dev, "Acceleration driver hardware error detected.\n");
+       if (!accel_dev) {
+               dev_err(&pdev->dev, "Can't find acceleration device\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       if (state == pci_channel_io_perm_failure) {
+               dev_err(&pdev->dev, "Can't recover from device error\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* reset dev data */
+struct adf_reset_dev_data {
+       int mode;
+       struct adf_accel_dev *accel_dev;
+       struct completion compl;
+       struct work_struct reset_work;
+};
+
+void adf_reset_sbr(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+       struct pci_dev *parent = pdev->bus->self;
+       u16 bridge_ctl = 0;
+
+       if (!parent)
+               parent = pdev;
+
+       if (!pci_wait_for_pending_transaction(pdev))
+               dev_info(&GET_DEV(accel_dev),
+                        "Transaction still in progress. Proceeding\n");
+
+       dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
+
+       pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
+       bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
+       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+       msleep(100);
+       bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+       msleep(100);
+}
+EXPORT_SYMBOL_GPL(adf_reset_sbr);
+
+void adf_reset_flr(struct adf_accel_dev *accel_dev)
+{
+       pcie_flr(accel_to_pci_dev(accel_dev));
+}
+EXPORT_SYMBOL_GPL(adf_reset_flr);
+
+void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       if (hw_device->reset_device) {
+               dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
+                        accel_dev->accel_id);
+               hw_device->reset_device(accel_dev);
+               pci_restore_state(pdev);
+               pci_save_state(pdev);
+       }
+}
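+
+/*
+ * Illustrative sketch: adf_dev_restore() only resets when the per-device
+ * hw_data supplies a callback; a QAT device driver would typically wire in
+ * one of the helpers above, e.g.:
+ *
+ *	hw_data->reset_device = adf_reset_sbr;	(or adf_reset_flr)
+ */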
+
+static void adf_device_reset_worker(struct work_struct *work)
+{
+       struct adf_reset_dev_data *reset_data =
+                 container_of(work, struct adf_reset_dev_data, reset_work);
+       struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+
+       adf_dev_restarting_notify(accel_dev);
+       if (adf_dev_restart(accel_dev)) {
+               /* The device hung and we can't restart it, so stop here */
+               dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+               kfree(reset_data);
+               WARN(1, "QAT: device restart failed. Device is unusable\n");
+               return;
+       }
+       adf_dev_restarted_notify(accel_dev);
+       clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+       /* The dev is back alive. Notify the caller if in sync mode */
+       if (reset_data->mode == ADF_DEV_RESET_SYNC)
+               complete(&reset_data->compl);
+       else
+               kfree(reset_data);
+}
+
+static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+                                     enum adf_dev_reset_mode mode)
+{
+       struct adf_reset_dev_data *reset_data;
+
+       if (!adf_dev_started(accel_dev) ||
+           test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+               return 0;
+
+       set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+       reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
+       if (!reset_data)
+               return -ENOMEM;
+       reset_data->accel_dev = accel_dev;
+       init_completion(&reset_data->compl);
+       reset_data->mode = mode;
+       INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
+       queue_work(device_reset_wq, &reset_data->reset_work);
+
+       /* If in sync mode wait for the result */
+       if (mode == ADF_DEV_RESET_SYNC) {
+               int ret = 0;
+               /* Maximum device reset time is 10 seconds */
+               unsigned long wait_jiffies = msecs_to_jiffies(10000);
+               unsigned long timeout = wait_for_completion_timeout(
+                                  &reset_data->compl, wait_jiffies);
+               if (!timeout) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Reset device timeout expired\n");
+                       ret = -EFAULT;
+               }
+               kfree(reset_data);
+               return ret;
+       }
+       return 0;
+}
+
+static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Can't find acceleration device\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+       if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+               return PCI_ERS_RESULT_DISCONNECT;
+
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void adf_resume(struct pci_dev *pdev)
+{
+       dev_info(&pdev->dev, "Acceleration driver reset completed\n");
+       dev_info(&pdev->dev, "Device is up and running\n");
+}
+
+const struct pci_error_handlers adf_err_handler = {
+       .error_detected = adf_error_detected,
+       .slot_reset = adf_slot_reset,
+       .resume = adf_resume,
+};
+EXPORT_SYMBOL_GPL(adf_err_handler);
+
+int adf_init_aer(void)
+{
+       device_reset_wq = alloc_workqueue("qat_device_reset_wq",
+                                         WQ_MEM_RECLAIM, 0);
+       return !device_reset_wq ? -EFAULT : 0;
+}
+
+void adf_exit_aer(void)
+{
+       if (device_reset_wq)
+               destroy_workqueue(device_reset_wq);
+       device_reset_wq = NULL;
+}
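+
+/*
+ * Illustrative sketch: a QAT PCI driver hooks the exported handlers into
+ * its struct pci_driver so the PCI error-recovery core can drive the reset
+ * flow above (adf_pci_tbl is a hypothetical id table):
+ *
+ *	static struct pci_driver adf_driver = {
+ *		.id_table = adf_pci_tbl,
+ *		.err_handler = &adf_err_handler,
+ *	};
+ */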
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
new file mode 100644 (file)
index 0000000..1931e5b
--- /dev/null
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static DEFINE_MUTEX(qat_cfg_read_lock);
+
+static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
+{
+       struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+       mutex_lock(&qat_cfg_read_lock);
+       return seq_list_start(&dev_cfg->sec_list, *pos);
+}
+
+static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
+{
+       struct list_head *list;
+       struct adf_cfg_section *sec =
+                               list_entry(v, struct adf_cfg_section, list);
+
+       seq_printf(sfile, "[%s]\n", sec->name);
+       list_for_each(list, &sec->param_head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list, struct adf_cfg_key_val, list);
+               seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
+       }
+       return 0;
+}
+
+static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+       return seq_list_next(v, &dev_cfg->sec_list, pos);
+}
+
+static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&qat_cfg_read_lock);
+}
+
+static const struct seq_operations qat_dev_cfg_sops = {
+       .start = qat_dev_cfg_start,
+       .next = qat_dev_cfg_next,
+       .stop = qat_dev_cfg_stop,
+       .show = qat_dev_cfg_show
+};
+
+DEFINE_SEQ_ATTRIBUTE(qat_dev_cfg);
+
+/**
+ * adf_cfg_dev_add() - Create an acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function creates a configuration table for the given acceleration device.
+ * The table stores device specific config values.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data;
+
+       dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
+       if (!dev_cfg_data)
+               return -ENOMEM;
+       INIT_LIST_HEAD(&dev_cfg_data->sec_list);
+       init_rwsem(&dev_cfg_data->lock);
+       accel_dev->cfg = dev_cfg_data;
+
+       /* accel_dev->debugfs_dir should always be non-NULL here */
+       dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
+                                                 accel_dev->debugfs_dir,
+                                                 dev_cfg_data,
+                                                 &qat_dev_cfg_fops);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
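+
+/*
+ * Illustrative sketch: the "dev_cfg" debugfs file renders the table through
+ * the seq_file ops above, one header per section followed by its key-value
+ * pairs (the debugfs directory name is device-specific):
+ *
+ *	$ cat /sys/kernel/debug/qat_<device>/dev_cfg
+ *	[GENERAL]
+ *	ServicesEnabled = sym;asym
+ */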
+
+static void adf_cfg_section_del_all(struct list_head *head);
+
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+       down_write(&dev_cfg_data->lock);
+       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+       up_write(&dev_cfg_data->lock);
+       clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+}
+
+/**
+ * adf_cfg_dev_remove() - Clears acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function removes the configuration table from the given acceleration
+ * device and frees all allocated memory.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+       if (!dev_cfg_data)
+               return;
+
+       down_write(&dev_cfg_data->lock);
+       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+       up_write(&dev_cfg_data->lock);
+       debugfs_remove(dev_cfg_data->debug);
+       kfree(dev_cfg_data);
+       accel_dev->cfg = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
+
+static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
+                              struct adf_cfg_section *sec)
+{
+       list_add_tail(&new->list, &sec->param_head);
+}
+
+static void adf_cfg_keyval_remove(const char *key, struct adf_cfg_section *sec)
+{
+       struct list_head *head = &sec->param_head;
+       struct list_head *list_ptr, *tmp;
+
+       list_for_each_prev_safe(list_ptr, tmp, head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list_ptr, struct adf_cfg_key_val, list);
+
+               if (strncmp(ptr->key, key, sizeof(ptr->key)))
+                       continue;
+
+               list_del(list_ptr);
+               kfree(ptr);
+               break;
+       }
+}
+
+static void adf_cfg_keyval_del_all(struct list_head *head)
+{
+       struct list_head *list_ptr, *tmp;
+
+       list_for_each_prev_safe(list_ptr, tmp, head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list_ptr, struct adf_cfg_key_val, list);
+               list_del(list_ptr);
+               kfree(ptr);
+       }
+}
+
+static void adf_cfg_section_del_all(struct list_head *head)
+{
+       struct adf_cfg_section *ptr;
+       struct list_head *list, *tmp;
+
+       list_for_each_prev_safe(list, tmp, head) {
+               ptr = list_entry(list, struct adf_cfg_section, list);
+               adf_cfg_keyval_del_all(&ptr->param_head);
+               list_del(list);
+               kfree(ptr);
+       }
+}
+
+static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
+                                                     const char *key)
+{
+       struct list_head *list;
+
+       list_for_each(list, &s->param_head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list, struct adf_cfg_key_val, list);
+               if (!strcmp(ptr->key, key))
+                       return ptr;
+       }
+       return NULL;
+}
+
+static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
+                                               const char *sec_name)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct list_head *list;
+
+       list_for_each(list, &cfg->sec_list) {
+               struct adf_cfg_section *ptr =
+                       list_entry(list, struct adf_cfg_section, list);
+               if (!strcmp(ptr->name, sec_name))
+                       return ptr;
+       }
+       return NULL;
+}
+
+static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
+                              const char *sec_name,
+                              const char *key_name,
+                              char *val)
+{
+       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
+       struct adf_cfg_key_val *keyval = NULL;
+
+       if (sec)
+               keyval = adf_cfg_key_value_find(sec, key_name);
+       if (keyval) {
+               memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+               return 0;
+       }
+       return -ENODATA;
+}
+
+/**
+ * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @section_name: Name of the section where the param will be added
+ * @key: The key string
+ * @val: Value for the given @key
+ * @type: Type - string, int or address
+ *
+ * Function adds a configuration key-value entry in the appropriate section
+ * of the given acceleration device. If the key already exists, the value
+ * is updated.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+                               const char *section_name,
+                               const char *key, const void *val,
+                               enum adf_cfg_val_type type)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct adf_cfg_key_val *key_val;
+       struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
+                                                          section_name);
+       char temp_val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+       if (!section)
+               return -EFAULT;
+
+       key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
+       if (!key_val)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&key_val->list);
+       strscpy(key_val->key, key, sizeof(key_val->key));
+
+       if (type == ADF_DEC) {
+               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+                        "%ld", (*((long *)val)));
+       } else if (type == ADF_STR) {
+               strscpy(key_val->val, (char *)val, sizeof(key_val->val));
+       } else if (type == ADF_HEX) {
+               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+                        "0x%lx", (unsigned long)val);
+       } else {
+               dev_err(&GET_DEV(accel_dev), "Unknown type given.\n");
+               kfree(key_val);
+               return -EINVAL;
+       }
+       key_val->type = type;
+
+       /* Add the key-value pair according to the following policy:
+        * 1. if the key doesn't exist, add it;
+        * 2. if the key already exists with a different value then update it
+        *    to the new value (the key is deleted and the newly created
+        *    key_val containing the new value is added to the database);
+        * 3. if the key exists with the same value, then return without doing
+        *    anything (the newly created key_val is freed).
+        */
+       if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) {
+               if (strncmp(temp_val, key_val->val, sizeof(temp_val))) {
+                       adf_cfg_keyval_remove(key, section);
+               } else {
+                       kfree(key_val);
+                       return 0;
+               }
+       }
+
+       down_write(&cfg->lock);
+       adf_cfg_keyval_add(key_val, section);
+       up_write(&cfg->lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
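+
+/*
+ * Illustrative sketch: writing a decimal entry; the section must exist
+ * first (section and key names from adf_cfg_strings.h):
+ *
+ *	unsigned long val = 1;
+ *
+ *	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ *		return -EFAULT;
+ *	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ *					ADF_NUM_CY, &val, ADF_DEC))
+ *		return -EFAULT;
+ */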
+
+/**
+ * adf_cfg_section_add() - Add config section entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @name: Name of the section
+ *
+ * Function adds a configuration section where key-value entries
+ * will be stored.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
+
+       if (sec)
+               return 0;
+
+       sec = kzalloc(sizeof(*sec), GFP_KERNEL);
+       if (!sec)
+               return -ENOMEM;
+
+       strscpy(sec->name, name, sizeof(sec->name));
+       INIT_LIST_HEAD(&sec->param_head);
+       down_write(&cfg->lock);
+       list_add_tail(&sec->list, &cfg->sec_list);
+       up_write(&cfg->lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_section_add);
+
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+                           const char *section, const char *name,
+                           char *value)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       int ret;
+
+       down_read(&cfg->lock);
+       ret = adf_cfg_key_val_get(accel_dev, section, name, value);
+       up_read(&cfg->lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_get_param_value);
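+
+/*
+ * Illustrative sketch: reading a value back; values are stored as strings,
+ * so numeric keys are parsed by the caller (do_something() is hypothetical):
+ *
+ *	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ *	unsigned long num;
+ *
+ *	if (!adf_cfg_get_param_value(accel_dev, ADF_KERNEL_SEC,
+ *				     ADF_NUM_CY, val) &&
+ *	    !kstrtoul(val, 10, &num))
+ *		do_something(num);
+ */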
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.h b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
new file mode 100644 (file)
index 0000000..376cde6
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_H_
+#define ADF_CFG_H_
+
+#include <linux/list.h>
+#include <linux/rwsem.h>
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_cfg_key_val {
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       enum adf_cfg_val_type type;
+       struct list_head list;
+};
+
+struct adf_cfg_section {
+       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+       struct list_head list;
+       struct list_head param_head;
+};
+
+struct adf_cfg_device_data {
+       struct list_head sec_list;
+       struct dentry *debug;
+       struct rw_semaphore lock;
+};
+
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+                               const char *section_name,
+                               const char *key, const void *val,
+                               enum adf_cfg_val_type type);
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+                           const char *section, const char *name, char *value);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
new file mode 100644 (file)
index 0000000..6e5de1d
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_COMMON_H_
+#define ADF_CFG_COMMON_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define ADF_CFG_MAX_STR_LEN 64
+#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_BASE_DEC 10
+#define ADF_CFG_BASE_HEX 16
+#define ADF_CFG_ALL_DEVICES 0xFE
+#define ADF_CFG_NO_DEVICE 0xFF
+#define ADF_CFG_AFFINITY_WHATEVER 0xFF
+#define MAX_DEVICE_NAME_SIZE 32
+#define ADF_MAX_DEVICES (32 * 32)
+#define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES)
+
+#define ADF_CFG_SERV_RING_PAIR_0_SHIFT 0
+#define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3
+#define ADF_CFG_SERV_RING_PAIR_2_SHIFT 6
+#define ADF_CFG_SERV_RING_PAIR_3_SHIFT 9
+enum adf_cfg_service_type {
+       UNUSED = 0,
+       CRYPTO,
+       COMP,
+       SYM,
+       ASYM,
+       USED
+};
+
+enum adf_cfg_val_type {
+       ADF_DEC,
+       ADF_HEX,
+       ADF_STR
+};
+
+enum adf_device_type {
+       DEV_UNKNOWN = 0,
+       DEV_DH895XCC,
+       DEV_DH895XCCVF,
+       DEV_C62X,
+       DEV_C62XVF,
+       DEV_C3XXX,
+       DEV_C3XXXVF,
+       DEV_4XXX,
+};
+
+struct adf_dev_status_info {
+       enum adf_device_type type;
+       __u32 accel_id;
+       __u32 instance_id;
+       __u8 num_ae;
+       __u8 num_accel;
+       __u8 num_logical_accel;
+       __u8 banks_per_accel;
+       __u8 state;
+       __u8 bus;
+       __u8 dev;
+       __u8 fun;
+       char name[MAX_DEVICE_NAME_SIZE];
+};
+
+#define ADF_CTL_IOC_MAGIC 'a'
+#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, __u32)
+#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, __s32)
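+
+/*
+ * Illustrative user-space sketch (assumes the control node appears as
+ * /dev/qat_adf_ctl, matching DEVICE_NAME in adf_ctl_drv.c):
+ *
+ *	__u32 num = 0;
+ *	int fd = open("/dev/qat_adf_ctl", O_RDONLY);
+ *
+ *	if (fd >= 0 && !ioctl(fd, IOCTL_GET_NUM_DEVICES, &num))
+ *		printf("%u QAT devices\n", num);
+ */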
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
new file mode 100644 (file)
index 0000000..5d8c3bd
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_STRINGS_H_
+#define ADF_CFG_STRINGS_H_
+
+#define ADF_GENERAL_SEC "GENERAL"
+#define ADF_KERNEL_SEC "KERNEL"
+#define ADF_ACCEL_SEC "Accelerator"
+#define ADF_NUM_CY "NumberCyInstances"
+#define ADF_NUM_DC "NumberDcInstances"
+#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
+#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
+#define ADF_RING_DC_SIZE "NumConcurrentRequests"
+#define ADF_RING_ASYM_TX "RingAsymTx"
+#define ADF_RING_SYM_TX "RingSymTx"
+#define ADF_RING_ASYM_RX "RingAsymRx"
+#define ADF_RING_SYM_RX "RingSymRx"
+#define ADF_RING_DC_TX "RingTx"
+#define ADF_RING_DC_RX "RingRx"
+#define ADF_ETRMGR_BANK "Bank"
+#define ADF_RING_SYM_BANK_NUM "BankSymNumber"
+#define ADF_RING_ASYM_BANK_NUM "BankAsymNumber"
+#define ADF_RING_DC_BANK_NUM "BankDcNumber"
+#define ADF_CY "Cy"
+#define ADF_DC "Dc"
+#define ADF_CFG_DC "dc"
+#define ADF_CFG_CY "sym;asym"
+#define ADF_SERVICES_ENABLED "ServicesEnabled"
+#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
+#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
+       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
+#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
+#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
+       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCE_TIMER
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
+       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_MSG_ENABLED
+#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
+#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
+       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
+#define ADF_ACCEL_STR "Accelerator%d"
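+
+/*
+ * Illustrative sketch: the *_FORMAT macros concatenate into printf
+ * templates, e.g. ADF_ETRMGR_COALESCE_TIMER_FORMAT expands to
+ * "Bank%dInterruptCoalescingTimerNs", so per-bank keys are built as:
+ *
+ *	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];	(from adf_cfg_common.h)
+ *
+ *	snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, bank_nr);
+ */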
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_user.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_user.h
new file mode 100644 (file)
index 0000000..421f4fb
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_USER_H_
+#define ADF_CFG_USER_H_
+
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_user_cfg_key_val {
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       union {
+               struct adf_user_cfg_key_val *next;
+               __u64 padding3;
+       };
+       enum adf_cfg_val_type type;
+} __packed;
+
+struct adf_user_cfg_section {
+       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+       union {
+               struct adf_user_cfg_key_val *params;
+               __u64 padding1;
+       };
+       union {
+               struct adf_user_cfg_section *next;
+               __u64 padding3;
+       };
+} __packed;
+
+struct adf_user_cfg_ctl_data {
+       union {
+               struct adf_user_cfg_section *config_section;
+               __u64 padding;
+       };
+       __u8 device_id;
+} __packed;
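+
+/*
+ * Illustrative user-space sketch: chaining one section holding one key-value
+ * pair for IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS; unset next/params pointers
+ * stay NULL and terminate the walk in adf_copy_key_value_data():
+ *
+ *	struct adf_user_cfg_key_val kv = { .type = ADF_STR };
+ *	struct adf_user_cfg_section sec = { .params = &kv };
+ *	struct adf_user_cfg_ctl_data cfg = { .config_section = &sec,
+ *					     .device_id = 0 };
+ *
+ *	strcpy(kv.key, "ServicesEnabled");
+ *	strcpy(kv.val, "sym;asym");
+ *	strcpy(sec.name, "GENERAL");
+ *	ioctl(fd, IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS, &cfg);
+ */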
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
new file mode 100644 (file)
index 0000000..db79759
--- /dev/null
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#ifndef ADF_DRV_H
+#define ADF_DRV_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_loader_handle.h"
+#include "icp_qat_hal.h"
+
+#define ADF_MAJOR_VERSION      0
+#define ADF_MINOR_VERSION      6
+#define ADF_BUILD_VERSION      0
+#define ADF_DRV_VERSION                __stringify(ADF_MAJOR_VERSION) "." \
+                               __stringify(ADF_MINOR_VERSION) "." \
+                               __stringify(ADF_BUILD_VERSION)
+
+#define ADF_STATUS_RESTARTING 0
+#define ADF_STATUS_STARTING 1
+#define ADF_STATUS_CONFIGURED 2
+#define ADF_STATUS_STARTED 3
+#define ADF_STATUS_AE_INITIALISED 4
+#define ADF_STATUS_AE_UCODE_LOADED 5
+#define ADF_STATUS_AE_STARTED 6
+#define ADF_STATUS_PF_RUNNING 7
+#define ADF_STATUS_IRQ_ALLOCATED 8
+
+enum adf_dev_reset_mode {
+       ADF_DEV_RESET_ASYNC = 0,
+       ADF_DEV_RESET_SYNC
+};
+
+enum adf_event {
+       ADF_EVENT_INIT = 0,
+       ADF_EVENT_START,
+       ADF_EVENT_STOP,
+       ADF_EVENT_SHUTDOWN,
+       ADF_EVENT_RESTARTING,
+       ADF_EVENT_RESTARTED,
+};
+
+struct service_hndl {
+       int (*event_hld)(struct adf_accel_dev *accel_dev,
+                        enum adf_event event);
+       unsigned long init_status[ADF_DEVS_ARRAY_SIZE];
+       unsigned long start_status[ADF_DEVS_ARRAY_SIZE];
+       char *name;
+       struct list_head list;
+};
+
+int adf_service_register(struct service_hndl *service);
+int adf_service_unregister(struct service_hndl *service);
+
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
+int adf_dev_restart(struct adf_accel_dev *accel_dev);
+
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+void adf_clean_vf_map(bool);
+
+int adf_ctl_dev_register(void);
+void adf_ctl_dev_unregister(void);
+int adf_processes_dev_register(void);
+void adf_processes_dev_unregister(void);
+
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf);
+struct list_head *adf_devmgr_get_head(void);
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id);
+struct adf_accel_dev *adf_devmgr_get_first(void);
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
+int adf_devmgr_verify_id(u32 id);
+void adf_devmgr_get_num_dev(u32 *num);
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
+int adf_dev_started(struct adf_accel_dev *accel_dev);
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
+int adf_ae_init(struct adf_accel_dev *accel_dev);
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev);
+int adf_ae_start(struct adf_accel_dev *accel_dev);
+int adf_ae_stop(struct adf_accel_dev *accel_dev);
+
+extern const struct pci_error_handlers adf_err_handler;
+void adf_reset_sbr(struct adf_accel_dev *accel_dev);
+void adf_reset_flr(struct adf_accel_dev *accel_dev);
+void adf_dev_restore(struct adf_accel_dev *accel_dev);
+int adf_init_aer(void);
+void adf_exit_aer(void);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb(struct adf_etr_ring_data *ring);
+
+int adf_dev_get(struct adf_accel_dev *accel_dev);
+void adf_dev_put(struct adf_accel_dev *accel_dev);
+int adf_dev_in_use(struct adf_accel_dev *accel_dev);
+int adf_init_etr_data(struct adf_accel_dev *accel_dev);
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
+int qat_crypto_register(void);
+int qat_crypto_unregister(void);
+int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev);
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
+void qat_crypto_put_instance(struct qat_crypto_instance *inst);
+void qat_alg_callback(void *resp);
+void qat_alg_asym_callback(void *resp);
+int qat_algs_register(void);
+void qat_algs_unregister(void);
+int qat_asym_algs_register(void);
+void qat_asym_algs_unregister(void);
+
+struct qat_compression_instance *qat_compression_get_instance_node(int node);
+void qat_compression_put_instance(struct qat_compression_instance *inst);
+int qat_compression_register(void);
+int qat_compression_unregister(void);
+int qat_comp_algs_register(void);
+void qat_comp_algs_unregister(void);
+void qat_comp_alg_callback(void *resp);
+
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+
+int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev);
+
+int adf_sysfs_init(struct adf_accel_dev *accel_dev);
+
+int qat_hal_init(struct adf_accel_dev *accel_dev);
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_start(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                 unsigned int ctx_mask);
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+                         unsigned char ae, unsigned int ctx_mask);
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+                           unsigned int ae);
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
+                          unsigned char mode);
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+                           unsigned char ae, unsigned char mode);
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, unsigned char mode);
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned int ctx_mask, unsigned int upc);
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+                      unsigned char ae, unsigned int uaddr,
+                      unsigned int words_num, u64 *uword);
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                    unsigned int uword_addr, unsigned int words_num,
+                    unsigned int *data);
+int qat_hal_get_ins_num(void);
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                       unsigned char ae,
+                       struct icp_qat_uof_batch_init *lm_init_header);
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned long ctx_mask,
+                    enum icp_qat_uof_regtype reg_type,
+                    unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned long ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned long ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned long ctx_mask,
+                   unsigned short reg_num, unsigned int regdata);
+int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                 unsigned char ae, unsigned short lm_addr, unsigned int value);
+void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned char mode);
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
+void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle);
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr,
+                      int mem_size);
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+                    void *addr_ptr, u32 mem_size, char *obj_name);
+int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
+                            unsigned int cfg_ae_mask);
+int adf_init_misc_wq(void);
+void adf_exit_misc_wq(void);
+bool adf_misc_wq_queue_work(struct work_struct *work);
+#if defined(CONFIG_PCI_IOV)
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
+void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
+bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
+bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr);
+int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev);
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
+int adf_init_vf_wq(void);
+void adf_exit_vf_wq(void);
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
+#else
+#define adf_sriov_configure NULL
+
+static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_init_pf_wq(void)
+{
+       return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
+
+static inline int adf_init_vf_wq(void)
+{
+       return 0;
+}
+
+static inline void adf_exit_vf_wq(void)
+{
+}
+
+#endif
+
+static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_bar *pmisc;
+
+       pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+
+       return pmisc->virt_addr;
+}
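+
+/*
+ * Illustrative sketch: callers pair this helper with the CSR accessors used
+ * elsewhere in this patch (offset and value are hypothetical):
+ *
+ *	void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ *
+ *	ADF_CSR_WR(pmisc, offset, value);
+ */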
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
new file mode 100644 (file)
index 0000000..88c41d6
--- /dev/null
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_user.h"
+
+#define ADF_CFG_MAX_SECTION 512
+#define ADF_CFG_MAX_KEY_VAL 256
+
+#define DEVICE_NAME "qat_adf_ctl"
+
+static DEFINE_MUTEX(adf_ctl_lock);
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+static const struct file_operations adf_ctl_ops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = adf_ctl_ioctl,
+       .compat_ioctl = compat_ptr_ioctl,
+};
+
+struct adf_ctl_drv_info {
+       unsigned int major;
+       struct cdev drv_cdev;
+       struct class *drv_class;
+};
+
+static struct adf_ctl_drv_info adf_ctl_drv;
+
+static void adf_chr_drv_destroy(void)
+{
+       device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
+       cdev_del(&adf_ctl_drv.drv_cdev);
+       class_destroy(adf_ctl_drv.drv_class);
+       unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
+}
+
+static int adf_chr_drv_create(void)
+{
+       dev_t dev_id;
+       struct device *drv_device;
+
+       if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
+               pr_err("QAT: unable to allocate chrdev region\n");
+               return -EFAULT;
+       }
+
+       adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
+       if (IS_ERR(adf_ctl_drv.drv_class)) {
+               pr_err("QAT: class_create failed for adf_ctl\n");
+               goto err_chrdev_unreg;
+       }
+       adf_ctl_drv.major = MAJOR(dev_id);
+       cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
+       if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
+               pr_err("QAT: cdev add failed\n");
+               goto err_class_destr;
+       }
+
+       drv_device = device_create(adf_ctl_drv.drv_class, NULL,
+                                  MKDEV(adf_ctl_drv.major, 0),
+                                  NULL, DEVICE_NAME);
+       if (IS_ERR(drv_device)) {
+               pr_err("QAT: failed to create device\n");
+               goto err_cdev_del;
+       }
+       return 0;
+err_cdev_del:
+       cdev_del(&adf_ctl_drv.drv_cdev);
+err_class_destr:
+       class_destroy(adf_ctl_drv.drv_class);
+err_chrdev_unreg:
+       unregister_chrdev_region(dev_id, 1);
+       return -EFAULT;
+}
+
+static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
+                                  unsigned long arg)
+{
+       struct adf_user_cfg_ctl_data *cfg_data;
+
+       cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
+       if (!cfg_data)
+               return -ENOMEM;
+
+       /* Initialize device id to NO DEVICE as 0 is a valid device id */
+       cfg_data->device_id = ADF_CFG_NO_DEVICE;
+
+       if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+               pr_err("QAT: failed to copy from user cfg_data.\n");
+               kfree(cfg_data);
+               return -EIO;
+       }
+
+       *ctl_data = cfg_data;
+       return 0;
+}
+
+static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
+                                 const char *section,
+                                 const struct adf_user_cfg_key_val *key_val)
+{
+       if (key_val->type == ADF_HEX) {
+               long *ptr = (long *)key_val->val;
+               long val = *ptr;
+
+               if (adf_cfg_add_key_value_param(accel_dev, section,
+                                               key_val->key, (void *)val,
+                                               key_val->type)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "failed to add hex keyvalue.\n");
+                       return -EFAULT;
+               }
+       } else {
+               if (adf_cfg_add_key_value_param(accel_dev, section,
+                                               key_val->key, key_val->val,
+                                               key_val->type)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "failed to add keyvalue.\n");
+                       return -EFAULT;
+               }
+       }
+       return 0;
+}
+
+static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
+                                  struct adf_user_cfg_ctl_data *ctl_data)
+{
+       struct adf_user_cfg_key_val key_val;
+       struct adf_user_cfg_key_val *params_head;
+       struct adf_user_cfg_section section, *section_head;
+       int i, j;
+
+       section_head = ctl_data->config_section;
+
+       for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
+               if (copy_from_user(&section, (void __user *)section_head,
+                                  sizeof(*section_head))) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "failed to copy section info\n");
+                       goto out_err;
+               }
+
+               if (adf_cfg_section_add(accel_dev, section.name)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "failed to add section.\n");
+                       goto out_err;
+               }
+
+               params_head = section.params;
+
+               for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
+                       if (copy_from_user(&key_val, (void __user *)params_head,
+                                          sizeof(key_val))) {
+                               dev_err(&GET_DEV(accel_dev),
+                                       "Failed to copy keyvalue.\n");
+                               goto out_err;
+                       }
+                       if (adf_add_key_value_data(accel_dev, section.name,
+                                                  &key_val)) {
+                               goto out_err;
+                       }
+                       params_head = key_val.next;
+               }
+               section_head = section.next;
+       }
+       return 0;
+out_err:
+       adf_cfg_del_all(accel_dev);
+       return -EFAULT;
+}
+
+static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
+                                   unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+       struct adf_accel_dev *accel_dev;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+       if (!accel_dev) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       if (adf_dev_started(accel_dev)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       if (adf_copy_key_value_data(accel_dev, ctl_data)) {
+               ret = -EFAULT;
+               goto out;
+       }
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_is_device_in_use(int id)
+{
+       struct adf_accel_dev *dev;
+
+       list_for_each_entry(dev, adf_devmgr_get_head(), list) {
+               if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+                       if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
+                               dev_info(&GET_DEV(dev),
+                                        "device qat_dev%d is busy\n",
+                                        dev->accel_id);
+                               return -EBUSY;
+                       }
+               }
+       }
+       return 0;
+}
+
+static void adf_ctl_stop_devices(u32 id)
+{
+       struct adf_accel_dev *accel_dev;
+
+       list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+               if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+                       if (!adf_dev_started(accel_dev))
+                               continue;
+
+                       /* First stop all VFs */
+                       if (!accel_dev->is_vf)
+                               continue;
+
+                       adf_dev_down(accel_dev, false);
+               }
+       }
+
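+       /* Then stop the remaining (PF) devices */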
+       list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+               if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+                       if (!adf_dev_started(accel_dev))
+                               continue;
+
+                       adf_dev_down(accel_dev, false);
+               }
+       }
+}
+
+static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
+                                 unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       if (adf_devmgr_verify_id(ctl_data->device_id)) {
+               pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+               ret = -ENODEV;
+               goto out;
+       }
+
+       ret = adf_ctl_is_device_in_use(ctl_data->device_id);
+       if (ret)
+               goto out;
+
+       if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
+               pr_info("QAT: Stopping all acceleration devices.\n");
+       else
+               pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
+                       ctl_data->device_id);
+
+       adf_ctl_stop_devices(ctl_data->device_id);
+
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
+                                  unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+       struct adf_accel_dev *accel_dev;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       ret = -ENODEV;
+       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+       if (!accel_dev)
+               goto out;
+
+       dev_info(&GET_DEV(accel_dev),
+                "Starting acceleration device qat_dev%d.\n",
+                ctl_data->device_id);
+
+       ret = adf_dev_up(accel_dev, false);
+
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+                       ctl_data->device_id);
+               adf_dev_down(accel_dev, false);
+       }
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
+                                        unsigned long arg)
+{
+       u32 num_devices = 0;
+
+       adf_devmgr_get_num_dev(&num_devices);
+       if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
+                                   unsigned long arg)
+{
+       struct adf_hw_device_data *hw_data;
+       struct adf_dev_status_info dev_info;
+       struct adf_accel_dev *accel_dev;
+
+       if (copy_from_user(&dev_info, (void __user *)arg,
+                          sizeof(struct adf_dev_status_info))) {
+               pr_err("QAT: failed to copy from user.\n");
+               return -EFAULT;
+       }
+
+       accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
+       if (!accel_dev)
+               return -ENODEV;
+
+       hw_data = accel_dev->hw_device;
+       dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
+       dev_info.num_ae = hw_data->get_num_aes(hw_data);
+       dev_info.num_accel = hw_data->get_num_accels(hw_data);
+       dev_info.num_logical_accel = hw_data->num_logical_accel;
+       dev_info.banks_per_accel = hw_data->num_banks
+                                       / hw_data->num_logical_accel;
+       strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+       dev_info.instance_id = hw_data->instance_id;
+       dev_info.type = hw_data->dev_class->type;
+       dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
+       dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
+       dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
+
+       if (copy_to_user((void __user *)arg, &dev_info,
+                        sizeof(struct adf_dev_status_info))) {
+               dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+       int ret;
+
+       if (mutex_lock_interruptible(&adf_ctl_lock))
+               return -EFAULT;
+
+       switch (cmd) {
+       case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
+               ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
+               break;
+
+       case IOCTL_STOP_ACCEL_DEV:
+               ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
+               break;
+
+       case IOCTL_START_ACCEL_DEV:
+               ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
+               break;
+
+       case IOCTL_GET_NUM_DEVICES:
+               ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
+               break;
+
+       case IOCTL_STATUS_ACCEL_DEV:
+               ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
+               break;
+       default:
+               pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
+               ret = -EFAULT;
+               break;
+       }
+       mutex_unlock(&adf_ctl_lock);
+       return ret;
+}
+
+static int __init adf_register_ctl_device_driver(void)
+{
+       if (adf_chr_drv_create())
+               goto err_chr_dev;
+
+       if (adf_init_misc_wq())
+               goto err_misc_wq;
+
+       if (adf_init_aer())
+               goto err_aer;
+
+       if (adf_init_pf_wq())
+               goto err_pf_wq;
+
+       if (adf_init_vf_wq())
+               goto err_vf_wq;
+
+       if (qat_crypto_register())
+               goto err_crypto_register;
+
+       if (qat_compression_register())
+               goto err_compression_register;
+
+       return 0;
+
+err_compression_register:
+       qat_crypto_unregister();
+err_crypto_register:
+       adf_exit_vf_wq();
+err_vf_wq:
+       adf_exit_pf_wq();
+err_pf_wq:
+       adf_exit_aer();
+err_aer:
+       adf_exit_misc_wq();
+err_misc_wq:
+       adf_chr_drv_destroy();
+err_chr_dev:
+       mutex_destroy(&adf_ctl_lock);
+       return -EFAULT;
+}
+
+static void __exit adf_unregister_ctl_device_driver(void)
+{
+       adf_chr_drv_destroy();
+       adf_exit_misc_wq();
+       adf_exit_aer();
+       adf_exit_vf_wq();
+       adf_exit_pf_wq();
+       qat_crypto_unregister();
+       qat_compression_unregister();
+       adf_clean_vf_map(false);
+       mutex_destroy(&adf_ctl_lock);
+}
+
+module_init(adf_register_ctl_device_driver);
+module_exit(adf_unregister_ctl_device_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_ALIAS_CRYPTO("intel_qat");
+MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
new file mode 100644 (file)
index 0000000..86ee36f
--- /dev/null
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(accel_table);
+static LIST_HEAD(vfs_table);
+static DEFINE_MUTEX(table_lock);
+static u32 num_devices;
+static u8 id_map[ADF_MAX_DEVICES];
+
+struct vf_id_map {
+       u32 bdf;
+       u32 id;
+       u32 fake_id;
+       bool attached;
+       struct list_head list;
+};
+
+static int adf_get_vf_id(struct adf_accel_dev *vf)
+{
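+       /*
+        * 7 * (slot - 1) + func + (slot - 1) == 8 * (slot - 1) + func:
+        * each VF slot carries eight functions, so this folds (slot, func)
+        * into a linear VF index.
+        */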
+       return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
+               PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
+               (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
+}
+
+static int adf_get_vf_num(struct adf_accel_dev *vf)
+{
+       return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
+}
+
+static struct vf_id_map *adf_find_vf(u32 bdf)
+{
+       struct list_head *itr;
+
+       list_for_each(itr, &vfs_table) {
+               struct vf_id_map *ptr =
+                       list_entry(itr, struct vf_id_map, list);
+
+               if (ptr->bdf == bdf)
+                       return ptr;
+       }
+       return NULL;
+}
+
+static int adf_get_vf_real_id(u32 fake)
+{
+       struct list_head *itr;
+
+       list_for_each(itr, &vfs_table) {
+               struct vf_id_map *ptr =
+                       list_entry(itr, struct vf_id_map, list);
+               if (ptr->fake_id == fake)
+                       return ptr->id;
+       }
+       return -1;
+}
+
+/**
+ * adf_clean_vf_map() - Cleans VF id mappings
+ * @vf: flag indicating whether mappings are cleaned
+ *     for VFs only or for both VFs and PFs
+ *
+ * Function cleans internal ids for virtual functions.
+ */
+void adf_clean_vf_map(bool vf)
+{
+       struct vf_id_map *map;
+       struct list_head *ptr, *tmp;
+
+       mutex_lock(&table_lock);
+       list_for_each_safe(ptr, tmp, &vfs_table) {
+               map = list_entry(ptr, struct vf_id_map, list);
+               if (map->bdf != -1) {
+                       id_map[map->id] = 0;
+                       num_devices--;
+               }
+
+               if (vf && map->bdf == -1)
+                       continue;
+
+               list_del(ptr);
+               kfree(map);
+       }
+       mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_clean_vf_map);
+
+/**
+ * adf_devmgr_update_class_index() - Update internal index
+ * @hw_data:  Pointer to internal device data.
+ *
+ * Function updates internal dev index for VFs
+ */
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
+{
+       struct adf_hw_device_class *class = hw_data->dev_class;
+       struct list_head *itr;
+       int i = 0;
+
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (ptr->hw_device->dev_class == class)
+                       ptr->hw_device->instance_id = i++;
+
+               if (i == class->instances)
+                       break;
+       }
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
+
+static unsigned int adf_find_free_id(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < ADF_MAX_DEVICES; i++) {
+               if (!id_map[i]) {
+                       id_map[i] = 1;
+                       return i;
+               }
+       }
+       return ADF_MAX_DEVICES + 1;
+}
+
+/**
+ * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
+ * @accel_dev:  Pointer to acceleration device.
+ * @pf:                Corresponding PF if the accel_dev is a VF
+ *
+ * Function adds acceleration device to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf)
+{
+       struct list_head *itr;
+       int ret = 0;
+
+       if (num_devices == ADF_MAX_DEVICES) {
+               dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
+                       ADF_MAX_DEVICES);
+               return -EFAULT;
+       }
+
+       mutex_lock(&table_lock);
+       atomic_set(&accel_dev->ref_count, 0);
+
+       /* PF on host or VF on guest - optimized to remove redundant is_vf */
+       if (!accel_dev->is_vf || !pf) {
+               struct vf_id_map *map;
+
+               list_for_each(itr, &accel_table) {
+                       struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+                       if (ptr == accel_dev) {
+                               ret = -EEXIST;
+                               goto unlock;
+                       }
+               }
+
+               list_add_tail(&accel_dev->list, &accel_table);
+               accel_dev->accel_id = adf_find_free_id();
+               if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+                       ret = -EFAULT;
+                       goto unlock;
+               }
+               num_devices++;
+               map = kzalloc(sizeof(*map), GFP_KERNEL);
+               if (!map) {
+                       ret = -ENOMEM;
+                       goto unlock;
+               }
+               map->bdf = ~0;
+               map->id = accel_dev->accel_id;
+               map->fake_id = map->id;
+               map->attached = true;
+               list_add_tail(&map->list, &vfs_table);
+       } else if (accel_dev->is_vf && pf) {
+               /* VF on host */
+               struct vf_id_map *map;
+
+               map = adf_find_vf(adf_get_vf_num(accel_dev));
+               if (map) {
+                       struct vf_id_map *next;
+
+                       accel_dev->accel_id = map->id;
+                       list_add_tail(&accel_dev->list, &accel_table);
+                       map->fake_id++;
+                       map->attached = true;
+                       next = list_next_entry(map, list);
+                       while (next && &next->list != &vfs_table) {
+                               next->fake_id++;
+                               next = list_next_entry(next, list);
+                       }
+
+                       ret = 0;
+                       goto unlock;
+               }
+
+               map = kzalloc(sizeof(*map), GFP_KERNEL);
+               if (!map) {
+                       ret = -ENOMEM;
+                       goto unlock;
+               }
+               accel_dev->accel_id = adf_find_free_id();
+               if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+                       kfree(map);
+                       ret = -EFAULT;
+                       goto unlock;
+               }
+               num_devices++;
+               list_add_tail(&accel_dev->list, &accel_table);
+               map->bdf = adf_get_vf_num(accel_dev);
+               map->id = accel_dev->accel_id;
+               map->fake_id = map->id;
+               map->attached = true;
+               list_add_tail(&map->list, &vfs_table);
+       }
+       mutex_init(&accel_dev->state_lock);
+unlock:
+       mutex_unlock(&table_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
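
A device-specific driver would typically call adf_devmgr_add_dev() from its probe path, passing NULL for @pf on a physical function. The following is a hedged sketch only; the allocation, setup, and error handling are illustrative and not part of this patch:

static int my_qat_probe(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev;

	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
				 dev_to_node(&pdev->dev));
	if (!accel_dev)
		return -ENOMEM;

	/* ... fill in accel_pci_dev, hw_device, owner, etc. ... */

	/* Register with the framework; NULL pf since this is a PF */
	return adf_devmgr_add_dev(accel_dev, NULL);
}
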
+
+struct list_head *adf_devmgr_get_head(void)
+{
+       return &accel_table;
+}
+
+/**
+ * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
+ * @accel_dev:  Pointer to acceleration device.
+ * @pf:                Corresponding PF if the accel_dev is a VF
+ *
+ * Function removes acceleration device from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf)
+{
+       mutex_lock(&table_lock);
+       /* PF on host, or VF on guest where no PF reference exists */
+       if (!accel_dev->is_vf || !pf) {
+               id_map[accel_dev->accel_id] = 0;
+               num_devices--;
+       } else if (accel_dev->is_vf && pf) {
+               struct vf_id_map *map, *next;
+
+               map = adf_find_vf(adf_get_vf_num(accel_dev));
+               if (!map) {
+                       dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
+                       goto unlock;
+               }
+               map->fake_id--;
+               map->attached = false;
+               next = list_next_entry(map, list);
+               while (next && &next->list != &vfs_table) {
+                       next->fake_id--;
+                       next = list_next_entry(next, list);
+               }
+       }
+unlock:
+       mutex_destroy(&accel_dev->state_lock);
+       list_del(&accel_dev->list);
+       mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
+
+struct adf_accel_dev *adf_devmgr_get_first(void)
+{
+       struct adf_accel_dev *dev = NULL;
+
+       if (!list_empty(&accel_table))
+               dev = list_first_entry(&accel_table, struct adf_accel_dev,
+                                      list);
+       return dev;
+}
+
+/**
+ * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
+ * @pci_dev:  Pointer to PCI device.
+ *
+ * Function returns acceleration device associated with the given PCI device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: pointer to accel_dev or NULL if not found.
+ */
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
+{
+       struct list_head *itr;
+
+       mutex_lock(&table_lock);
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (ptr->accel_pci_dev.pci_dev == pci_dev) {
+                       mutex_unlock(&table_lock);
+                       return ptr;
+               }
+       }
+       mutex_unlock(&table_lock);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
+
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
+{
+       struct list_head *itr;
+       int real_id;
+
+       mutex_lock(&table_lock);
+       real_id = adf_get_vf_real_id(id);
+       if (real_id < 0)
+               goto unlock;
+
+       id = real_id;
+
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+               if (ptr->accel_id == id) {
+                       mutex_unlock(&table_lock);
+                       return ptr;
+               }
+       }
+unlock:
+       mutex_unlock(&table_lock);
+       return NULL;
+}
+
+int adf_devmgr_verify_id(u32 id)
+{
+       if (id == ADF_CFG_ALL_DEVICES)
+               return 0;
+
+       if (adf_devmgr_get_dev_by_id(id))
+               return 0;
+
+       return -ENODEV;
+}
+
+static int adf_get_num_detached_vfs(void)
+{
+       struct list_head *itr;
+       int vfs = 0;
+
+       mutex_lock(&table_lock);
+       list_for_each(itr, &vfs_table) {
+               struct vf_id_map *ptr =
+                       list_entry(itr, struct vf_id_map, list);
+               if (ptr->bdf != ~0 && !ptr->attached)
+                       vfs++;
+       }
+       mutex_unlock(&table_lock);
+       return vfs;
+}
+
+void adf_devmgr_get_num_dev(u32 *num)
+{
+       *num = num_devices - adf_get_num_detached_vfs();
+}
+
+/**
+ * adf_dev_in_use() - Check whether accel_dev is currently in use
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
+int adf_dev_in_use(struct adf_accel_dev *accel_dev)
+{
+       return atomic_read(&accel_dev->ref_count) != 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_in_use);
+
+/**
+ * adf_dev_get() - Increment accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Increment the accel_dev refcount and, if this takes the first
+ * reference in the current period of use, increment the module
+ * refcount too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, -EFAULT when the module refcount cannot be taken
+ */
+int adf_dev_get(struct adf_accel_dev *accel_dev)
+{
+       if (atomic_add_return(1, &accel_dev->ref_count) == 1)
+               if (!try_module_get(accel_dev->owner))
+                       return -EFAULT;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_get);
+
+/**
+ * adf_dev_put() - Decrement accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Decrement the accel_dev refcount and, if this drops the last
+ * reference in the current period of use, decrement the module
+ * refcount too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_dev_put(struct adf_accel_dev *accel_dev)
+{
+       if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
+               module_put(accel_dev->owner);
+}
+EXPORT_SYMBOL_GPL(adf_dev_put);
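
Together, adf_dev_get() and adf_dev_put() pin both the device and its owning module for the duration of a unit of work. A hedged sketch of the intended pairing, where do_work() is a placeholder for any operation that needs the device:

static int submit_to_device(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_dev_get(accel_dev);	/* also pins the owning module */
	if (ret)
		return ret;

	ret = do_work(accel_dev);	/* placeholder */

	adf_dev_put(accel_dev);		/* drops module ref on last user */
	return ret;
}
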
+
+/**
+ * adf_devmgr_in_reset() - Check whether device is in reset
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device is being reset, 0 otherwise.
+ */
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
+{
+       return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
+
+/**
+ * adf_dev_started() - Check whether device has started
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
+int adf_dev_started(struct adf_accel_dev *accel_dev)
+{
+       return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
+}
+EXPORT_SYMBOL_GPL(adf_dev_started);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
new file mode 100644 (file)
index 0000000..eeb30da
--- /dev/null
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_gen2_config.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "qat_compression.h"
+#include "adf_transport_access_macros.h"
+
+static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       int banks = GET_MAX_BANKS(accel_dev);
+       int cpus = num_online_cpus();
+       unsigned long val;
+       int instances;
+       int ret;
+       int i;
+
+       if (adf_hw_dev_has_crypto(accel_dev))
+               instances = min(cpus, banks);
+       else
+               instances = 0;
+
+       for (i = 0; i < instances; i++) {
+               val = i;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+                        i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+               val = 128;
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 512;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 0;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 2;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 8;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 10;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = ADF_COALESCING_DEF_TIME;
+               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+       }
+
+       val = i;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+                                         &val, ADF_DEC);
+       if (ret)
+               goto err;
+
+       return ret;
+
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+       return ret;
+}
+
+static int adf_gen2_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       int banks = GET_MAX_BANKS(accel_dev);
+       int cpus = num_online_cpus();
+       unsigned long val;
+       int instances;
+       int ret;
+       int i;
+
+       if (adf_hw_dev_has_compression(accel_dev))
+               instances = min(cpus, banks);
+       else
+               instances = 0;
+
+       for (i = 0; i < instances; i++) {
+               val = i;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 512;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 6;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 14;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+       }
+
+       val = i;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+                                         &val, ADF_DEC);
+       if (ret)
+               goto err;
+
+       return ret;
+
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+       return ret;
+}
+
+/**
+ * adf_gen2_dev_config() - create the device configuration
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates the device configuration required to create
+ * crypto and compression instances.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_gen2_dev_config(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+       if (ret)
+               goto err;
+
+       ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+       if (ret)
+               goto err;
+
+       ret = adf_gen2_crypto_dev_config(accel_dev);
+       if (ret)
+               goto err;
+
+       ret = adf_gen2_comp_dev_config(accel_dev);
+       if (ret)
+               goto err;
+
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+       return ret;
+
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_dev_config);
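
A gen2 device driver is expected to publish this helper through its hw_data during init so that the core invokes it before bringing the device up. A hedged sketch (the dev_config field is per adf_hw_device_data in this series; the surrounding init code is assumed):

static void my_gen2_init_hw_data(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_config = adf_gen2_dev_config;
}
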
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.h
new file mode 100644 (file)
index 0000000..4bf9da2
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN2_CONFIG_H_
+#define ADF_GEN2_CONFIG_H_
+
+#include "adf_accel_devices.h"
+
+int adf_gen2_dev_config(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
new file mode 100644 (file)
index 0000000..47261b1
--- /dev/null
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_gen2_dc.h"
+#include "icp_qat_fw_comp.h"
+
+static void qat_comp_build_deflate_ctx(void *ctx)
+{
+       struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+       struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
+
+       memset(req_tmpl, 0, sizeof(*req_tmpl));
+       header->hdr_flags =
+               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+       header->comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
+                                           QAT_COMN_PTR_TYPE_SGL);
+       header->serv_specif_flags =
+               ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+                                           ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+       cd_pars->u.sl.comp_slice_cfg_word[0] =
+               ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+                                                   ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+                                                   ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+                                                   ICP_QAT_HW_COMPRESSION_DEPTH_1,
+                                                   ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+       req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
+       req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
+       req_pars->req_par_flags =
+               ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
+                                                     ICP_QAT_FW_COMP_EOP,
+                                                     ICP_QAT_FW_COMP_BFINAL,
+                                                     ICP_QAT_FW_COMP_CNV,
+                                                     ICP_QAT_FW_COMP_CNV_RECOVERY,
+                                                     ICP_QAT_FW_COMP_NO_CNV_DFX,
+                                                     ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+                                                     ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+                                                     ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+                                                     ICP_QAT_FW_COMP_NO_APPEND_CRC,
+                                                     ICP_QAT_FW_COMP_NO_DROP_DATA);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+       ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
+
+       /* Fill second half of the template for decompression */
+       memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
+       req_tmpl++;
+       header = &req_tmpl->comn_hdr;
+       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+       cd_pars = &req_tmpl->cd_pars;
+       cd_pars->u.sl.comp_slice_cfg_word[0] =
+               ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
+                                                   ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+                                                   ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+                                                   ICP_QAT_HW_COMPRESSION_DEPTH_1,
+                                                   ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+}
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+       dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
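
Note that the builder fills two consecutive request templates in the caller's buffer: compression at index 0 and, via the memcpy above, decompression at index 1. Wiring it up is a one-liner in the device's hw_data init; a hedged sketch (dc_ops lives in adf_hw_device_data per this series):

static void my_gen2_init(struct adf_hw_device_data *hw_data)
{
	adf_gen2_init_dc_ops(&hw_data->dc_ops);
}
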
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
new file mode 100644 (file)
index 0000000..6eae023
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN2_DC_H
+#define ADF_GEN2_DC_H
+
+#include "adf_accel_devices.h"
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
+
+#endif /* ADF_GEN2_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
new file mode 100644 (file)
index 0000000..d188454
--- /dev/null
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 Intel Corporation */
+#include "adf_common_drv.h"
+#include "adf_gen2_hw_data.h"
+#include "icp_qat_hw.h"
+#include <linux/pci.h>
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
+{
+       if (!self || !self->accel_mask)
+               return 0;
+
+       return hweight16(self->accel_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);
+
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
+{
+       if (!self || !self->ae_mask)
+               return 0;
+
+       return hweight32(self->ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
+
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       unsigned long accel_mask = hw_data->accel_mask;
+       unsigned long ae_mask = hw_data->ae_mask;
+       unsigned int val, i;
+
+       /* Enable Accel Engine error detection & correction */
+       for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
+               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i));
+               val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i), val);
+               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i));
+               val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i), val);
+       }
+
+       /* Enable shared memory error detection & correction */
+       for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_UERRSSMSH(i));
+               val |= ADF_GEN2_ERRSSMSH_EN;
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_UERRSSMSH(i), val);
+               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_CERRSSMSH(i));
+               val |= ADF_GEN2_ERRSSMSH_EN;
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_CERRSSMSH(i), val);
+       }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
+
+void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
+                          int num_a_regs, int num_b_regs)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       u32 reg;
+       int i;
+
+       /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
+       for (i = 0; i < num_a_regs; i++) {
+               reg = READ_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i);
+               if (enable)
+                       reg |= AE2FUNCTION_MAP_VALID;
+               else
+                       reg &= ~AE2FUNCTION_MAP_VALID;
+               WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i, reg);
+       }
+
+       /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group B */
+       for (i = 0; i < num_b_regs; i++) {
+               reg = READ_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i);
+               if (enable)
+                       reg |= AE2FUNCTION_MAP_VALID;
+               else
+                       reg &= ~AE2FUNCTION_MAP_VALID;
+               WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i, reg);
+       }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_cfg_iov_thds);
+
+void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info)
+{
+       admin_csrs_info->mailbox_offset = ADF_MAILBOX_BASE_OFFSET;
+       admin_csrs_info->admin_msg_ur = ADF_ADMINMSGUR_OFFSET;
+       admin_csrs_info->admin_msg_lr = ADF_ADMINMSGLR_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_admin_info);
+
+void adf_gen2_get_arb_info(struct arb_info *arb_info)
+{
+       arb_info->arb_cfg = ADF_ARB_CONFIG;
+       arb_info->arb_offset = ADF_ARB_OFFSET;
+       arb_info->wt2sam_offset = ADF_ARB_WRK_2_SER_MAP_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info);
+
+void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *addr = adf_get_pmisc_base(accel_dev);
+       u32 val;
+
+       val = accel_dev->pf.vf_info ? 0 : BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1;
+
+       /* Enable bundle and misc interrupts */
+       ADF_CSR_WR(addr, ADF_GEN2_SMIAPF0_MASK_OFFSET, val);
+       ADF_CSR_WR(addr, ADF_GEN2_SMIAPF1_MASK_OFFSET, ADF_GEN2_SMIA1_MASK);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+       return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+       return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               u32 value)
+{
+       WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+       return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               u32 value)
+{
+       WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+       return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
+                                 u32 ring, u32 value)
+{
+       WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               dma_addr_t addr)
+{
+       WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+       WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+       WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
+                                u32 value)
+{
+       WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+                                 u32 value)
+{
+       WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+                                      u32 value)
+{
+       WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+                                     u32 value)
+{
+       WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+       csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+       csr_ops->read_csr_ring_head = read_csr_ring_head;
+       csr_ops->write_csr_ring_head = write_csr_ring_head;
+       csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+       csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+       csr_ops->read_csr_e_stat = read_csr_e_stat;
+       csr_ops->write_csr_ring_config = write_csr_ring_config;
+       csr_ops->write_csr_ring_base = write_csr_ring_base;
+       csr_ops->write_csr_int_flag = write_csr_int_flag;
+       csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+       csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+       csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+       csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+       csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
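
Ring code is expected to reach the hardware only through this ops table rather than the raw macros, so gen2 and gen4 devices can share the transport layer. An illustrative sketch using the GET_CSR_OPS() accessor from adf_accel_devices.h:

static u32 peek_ring_head(struct adf_accel_dev *accel_dev,
			  void __iomem *csr_base, u32 bank, u32 ring)
{
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);

	return csr_ops->read_csr_ring_head(csr_base, bank, ring);
}
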
+
+u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+       u32 straps = hw_data->straps;
+       u32 fuses = hw_data->fuses;
+       u32 legfuses;
+       u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+                          ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+                          ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+                          ICP_ACCEL_CAPABILITIES_CIPHER |
+                          ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+       /* Read accelerator capabilities mask */
+       pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
+
+       /* A set bit in legfuses means the feature is OFF in this SKU */
+       if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+       if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+       if ((straps | fuses) & ADF_POWERGATE_PKE)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+
+       if ((straps | fuses) & ADF_POWERGATE_DC)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+       return capabilities;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
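
As the comment above notes, a set legfuse bit means the corresponding slice is fused off, so capabilities are derived by starting from the full set and clearing bits. A standalone worked example of the same masking; the mask values are illustrative, not the real ICP_ACCEL_MASK_* definitions:

#include <stdio.h>
#include <stdint.h>

#define CAP_SYM		(1u << 0)
#define CAP_ASYM	(1u << 1)
#define CAP_COMP	(1u << 2)

#define FUSE_CIPHER_OFF	(1u << 0)
#define FUSE_PKE_OFF	(1u << 1)

int main(void)
{
	uint32_t caps = CAP_SYM | CAP_ASYM | CAP_COMP;
	uint32_t legfuses = FUSE_PKE_OFF;	/* asym slice fused off */

	if (legfuses & FUSE_CIPHER_OFF)
		caps &= ~CAP_SYM;
	if (legfuses & FUSE_PKE_OFF)
		caps &= ~CAP_ASYM;

	printf("capabilities: 0x%x\n", caps);	/* sym + compression left */
	return 0;
}
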
+
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+       u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+       unsigned long accel_mask = hw_data->accel_mask;
+       u32 i = 0;
+
+       /* Configures WDT timers */
+       for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+               /* Enable WDT for sym and dc */
+               ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
+               /* Enable WDT for pke */
+               ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
+       }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
new file mode 100644 (file)
index 0000000..e4bc075
--- /dev/null
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2020 Intel Corporation */
+#ifndef ADF_GEN2_HW_DATA_H_
+#define ADF_GEN2_HW_DATA_H_
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+
+/* Transport access */
+#define ADF_BANK_INT_SRC_SEL_MASK_0    0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X    0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG       0x000
+#define ADF_RING_CSR_RING_LBASE                0x040
+#define ADF_RING_CSR_RING_UBASE                0x080
+#define ADF_RING_CSR_RING_HEAD         0x0C0
+#define ADF_RING_CSR_RING_TAIL         0x100
+#define ADF_RING_CSR_E_STAT            0x14C
+#define ADF_RING_CSR_INT_FLAG          0x170
+#define ADF_RING_CSR_INT_SRCSEL                0x174
+#define ADF_RING_CSR_INT_SRCSEL_2      0x178
+#define ADF_RING_CSR_INT_COL_EN                0x17C
+#define ADF_RING_CSR_INT_COL_CTL       0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL  0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
+#define ADF_RING_BUNDLE_SIZE           0x1000
+#define ADF_GEN2_RX_RINGS_OFFSET       8
+#define ADF_GEN2_TX_RINGS_MASK         0xFF
+
+#define BUILD_RING_BASE_ADDR(addr, size) \
+       (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+       u32 l_base = 0, u_base = 0; \
+       l_base = (u32)((value) & 0xFFFFFFFF); \
+       u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \
+} while (0)
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_INT_FLAG, value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+       ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+       ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_INT_COL_CTL, \
+                  ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_INT_FLAG_AND_COL, value)
+
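
BUILD_RING_BASE_ADDR() encodes a ring base by expressing the DMA address in 64-byte units and aligning it down to the ring size: GENMASK_ULL(63, 0) << size clears the low size bits. A userspace worked example of the same arithmetic:

#include <stdio.h>
#include <stdint.h>

static uint64_t build_ring_base(uint64_t addr, unsigned int size)
{
	return (addr >> 6) & (~0ULL << size);	/* GENMASK_ULL(63, 0) == ~0ULL */
}

int main(void)
{
	/* 0x12345040 in 64-byte units is 0x48d141; aligned for size 2 */
	printf("0x%llx\n",
	       (unsigned long long)build_ring_base(0x12345040ULL, 2));
	return 0;
}
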
+/* AE to function map */
+#define AE2FUNCTION_MAP_A_OFFSET       (0x3A400 + 0x190)
+#define AE2FUNCTION_MAP_B_OFFSET       (0x3A400 + 0x310)
+#define AE2FUNCTION_MAP_REG_SIZE       4
+#define AE2FUNCTION_MAP_VALID          BIT(7)
+
+#define READ_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index) \
+       ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
+                  AE2FUNCTION_MAP_REG_SIZE * (index))
+#define WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
+       ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
+                  AE2FUNCTION_MAP_REG_SIZE * (index), value)
+#define READ_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index) \
+       ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
+                  AE2FUNCTION_MAP_REG_SIZE * (index))
+#define WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
+       ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
+                  AE2FUNCTION_MAP_REG_SIZE * (index), value)
+
+/* Admin Interface Offsets */
+#define ADF_ADMINMSGUR_OFFSET  (0x3A000 + 0x574)
+#define ADF_ADMINMSGLR_OFFSET  (0x3A000 + 0x578)
+#define ADF_MAILBOX_BASE_OFFSET        0x20970
+
+/* Arbiter configuration */
+#define ADF_ARB_OFFSET                 0x30000
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET   0x180
+#define ADF_ARB_CONFIG                 (BIT(31) | BIT(6) | BIT(0))
+#define ADF_ARB_REG_SLOT               0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET    0x19C
+
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+       (ADF_ARB_REG_SLOT * (index)), value)
+
+/* Power gating */
+#define ADF_POWERGATE_DC               BIT(23)
+#define ADF_POWERGATE_PKE              BIT(24)
+
+/* Default ring mapping */
+#define ADF_GEN2_DEFAULT_RING_TO_SRV_MAP \
+       (CRYPTO << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
+        CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
+        UNUSED << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
+          COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
+
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products, but this
+ * value corresponds to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE      0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE  0x2000000
+#define ADF_SSMWDT_OFFSET              0x54
+#define ADF_SSMWDTPKE_OFFSET           0x58
+#define ADF_SSMWDT(i)          (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
+#define ADF_SSMWDTPKE(i)       (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
+
+/* Error detection and correction */
+#define ADF_GEN2_AE_CTX_ENABLES(i)     ((i) * 0x1000 + 0x20818)
+#define ADF_GEN2_AE_MISC_CONTROL(i)    ((i) * 0x1000 + 0x20960)
+#define ADF_GEN2_ENABLE_AE_ECC_ERR     BIT(28)
+#define ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR     (BIT(24) | BIT(12))
+#define ADF_GEN2_UERRSSMSH(i)          ((i) * 0x4000 + 0x18)
+#define ADF_GEN2_CERRSSMSH(i)          ((i) * 0x4000 + 0x10)
+#define ADF_GEN2_ERRSSMSH_EN           BIT(3)
+
+/* Interrupts */
+#define ADF_GEN2_SMIAPF0_MASK_OFFSET    (0x3A000 + 0x28)
+#define ADF_GEN2_SMIAPF1_MASK_OFFSET    (0x3A000 + 0x30)
+#define ADF_GEN2_SMIA1_MASK             0x1
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
+void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
+                          int num_a_regs, int num_b_regs);
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
+void adf_gen2_get_arb_info(struct arb_info *arb_info);
+void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
+u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
new file mode 100644 (file)
index 0000000..70ef119
--- /dev/null
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen2_pfvf.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_vf_proto.h"
+#include "adf_pfvf_utils.h"
+
+/* VF2PF interrupts */
+#define ADF_GEN2_VF_MSK                        0xFFFF
+#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask)        (((vf_mask) & ADF_GEN2_VF_MSK) << 9)
+
+#define ADF_GEN2_PF_PF2VF_OFFSET(i)    (0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_GEN2_VF_PF2VF_OFFSET       0x200
+
+#define ADF_GEN2_CSR_IN_USE            0x6AC2
+#define ADF_GEN2_CSR_IN_USE_MASK       0xFFFE
+
+enum gen2_csr_pos {
+       ADF_GEN2_CSR_PF2VF_OFFSET       =  0,
+       ADF_GEN2_CSR_VF2PF_OFFSET       = 16,
+};
+
+#define ADF_PFVF_GEN2_MSGTYPE_SHIFT    2
+#define ADF_PFVF_GEN2_MSGTYPE_MASK     0x0F
+#define ADF_PFVF_GEN2_MSGDATA_SHIFT    6
+#define ADF_PFVF_GEN2_MSGDATA_MASK     0x3FF
+
+static const struct pfvf_csr_format csr_gen2_fmt = {
+       { ADF_PFVF_GEN2_MSGTYPE_SHIFT, ADF_PFVF_GEN2_MSGTYPE_MASK },
+       { ADF_PFVF_GEN2_MSGDATA_SHIFT, ADF_PFVF_GEN2_MSGDATA_MASK },
+};
+
+#define ADF_PFVF_MSG_RETRY_DELAY       5
+#define ADF_PFVF_MSG_MAX_RETRIES       3
+
+static u32 adf_gen2_pf_get_pfvf_offset(u32 i)
+{
+       return ADF_GEN2_PF_PF2VF_OFFSET(i);
+}
+
+static u32 adf_gen2_vf_get_pfvf_offset(u32 i)
+{
+       return ADF_GEN2_VF_PF2VF_OFFSET;
+}
+
+static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+       /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+       if (vf_mask & ADF_GEN2_VF_MSK) {
+               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+                         & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+       }
+}
+
+static void adf_gen2_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+       u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+                 | ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+}
+
+static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       u32 sources, disabled, pending;
+       u32 errsou3, errmsk3;
+
+       /* Get the interrupt sources triggered by VFs */
+       errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+       sources = ADF_GEN2_ERR_REG_VF2PF(errsou3);
+
+       if (!sources)
+               return 0;
+
+       /* Get the already disabled interrupts */
+       errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+       disabled = ADF_GEN2_ERR_REG_VF2PF(errmsk3);
+
+       pending = sources & ~disabled;
+       if (!pending)
+               return 0;
+
+       /* Due to HW limitations, when disabling the interrupts, we can't
+        * just disable the requested sources, as this would lead to missed
+        * interrupts if ERRSOU3 changes just before writing to ERRMSK3.
+        * To work around it, disable all sources and re-enable only those
+        * that are not pending and were not already disabled. Re-enabling will
+        * trigger a new interrupt for the sources that have changed in the
+        * meantime, if any.
+        */
+       errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+       errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+       /* Return the sources of the (new) interrupt(s) */
+       return pending;
+}
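
The disable-all-then-re-enable dance above is easier to follow with small numbers. A standalone illustration using 16-bit values in place of the ADF_GEN2_ERR_MSK_VF2PF() shifts; a set mask bit means that VF's interrupt is disabled:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t sources  = 0x0005;	/* VF0 and VF2 raised interrupts */
	uint16_t disabled = 0x0004;	/* VF2 was already masked */
	uint16_t pending  = sources & ~disabled;	/* 0x0001: only VF0 */
	uint16_t errmsk;

	errmsk = 0xFFFF;		/* step 1: mask every VF */
	errmsk &= sources | disabled;	/* step 2: re-enable uninvolved VFs */

	printf("pending 0x%04x, final mask 0x%04x\n", pending, errmsk);
	return 0;
}
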
+
+static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset)
+{
+       return ADF_PFVF_INT << offset;
+}
+
+static u32 gen2_csr_msg_to_position(u32 csr_msg, enum gen2_csr_pos offset)
+{
+       return (csr_msg & 0xFFFF) << offset;
+}
+
+static u32 gen2_csr_msg_from_position(u32 csr_val, enum gen2_csr_pos offset)
+{
+       return (csr_val >> offset) & 0xFFFF;
+}
+
+static bool gen2_csr_is_in_use(u32 msg, enum gen2_csr_pos offset)
+{
+       return ((msg >> offset) & ADF_GEN2_CSR_IN_USE_MASK) == ADF_GEN2_CSR_IN_USE;
+}
+
+static void gen2_csr_clear_in_use(u32 *msg, enum gen2_csr_pos offset)
+{
+       *msg &= ~(ADF_GEN2_CSR_IN_USE_MASK << offset);
+}
+
+static void gen2_csr_set_in_use(u32 *msg, enum gen2_csr_pos offset)
+{
+       *msg |= (ADF_GEN2_CSR_IN_USE << offset);
+}
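
These helpers pack both directions into one 32-bit CSR: one 16-bit half per direction, each carrying its own in-use marker. A userspace round-trip of the packing, with constants mirroring the definitions above:

#include <stdio.h>
#include <stdint.h>

#define PF2VF_OFF	0
#define VF2PF_OFF	16
#define CSR_IN_USE	0x6AC2u		/* mirrors ADF_GEN2_CSR_IN_USE */

int main(void)
{
	uint32_t csr = 0;
	uint16_t wire_msg = 0x1234;

	csr |= (uint32_t)wire_msg << PF2VF_OFF;	/* local half: the message */
	csr |= CSR_IN_USE << VF2PF_OFF;		/* remote half: claimed */

	printf("csr 0x%08x, local half 0x%04x\n",
	       csr, (csr >> PF2VF_OFF) & 0xFFFF);
	return 0;
}
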
+
+static bool is_legacy_user_pfvf_message(u32 msg)
+{
+       return !(msg & ADF_PFVF_MSGORIGIN_SYSTEM);
+}
+
+static bool is_pf2vf_notification(u8 msg_type)
+{
+       switch (msg_type) {
+       case ADF_PF2VF_MSGTYPE_RESTARTING:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool is_vf2pf_notification(u8 msg_type)
+{
+       switch (msg_type) {
+       case ADF_VF2PF_MSGTYPE_INIT:
+       case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+               return true;
+       default:
+               return false;
+       }
+}
+
+struct pfvf_gen2_params {
+       u32 pfvf_offset;
+       struct mutex *csr_lock; /* lock preventing concurrent access of CSR */
+       enum gen2_csr_pos local_offset;
+       enum gen2_csr_pos remote_offset;
+       bool (*is_notification_message)(u8 msg_type);
+       u8 compat_ver;
+};
+
+static int adf_gen2_pfvf_send(struct adf_accel_dev *accel_dev,
+                             struct pfvf_message msg,
+                             struct pfvf_gen2_params *params)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       enum gen2_csr_pos remote_offset = params->remote_offset;
+       enum gen2_csr_pos local_offset = params->local_offset;
+       unsigned int retries = ADF_PFVF_MSG_MAX_RETRIES;
+       struct mutex *lock = params->csr_lock;
+       u32 pfvf_offset = params->pfvf_offset;
+       u32 int_bit;
+       u32 csr_val;
+       u32 csr_msg;
+       int ret;
+
+       /* Gen2 messages, both PF->VF and VF->PF, are all 16 bits long. This
+        * allows us to build and read messages as if they were all zero-based.
+        * However, send and receive share a single 32-bit register, so the
+        * message half must be shifted and/or masked before decoding it and
+        * after encoding it. Which half to shift depends on the direction.
+        */
+
+       int_bit = gen2_csr_get_int_bit(local_offset);
+
+       csr_msg = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen2_fmt);
+       if (unlikely(!csr_msg))
+               return -EINVAL;
+
+       /* Prepare for CSR format, shifting the wire message in place and
+        * setting the in use pattern
+        */
+       csr_msg = gen2_csr_msg_to_position(csr_msg, local_offset);
+       gen2_csr_set_in_use(&csr_msg, remote_offset);
+
+       mutex_lock(lock);
+
+start:
+       /* Check if the PFVF CSR is in use by remote function */
+       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+       if (gen2_csr_is_in_use(csr_val, local_offset)) {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "PFVF CSR in use by remote function\n");
+               goto retry;
+       }
+
+       /* Attempt to get ownership of the PFVF CSR */
+       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_msg | int_bit);
+
+       /* Wait for confirmation from remote func it received the message */
+       ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & int_bit),
+                               ADF_PFVF_MSG_ACK_DELAY_US,
+                               ADF_PFVF_MSG_ACK_MAX_DELAY_US,
+                               true, pmisc_addr, pfvf_offset);
+       if (unlikely(ret < 0)) {
+               dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+               csr_val &= ~int_bit;
+       }
+
+       /* For fire-and-forget notifications, the receiver does not clear
+        * the in-use pattern. This is used to detect collisions.
+        */
+       if (params->is_notification_message(msg.type) && csr_val != csr_msg) {
+               /* Collision must have overwritten the message */
+               dev_err(&GET_DEV(accel_dev),
+                       "Collision on notification - PFVF CSR overwritten by remote function\n");
+               goto retry;
+       }
+
+       /* If the far side did not clear the in-use pattern it is either
+        * 1) Notification - message left intact to detect collision
+        * 2) Older protocol (compatibility version < 3) on the far side
+        *    where the sender is responsible for clearing the in-use
+        *    pattern after the receiver has acknowledged receipt.
+        * In either case, clear the in-use pattern now.
+        */
+       if (gen2_csr_is_in_use(csr_val, remote_offset)) {
+               gen2_csr_clear_in_use(&csr_val, remote_offset);
+               ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
+       }
+
+out:
+       mutex_unlock(lock);
+       return ret;
+
+retry:
+       if (--retries) {
+               msleep(ADF_PFVF_MSG_RETRY_DELAY);
+               goto start;
+       } else {
+               ret = -EBUSY;
+               goto out;
+       }
+}
+
+static struct pfvf_message adf_gen2_pfvf_recv(struct adf_accel_dev *accel_dev,
+                                             struct pfvf_gen2_params *params)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       enum gen2_csr_pos remote_offset = params->remote_offset;
+       enum gen2_csr_pos local_offset = params->local_offset;
+       u32 pfvf_offset = params->pfvf_offset;
+       struct pfvf_message msg = { 0 };
+       u32 int_bit;
+       u32 csr_val;
+       u16 csr_msg;
+
+       int_bit = gen2_csr_get_int_bit(local_offset);
+
+       /* Read message */
+       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+       if (!(csr_val & int_bit)) {
+               dev_info(&GET_DEV(accel_dev),
+                        "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
+               return msg;
+       }
+
+       /* Extract the message from the CSR */
+       csr_msg = gen2_csr_msg_from_position(csr_val, local_offset);
+
+       /* Ignore legacy non-system (non-kernel) messages */
+       if (unlikely(is_legacy_user_pfvf_message(csr_msg))) {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Ignored non-system message (0x%.8x);\n", csr_val);
+               /* Because this must be a legacy message, the far side
+                * must clear the in-use pattern, so don't clear it here.
+                */
+               return msg;
+       }
+
+       /* Return the pfvf_message format */
+       msg = adf_pfvf_message_of(accel_dev, csr_msg, &csr_gen2_fmt);
+
+       /* The in-use pattern is not cleared for notifications (so that
+        * it can be used for collision detection) or for older
+        * implementations.
+        */
+       if (params->compat_ver >= ADF_PFVF_COMPAT_FAST_ACK &&
+           !params->is_notification_message(msg.type))
+               gen2_csr_clear_in_use(&csr_val, remote_offset);
+
+       /* To ACK, clear the INT bit */
+       csr_val &= ~int_bit;
+       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
+
+       return msg;
+}
+
+static int adf_gen2_pf2vf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                              u32 pfvf_offset, struct mutex *csr_lock)
+{
+       struct pfvf_gen2_params params = {
+               .csr_lock = csr_lock,
+               .pfvf_offset = pfvf_offset,
+               .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+               .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+               .is_notification_message = is_pf2vf_notification,
+       };
+
+       return adf_gen2_pfvf_send(accel_dev, msg, &params);
+}
+
+static int adf_gen2_vf2pf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                              u32 pfvf_offset, struct mutex *csr_lock)
+{
+       struct pfvf_gen2_params params = {
+               .csr_lock = csr_lock,
+               .pfvf_offset = pfvf_offset,
+               .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+               .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+               .is_notification_message = is_vf2pf_notification,
+       };
+
+       return adf_gen2_pfvf_send(accel_dev, msg, &params);
+}
+
+static struct pfvf_message adf_gen2_pf2vf_recv(struct adf_accel_dev *accel_dev,
+                                              u32 pfvf_offset, u8 compat_ver)
+{
+       struct pfvf_gen2_params params = {
+               .pfvf_offset = pfvf_offset,
+               .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+               .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+               .is_notification_message = is_pf2vf_notification,
+               .compat_ver = compat_ver,
+       };
+
+       return adf_gen2_pfvf_recv(accel_dev, &params);
+}
+
+static struct pfvf_message adf_gen2_vf2pf_recv(struct adf_accel_dev *accel_dev,
+                                              u32 pfvf_offset, u8 compat_ver)
+{
+       struct pfvf_gen2_params params = {
+               .pfvf_offset = pfvf_offset,
+               .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+               .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+               .is_notification_message = is_vf2pf_notification,
+               .compat_ver = compat_ver,
+       };
+
+       return adf_gen2_pfvf_recv(accel_dev, &params);
+}
+
+void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+       pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset;
+       pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset;
+       pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
+       pfvf_ops->disable_all_vf2pf_interrupts = adf_gen2_disable_all_vf2pf_interrupts;
+       pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen2_disable_pending_vf2pf_interrupts;
+       pfvf_ops->send_msg = adf_gen2_pf2vf_send;
+       pfvf_ops->recv_msg = adf_gen2_vf2pf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_pf_pfvf_ops);
+
+void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_enable_vf2pf_comms;
+       pfvf_ops->get_pf2vf_offset = adf_gen2_vf_get_pfvf_offset;
+       pfvf_ops->get_vf2pf_offset = adf_gen2_vf_get_pfvf_offset;
+       pfvf_ops->send_msg = adf_gen2_vf2pf_send;
+       pfvf_ops->recv_msg = adf_gen2_pf2vf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_vf_pfvf_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
new file mode 100644 (file)
index 0000000..a716545
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_GEN2_PFVF_H
+#define ADF_GEN2_PFVF_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
+#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
+
+#if defined(CONFIG_PCI_IOV)
+void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+#else
+static inline void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+
+static inline void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+#endif
+
+#endif /* ADF_GEN2_PFVF_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
new file mode 100644 (file)
index 0000000..5859238
--- /dev/null
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_20_comp.h"
+#include "adf_gen4_dc.h"
+
+static void qat_comp_build_deflate(void *ctx)
+{
+       struct icp_qat_fw_comp_req *req_tmpl =
+                               (struct icp_qat_fw_comp_req *)ctx;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+       struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
+       struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
+       struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
+       u32 upper_val;
+       u32 lower_val;
+
+       memset(req_tmpl, 0, sizeof(*req_tmpl));
+       header->hdr_flags =
+               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+       header->comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
+                                           QAT_COMN_PTR_TYPE_SGL);
+       header->serv_specif_flags =
+               ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+                                           ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+       hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
+       hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
+       hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
+       hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
+       hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
+       hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
+       hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
+       hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
+
+       upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
+       lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
+
+       cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+       cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
+
+       req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
+       req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
+       req_pars->req_par_flags =
+               ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
+                                                     ICP_QAT_FW_COMP_EOP,
+                                                     ICP_QAT_FW_COMP_BFINAL,
+                                                     ICP_QAT_FW_COMP_CNV,
+                                                     ICP_QAT_FW_COMP_CNV_RECOVERY,
+                                                     ICP_QAT_FW_COMP_NO_CNV_DFX,
+                                                     ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+                                                     ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+                                                     ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+                                                     ICP_QAT_FW_COMP_NO_APPEND_CRC,
+                                                     ICP_QAT_FW_COMP_NO_DROP_DATA);
+
+       /* Fill second half of the template for decompression */
+       memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
+       req_tmpl++;
+       header = &req_tmpl->comn_hdr;
+       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+       cd_pars = &req_tmpl->cd_pars;
+
+       hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
+       lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
+
+       cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+       cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+}
+
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+       dc_ops->build_deflate_ctx = qat_comp_build_deflate;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
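A minimal sketch of the contract build_deflate_ctx() implements; the caller below is hypothetical, but the layout follows the memcpy(req_tmpl + 1, ...) above - ctx must provide room for two icp_qat_fw_comp_req templates, compress at index 0 and decompress at index 1:

#include "adf_gen4_dc.h"
#include "icp_qat_fw_comp.h"

static void example_build_dc_templates(void)
{
        static struct icp_qat_fw_comp_req tmpl[2];
        struct adf_dc_ops dc_ops;

        adf_gen4_init_dc_ops(&dc_ops);
        dc_ops.build_deflate_ctx(tmpl);
        /* tmpl[0] now describes a stateless static-deflate compress request,
         * tmpl[1] the matching deflate decompress request. */
}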
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
new file mode 100644 (file)
index 0000000..0b1a677
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN4_DC_H
+#define ADF_GEN4_DC_H
+
+#include "adf_accel_devices.h"
+
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
+
+#endif /* ADF_GEN4_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
new file mode 100644 (file)
index 0000000..3148a62
--- /dev/null
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 Intel Corporation */
+#include <linux/iopoll.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_hw_data.h"
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+       return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+       return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               u32 value)
+{
+       WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+       return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               u32 value)
+{
+       WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+       return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                                 u32 value)
+{
+       WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               dma_addr_t addr)
+{
+       WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
+                              u32 value)
+{
+       WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+       WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+       WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+                                 u32 value)
+{
+       WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+                                      u32 value)
+{
+       WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+                                     u32 value)
+{
+       WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+       csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+       csr_ops->read_csr_ring_head = read_csr_ring_head;
+       csr_ops->write_csr_ring_head = write_csr_ring_head;
+       csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+       csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+       csr_ops->read_csr_e_stat = read_csr_e_stat;
+       csr_ops->write_csr_ring_config = write_csr_ring_config;
+       csr_ops->write_csr_ring_base = write_csr_ring_base;
+       csr_ops->write_csr_int_flag = write_csr_int_flag;
+       csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+       csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+       csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+       csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+       csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
+
+static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
+                                              u32 *lower)
+{
+       *lower = lower_32_bits(value);
+       *upper = upper_32_bits(value);
+}
+
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+       u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+       u32 ssm_wdt_pke_high = 0;
+       u32 ssm_wdt_pke_low = 0;
+       u32 ssm_wdt_high = 0;
+       u32 ssm_wdt_low = 0;
+
+       /* Convert the 64-bit WDT timer values into 32-bit halves for
+        * MMIO writes to the 32-bit CSRs.
+        */
+       adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
+       adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
+                                   &ssm_wdt_pke_low);
+
+       /* Enable WDT for sym and dc */
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
+       /* Enable WDT for pke */
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
+
+int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);
+
+static int reset_ring_pair(void __iomem *csr, u32 bank_number)
+{
+       u32 status;
+       int ret;
+
+       /* Write BIT(0) of the rpresetctl register as 1.
+        * Since the rpresetctl registers have no RW fields, there is no need
+        * to preserve values for other bits; just write directly.
+        */
+       ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+                  ADF_WQM_CSR_RPRESETCTL_RESET);
+
+       /* Read rpresetsts register and wait for rp reset to complete */
+       ret = read_poll_timeout(ADF_CSR_RD, status,
+                               status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+                               ADF_RPRESET_POLL_DELAY_US,
+                               ADF_RPRESET_POLL_TIMEOUT_US, true,
+                               csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+       if (!ret) {
+               /* When rp reset is done, clear rpresetsts */
+               ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
+                          ADF_WQM_CSR_RPRESETSTS_STATUS);
+       }
+
+       return ret;
+}
+
+int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
+       void __iomem *csr;
+       int ret;
+
+       if (bank_number >= hw_data->num_banks)
+               return -EINVAL;
+
+       dev_dbg(&GET_DEV(accel_dev),
+               "ring pair reset for bank:%d\n", bank_number);
+
+       csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
+       ret = reset_ring_pair(csr, bank_number);
+       if (ret)
+               dev_err(&GET_DEV(accel_dev),
+                       "ring pair reset failed (timeout)\n");
+       else
+               dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
new file mode 100644 (file)
index 0000000..4fb4b3d
--- /dev/null
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2020 Intel Corporation */
+#ifndef ADF_GEN4_HW_CSR_DATA_H_
+#define ADF_GEN4_HW_CSR_DATA_H_
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+
+/* Transport access */
+#define ADF_BANK_INT_SRC_SEL_MASK      0x44UL
+#define ADF_RING_CSR_RING_CONFIG       0x1000
+#define ADF_RING_CSR_RING_LBASE                0x1040
+#define ADF_RING_CSR_RING_UBASE                0x1080
+#define ADF_RING_CSR_RING_HEAD         0x0C0
+#define ADF_RING_CSR_RING_TAIL         0x100
+#define ADF_RING_CSR_E_STAT            0x14C
+#define ADF_RING_CSR_INT_FLAG          0x170
+#define ADF_RING_CSR_INT_SRCSEL                0x174
+#define ADF_RING_CSR_INT_COL_CTL       0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL  0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
+#define ADF_RING_CSR_INT_COL_EN                0x17C
+#define ADF_RING_CSR_ADDR_OFFSET       0x100000
+#define ADF_RING_BUNDLE_SIZE           0x2000
+
+#define BUILD_RING_BASE_ADDR(addr, size) \
+       ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value)  \
+do { \
+       void __iomem *_csr_base_addr = csr_base_addr; \
+       u32 _bank = bank;                                               \
+       u32 _ring = ring;                                               \
+       dma_addr_t _value = value;                                      \
+       u32 l_base = 0, u_base = 0;                                     \
+       l_base = lower_32_bits(_value);                                 \
+       u_base = upper_32_bits(_value);                                 \
+       ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET,         \
+                  ADF_RING_BUNDLE_SIZE * (_bank) +                     \
+                  ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base);   \
+       ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET,         \
+                  ADF_RING_BUNDLE_SIZE * (_bank) +                     \
+                  ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base);   \
+} while (0)
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_INT_FLAG, (value))
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_INT_COL_EN, (value))
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_INT_COL_CTL, \
+                  ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_INT_FLAG_AND_COL, (value))
+
+/* Arbiter configuration */
+#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
+
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+/* Default ring mapping */
+#define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \
+       (ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
+         SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
+        ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
+         SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
+
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products, but these
+ * values should correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE      0x7000000ULL
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE  0x8000000
+#define ADF_SSMWDTL_OFFSET             0x54
+#define ADF_SSMWDTH_OFFSET             0x5C
+#define ADF_SSMWDTPKEL_OFFSET          0x58
+#define ADF_SSMWDTPKEH_OFFSET          0x60
+
+/* Ring reset */
+#define ADF_RPRESET_POLL_TIMEOUT_US    (5 * USEC_PER_SEC)
+#define ADF_RPRESET_POLL_DELAY_US      20
+#define ADF_WQM_CSR_RPRESETCTL_RESET   BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL(bank)   (0x6000 + ((bank) << 3))
+#define ADF_WQM_CSR_RPRESETSTS_STATUS  BIT(0)
+#define ADF_WQM_CSR_RPRESETSTS(bank)   (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+
+/* Error source registers */
+#define ADF_GEN4_ERRSOU0       (0x41A200)
+#define ADF_GEN4_ERRSOU1       (0x41A204)
+#define ADF_GEN4_ERRSOU2       (0x41A208)
+#define ADF_GEN4_ERRSOU3       (0x41A20C)
+
+/* Error source mask registers */
+#define ADF_GEN4_ERRMSK0       (0x41A210)
+#define ADF_GEN4_ERRMSK1       (0x41A214)
+#define ADF_GEN4_ERRMSK2       (0x41A218)
+#define ADF_GEN4_ERRMSK3       (0x41A21C)
+
+#define ADF_GEN4_VFLNOTIFY     BIT(7)
+
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
+#endif /* ADF_GEN4_HW_CSR_DATA_H_ */
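The BUILD_RING_BASE_ADDR() arithmetic is easier to see with concrete numbers; a worked example with illustrative values:

/* The macro clears the low (size + 6) bits of the DMA address, aligning
 * the ring base down to a 2^(size + 6) byte boundary (the low 6 bits
 * reflect the 64-byte granularity of the CSR).
 */
static u64 example_ring_base(void)
{
        dma_addr_t addr = 0x12345678;
        u32 size = 4;   /* ring of 2^(4 + 6) = 1024 bytes */

        /* ((0x12345678 >> 6) & (~0ULL << 4)) << 6 == 0x12345400 */
        return BUILD_RING_BASE_ADDR(addr, size);
}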
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
new file mode 100644 (file)
index 0000000..8e8efe9
--- /dev/null
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_utils.h"
+
+#define ADF_4XXX_PF2VM_OFFSET(i)       (0x40B010 + ((i) * 0x20))
+#define ADF_4XXX_VM2PF_OFFSET(i)       (0x40B014 + ((i) * 0x20))
+
+/* VF2PF interrupt source registers */
+#define ADF_4XXX_VM2PF_SOU             0x41A180
+#define ADF_4XXX_VM2PF_MSK             0x41A1C0
+#define ADF_GEN4_VF_MSK                        0xFFFF
+
+#define ADF_PFVF_GEN4_MSGTYPE_SHIFT    2
+#define ADF_PFVF_GEN4_MSGTYPE_MASK     0x3F
+#define ADF_PFVF_GEN4_MSGDATA_SHIFT    8
+#define ADF_PFVF_GEN4_MSGDATA_MASK     0xFFFFFF
+
+static const struct pfvf_csr_format csr_gen4_fmt = {
+       { ADF_PFVF_GEN4_MSGTYPE_SHIFT, ADF_PFVF_GEN4_MSGTYPE_MASK },
+       { ADF_PFVF_GEN4_MSGDATA_SHIFT, ADF_PFVF_GEN4_MSGDATA_MASK },
+};
+
+static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
+{
+       return ADF_4XXX_PF2VM_OFFSET(i);
+}
+
+static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
+{
+       return ADF_4XXX_VM2PF_OFFSET(i);
+}
+
+static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+       u32 val;
+
+       val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
+       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
+}
+
+static void adf_gen4_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
+}
+
+static u32 adf_gen4_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       u32 sources, disabled, pending;
+
+       /* Get the interrupt sources triggered by VFs */
+       sources = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
+       if (!sources)
+               return 0;
+
+       /* Get the already disabled interrupts */
+       disabled = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
+
+       pending = sources & ~disabled;
+       if (!pending)
+               return 0;
+
+       /* Due to HW limitations, when disabling the interrupts, we can't
+        * just disable the requested sources, as this would lead to missed
+        * interrupts if VM2PF_SOU changes just before writing to VM2PF_MSK.
+        * To work around it, disable all and re-enable only the sources that
+        * are not currently pending and were not already disabled.
+        * Re-enabling will trigger a new interrupt for the sources that have
+        * changed in the meantime, if any.
+        */
+       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, disabled | sources);
+
+       /* Return the sources of the (new) interrupt(s) */
+       return pending;
+}
+
+static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
+                             struct pfvf_message msg, u32 pfvf_offset,
+                             struct mutex *csr_lock)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       u32 csr_val;
+       int ret;
+
+       csr_val = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen4_fmt);
+       if (unlikely(!csr_val))
+               return -EINVAL;
+
+       mutex_lock(csr_lock);
+
+       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val | ADF_PFVF_INT);
+
+       /* Wait for confirmation from remote that it received the message */
+       ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & ADF_PFVF_INT),
+                               ADF_PFVF_MSG_ACK_DELAY_US,
+                               ADF_PFVF_MSG_ACK_MAX_DELAY_US,
+                               true, pmisc_addr, pfvf_offset);
+       if (ret < 0)
+               dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+
+       mutex_unlock(csr_lock);
+       return ret;
+}
+
+static struct pfvf_message adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev,
+                                             u32 pfvf_offset, u8 compat_ver)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       struct pfvf_message msg = { 0 };
+       u32 csr_val;
+
+       /* Read message from the CSR */
+       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+       if (!(csr_val & ADF_PFVF_INT)) {
+               dev_info(&GET_DEV(accel_dev),
+                        "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
+               return msg;
+       }
+
+       /* We can now acknowledge the message reception by clearing the
+        * interrupt bit.
+        */
+       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val & ~ADF_PFVF_INT);
+
+       /* Return the pfvf_message format */
+       return adf_pfvf_message_of(accel_dev, csr_val, &csr_gen4_fmt);
+}
+
+void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+       pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset;
+       pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset;
+       pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts;
+       pfvf_ops->disable_all_vf2pf_interrupts = adf_gen4_disable_all_vf2pf_interrupts;
+       pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen4_disable_pending_vf2pf_interrupts;
+       pfvf_ops->send_msg = adf_gen4_pfvf_send;
+       pfvf_ops->recv_msg = adf_gen4_pfvf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_pf_pfvf_ops);
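A worked example of the mask sequence in adf_gen4_disable_pending_vf2pf_interrupts(), with illustrative register values:

static u32 example_pending_vf2pf(void)
{
        u32 sources  = 0x0005;  /* VF0 and VF2 raised VF2PF interrupts */
        u32 disabled = 0x0004;  /* VF2 was already masked */
        u32 pending  = sources & ~disabled;     /* 0x0001: only VF0 is new */

        /* The function then masks all VFs (ADF_GEN4_VF_MSK) and re-enables
         * everything except disabled | sources == 0x0005, so VF0 and VF2
         * stay masked until their messages have been handled. */
        return pending;
}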
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
new file mode 100644 (file)
index 0000000..17d1b77
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_GEN4_PFVF_H
+#define ADF_GEN4_PFVF_H
+
+#include "adf_accel_devices.h"
+
+#ifdef CONFIG_PCI_IOV
+void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+#else
+static inline void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+#endif
+
+#endif /* ADF_GEN4_PFVF_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
new file mode 100644 (file)
index 0000000..7037c08
--- /dev/null
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pm.h"
+#include "adf_cfg_strings.h"
+#include "icp_qat_fw_init_admin.h"
+#include "adf_gen4_hw_data.h"
+#include "adf_cfg.h"
+
+enum qat_pm_host_msg {
+       PM_NO_CHANGE = 0,
+       PM_SET_MIN,
+};
+
+struct adf_gen4_pm_data {
+       struct work_struct pm_irq_work;
+       struct adf_accel_dev *accel_dev;
+       u32 pm_int_sts;
+};
+
+static int send_host_msg(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+       u32 msg;
+
+       msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
+       if (msg & ADF_GEN4_PM_MSG_PENDING)
+               return -EBUSY;
+
+       /* Send HOST_MSG */
+       msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN);
+       msg |= ADF_GEN4_PM_MSG_PENDING;
+       ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
+
+       /* Poll status register to make sure the HOST_MSG has been processed */
+       return read_poll_timeout(ADF_CSR_RD, msg,
+                               !(msg & ADF_GEN4_PM_MSG_PENDING),
+                               ADF_GEN4_PM_MSG_POLL_DELAY_US,
+                               ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc,
+                               ADF_GEN4_PM_HOST_MSG);
+}
+
+static void pm_bh_handler(struct work_struct *work)
+{
+       struct adf_gen4_pm_data *pm_data =
+               container_of(work, struct adf_gen4_pm_data, pm_irq_work);
+       struct adf_accel_dev *accel_dev = pm_data->accel_dev;
+       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+       u32 pm_int_sts = pm_data->pm_int_sts;
+       u32 val;
+
+       /* PM Idle interrupt */
+       if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
+               /* Issue host message to FW */
+               if (send_host_msg(accel_dev))
+                       dev_warn_ratelimited(&GET_DEV(accel_dev),
+                                            "Failed to send host msg to FW\n");
+       }
+
+       /* Clear interrupt status */
+       ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);
+
+       /* Re-enable PM interrupt */
+       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+       val &= ~ADF_GEN4_PM_SOU;
+       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+       kfree(pm_data);
+}
+
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+       struct adf_gen4_pm_data *pm_data = NULL;
+       u32 errsou2;
+       u32 errmsk2;
+       u32 val;
+
+       /* Only handle the interrupt triggered by PM */
+       errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+       if (errmsk2 & ADF_GEN4_PM_SOU)
+               return false;
+
+       errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2);
+       if (!(errsou2 & ADF_GEN4_PM_SOU))
+               return false;
+
+       /* Disable interrupt */
+       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+       val |= ADF_GEN4_PM_SOU;
+       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+       val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+
+       pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC);
+       if (!pm_data)
+               return false;
+
+       pm_data->pm_int_sts = val;
+       pm_data->accel_dev = accel_dev;
+
+       INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler);
+       adf_misc_wq_queue_work(&pm_data->pm_irq_work);
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt);
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+       int ret;
+       u32 val;
+
+       ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);
+       if (ret)
+               return ret;
+
+       /* Enable default PM interrupts: IDLE, THROTTLE */
+       val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+       val |= ADF_GEN4_PM_INT_EN_DEFAULT;
+
+       /* Clear interrupt status */
+       val |= ADF_GEN4_PM_INT_STS_MASK;
+       ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val);
+
+       /* Unmask PM Interrupt */
+       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+       val &= ~ADF_GEN4_PM_SOU;
+       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_pm);
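The HOST_MSG value assembled by send_host_msg() can be traced by hand; a minimal sketch, reusing the PM_SET_MIN enum value (1) defined above:

#include <linux/bitfield.h>

static u32 example_pm_host_msg(void)
{
        /* PM_SET_MIN lands in bit 1 of GENMASK(28, 1) and the pending flag
         * is BIT(0), so the register is written with 0x3. */
        u32 msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN);

        return msg | ADF_GEN4_PM_MSG_PENDING;
}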
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
new file mode 100644 (file)
index 0000000..f8f8a9e
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN4_PM_H
+#define ADF_GEN4_PM_H
+
+#include "adf_accel_devices.h"
+
+/* Power management registers */
+#define ADF_GEN4_PM_HOST_MSG (0x50A01C)
+
+/* Power management */
+#define ADF_GEN4_PM_POLL_DELAY_US      20
+#define ADF_GEN4_PM_POLL_TIMEOUT_US    USEC_PER_SEC
+#define ADF_GEN4_PM_MSG_POLL_DELAY_US  (10 * USEC_PER_MSEC)
+#define ADF_GEN4_PM_STATUS             (0x50A00C)
+#define ADF_GEN4_PM_INTERRUPT          (0x50A028)
+
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_GEN4_PM_SOU                        BIT(18)
+
+#define ADF_GEN4_PM_IDLE_INT_EN                BIT(18)
+#define ADF_GEN4_PM_THROTTLE_INT_EN    BIT(19)
+#define ADF_GEN4_PM_DRV_ACTIVE         BIT(20)
+#define ADF_GEN4_PM_INIT_STATE         BIT(21)
+#define ADF_GEN4_PM_INT_EN_DEFAULT     (ADF_GEN4_PM_IDLE_INT_EN | \
+                                       ADF_GEN4_PM_THROTTLE_INT_EN)
+
+#define ADF_GEN4_PM_THR_STS    BIT(0)
+#define ADF_GEN4_PM_IDLE_STS   BIT(1)
+#define ADF_GEN4_PM_FW_INT_STS BIT(2)
+#define ADF_GEN4_PM_INT_STS_MASK (ADF_GEN4_PM_THR_STS | \
+                                ADF_GEN4_PM_IDLE_STS | \
+                                ADF_GEN4_PM_FW_INT_STS)
+
+#define ADF_GEN4_PM_MSG_PENDING                        BIT(0)
+#define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK       GENMASK(28, 1)
+
+#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER                (0x0)
+#define ADF_GEN4_PM_MAX_IDLE_FILTER            (0x7)
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_GEN4_PM_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
new file mode 100644 (file)
index 0000000..da69566
--- /dev/null
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport_internal.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REG_SIZE 0x4
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, arb_offset, index, value) \
+       ADF_CSR_WR(csr_addr, (arb_offset) + \
+       (ADF_ARB_REG_SIZE * (index)), value)
+
+#define WRITE_CSR_ARB_WT2SAM(csr_addr, arb_offset, wt_offset, index, value) \
+       ADF_CSR_WR(csr_addr, ((arb_offset) + (wt_offset)) + \
+       (ADF_ARB_REG_SIZE * (index)), value)
+
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+       unsigned long ae_mask = hw_data->ae_mask;
+       u32 arb_off, wt_off, arb_cfg;
+       const u32 *thd_2_arb_cfg;
+       struct arb_info info;
+       int arb, i;
+
+       hw_data->get_arb_info(&info);
+       arb_cfg = info.arb_cfg;
+       arb_off = info.arb_offset;
+       wt_off = info.wt2sam_offset;
+
+       /* Service arbiter configured for 32-byte responses and
+        * ring flow control check enabled.
+        */
+       for (arb = 0; arb < ADF_ARB_NUM; arb++)
+               WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);
+
+       /* Map worker threads to service arbiters */
+       thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev);
+
+       for_each_set_bit(i, &ae_mask, hw_data->num_engines)
+               WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+void adf_update_ring_arb(struct adf_etr_ring_data *ring)
+{
+       struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+       u32 tx_ring_mask = hw_data->tx_rings_mask;
+       u32 shift = hw_data->tx_rx_gap;
+       u32 arben, arben_tx, arben_rx;
+       u32 rx_ring_mask;
+
+       /*
+        * Enable arbitration on a ring only if the TX half of the ring mask
+        * matches the RX half. This results in a CSR write on both the TX
+        * and the RX update - only one is necessary, but both are done for
+        * simplicity.
+        */
+       rx_ring_mask = tx_ring_mask << shift;
+       arben_tx = (ring->bank->ring_mask & tx_ring_mask) >> 0;
+       arben_rx = (ring->bank->ring_mask & rx_ring_mask) >> shift;
+       arben = arben_tx & arben_rx;
+
+       csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
+                                          ring->bank->bank_number, arben);
+}
+
+void adf_exit_arb(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+       u32 arb_off, wt_off;
+       struct arb_info info;
+       void __iomem *csr;
+       unsigned int i;
+
+       hw_data->get_arb_info(&info);
+       arb_off = info.arb_offset;
+       wt_off = info.wt2sam_offset;
+
+       if (!accel_dev->transport)
+               return;
+
+       csr = accel_dev->transport->banks[0].csr_addr;
+
+       /* Reset arbiter configuration */
+       for (i = 0; i < ADF_ARB_NUM; i++)
+               WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);
+
+       /* Unmap worker threads to service arbiters */
+       for (i = 0; i < hw_data->num_engines; i++)
+               WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);
+
+       /* Disable arbitration on all rings */
+       for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
+               csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
+}
+EXPORT_SYMBOL_GPL(adf_exit_arb);
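The arbitration-enable mask computed by adf_update_ring_arb() is easiest to follow with concrete numbers; the hardware parameters below (tx_rings_mask = 0xFF, tx_rx_gap = 8) are illustrative, the real values come from hw_data:

static u32 example_ring_arb_mask(void)
{
        u32 tx_ring_mask = 0xFF;
        u32 shift = 8;
        u32 ring_mask = 0x0101; /* TX ring 0 plus its RX partner, ring 8 */
        u32 rx_ring_mask = tx_ring_mask << shift;
        u32 arben_tx = ring_mask & tx_ring_mask;                /* 0x01 */
        u32 arben_rx = (ring_mask & rx_ring_mask) >> shift;     /* 0x01 */

        /* Arbitration is enabled only where both halves of a pair are up */
        return arben_tx & arben_rx;     /* 0x01: ring pair 0 */
}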
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
new file mode 100644 (file)
index 0000000..0985f64
--- /dev/null
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(service_table);
+static DEFINE_MUTEX(service_lock);
+
+static void adf_service_add(struct service_hndl *service)
+{
+       mutex_lock(&service_lock);
+       list_add(&service->list, &service_table);
+       mutex_unlock(&service_lock);
+}
+
+int adf_service_register(struct service_hndl *service)
+{
+       memset(service->init_status, 0, sizeof(service->init_status));
+       memset(service->start_status, 0, sizeof(service->start_status));
+       adf_service_add(service);
+       return 0;
+}
+
+static void adf_service_remove(struct service_hndl *service)
+{
+       mutex_lock(&service_lock);
+       list_del(&service->list);
+       mutex_unlock(&service_lock);
+}
+
+int adf_service_unregister(struct service_hndl *service)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
+               if (service->init_status[i] || service->start_status[i]) {
+                       pr_err("QAT: Could not remove active service\n");
+                       return -EFAULT;
+               }
+       }
+       adf_service_remove(service);
+       return 0;
+}
+
+/**
+ * adf_dev_init() - Init data structures and services for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Initialize the ring data structures and the admin comms and arbitration
+ * services.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_dev_init(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int ret;
+
+       if (!hw_data) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to init device - hw_data not set\n");
+               return -EFAULT;
+       }
+
+       if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
+           !accel_dev->is_vf) {
+               dev_err(&GET_DEV(accel_dev), "Device not configured\n");
+               return -EFAULT;
+       }
+
+       if (adf_init_etr_data(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
+               return -EFAULT;
+       }
+
+       if (hw_data->init_device && hw_data->init_device(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
+               return -EFAULT;
+       }
+
+       if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
+               return -EFAULT;
+       }
+
+       if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
+               return -EFAULT;
+       }
+
+       if (adf_ae_init(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to initialise Acceleration Engine\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
+
+       if (adf_ae_fw_load(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to load acceleration FW\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+
+       if (hw_data->alloc_irq(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+
+       hw_data->enable_ints(accel_dev);
+       hw_data->enable_error_correction(accel_dev);
+
+       ret = hw_data->pfvf_ops.enable_comms(accel_dev);
+       if (ret)
+               return ret;
+
+       if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
+           accel_dev->is_vf) {
+               if (qat_crypto_vf_dev_config(accel_dev))
+                       return -EFAULT;
+       }
+
+       /*
+        * Subservice initialisation is divided into two stages: init and start.
+        * This is to facilitate any ordering dependencies between services
+        * prior to starting any of the accelerators.
+        */
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to initialise service %s\n",
+                               service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, service->init_status);
+       }
+
+       return 0;
+}
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device-specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+       if (adf_ae_start(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+
+       if (hw_data->send_admin_init(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
+               return -EFAULT;
+       }
+
+       /* Set SSM watchdog timer */
+       if (hw_data->set_ssm_wdtimer)
+               hw_data->set_ssm_wdtimer(accel_dev);
+
+       /* Enable Power Management */
+       if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
+               return -EFAULT;
+       }
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to start service %s\n",
+                               service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, service->start_status);
+       }
+
+       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+       set_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+       if (!list_empty(&accel_dev->crypto_list) &&
+           (qat_algs_register() || qat_asym_algs_register())) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to register crypto algs\n");
+               set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+               clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+               return -EFAULT;
+       }
+
+       if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to register compression algs\n");
+               set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+               clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+               return -EFAULT;
+       }
+       return 0;
+}
+
+/**
+ * adf_dev_stop() - Stop acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is shutting down.
+ * To be used by QAT device-specific drivers.
+ *
+ * Return: void
+ */
+static void adf_dev_stop(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+       bool wait = false;
+       int ret;
+
+       if (!adf_dev_started(accel_dev) &&
+           !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
+               return;
+
+       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+       clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+       if (!list_empty(&accel_dev->crypto_list)) {
+               qat_algs_unregister();
+               qat_asym_algs_unregister();
+       }
+
+       if (!list_empty(&accel_dev->compression_list))
+               qat_comp_algs_unregister();
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!test_bit(accel_dev->accel_id, service->start_status))
+                       continue;
+               ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
+               if (!ret) {
+                       clear_bit(accel_dev->accel_id, service->start_status);
+               } else if (ret == -EAGAIN) {
+                       wait = true;
+                       clear_bit(accel_dev->accel_id, service->start_status);
+               }
+       }
+
+       if (wait)
+               msleep(100);
+
+       if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
+               if (adf_ae_stop(accel_dev))
+                       dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
+               else
+                       clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+       }
+}
+
+/**
+ * adf_dev_shutdown() - shut down acceleration services and data structures
+ * @accel_dev: Pointer to acceleration device
+ *
+ * Cleanup the ring data structures and the admin comms and arbitration
+ * services.
+ */
+static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       if (!hw_data) {
+               dev_err(&GET_DEV(accel_dev),
+                       "QAT: Failed to shutdown device - hw_data not set\n");
+               return;
+       }
+
+       if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
+               adf_ae_fw_release(accel_dev);
+               clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+       }
+
+       if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
+               if (adf_ae_shutdown(accel_dev))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to shutdown Accel Engine\n");
+               else
+                       clear_bit(ADF_STATUS_AE_INITIALISED,
+                                 &accel_dev->status);
+       }
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!test_bit(accel_dev->accel_id, service->init_status))
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to shutdown service %s\n",
+                               service->name);
+               else
+                       clear_bit(accel_dev->accel_id, service->init_status);
+       }
+
+       hw_data->disable_iov(accel_dev);
+
+       if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
+               hw_data->free_irq(accel_dev);
+               clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+       }
+
+       /* Delete configuration only if not restarting */
+       if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+               adf_cfg_del_all(accel_dev);
+
+       if (hw_data->exit_arb)
+               hw_data->exit_arb(accel_dev);
+
+       if (hw_data->exit_admin_comms)
+               hw_data->exit_admin_comms(accel_dev);
+
+       adf_cleanup_etr_data(accel_dev);
+       adf_dev_restore(accel_dev);
+}
+
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to restart service %s.\n",
+                               service->name);
+       }
+       return 0;
+}
+
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to restart service %s.\n",
+                               service->name);
+       }
+       return 0;
+}
+
+static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
+{
+       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+       int ret;
+
+       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+                                     ADF_SERVICES_ENABLED, services);
+
+       adf_dev_stop(accel_dev);
+       adf_dev_shutdown(accel_dev);
+
+       if (!ret) {
+               ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+               if (ret)
+                       return ret;
+
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+                                                 ADF_SERVICES_ENABLED,
+                                                 services, ADF_STR);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
+{
+       int ret = 0;
+
+       if (!accel_dev)
+               return -EINVAL;
+
+       mutex_lock(&accel_dev->state_lock);
+
+       if (!adf_dev_started(accel_dev)) {
+               dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
+                        accel_dev->accel_id);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (reconfig) {
+               ret = adf_dev_shutdown_cache_cfg(accel_dev);
+               goto out;
+       }
+
+       adf_dev_stop(accel_dev);
+       adf_dev_shutdown(accel_dev);
+
+out:
+       mutex_unlock(&accel_dev->state_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_down);
+
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
+{
+       int ret = 0;
+
+       if (!accel_dev)
+               return -EINVAL;
+
+       mutex_lock(&accel_dev->state_lock);
+
+       if (adf_dev_started(accel_dev)) {
+               dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
+                        accel_dev->accel_id);
+               ret = -EALREADY;
+               goto out;
+       }
+
+       if (config && GET_HW_DATA(accel_dev)->dev_config) {
+               ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+               if (unlikely(ret))
+                       goto out;
+       }
+
+       ret = adf_dev_init(accel_dev);
+       if (unlikely(ret))
+               goto out;
+
+       ret = adf_dev_start(accel_dev);
+
+out:
+       mutex_unlock(&accel_dev->state_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_up);
+
+int adf_dev_restart(struct adf_accel_dev *accel_dev)
+{
+       int ret = 0;
+
+       if (!accel_dev)
+               return -EFAULT;
+
+       adf_dev_down(accel_dev, false);
+
+       ret = adf_dev_up(accel_dev, false);
+       /* If the device is already up, return success */
+       if (ret == -EALREADY)
+               return 0;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_restart);
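To make the two-stage init/start scheme concrete, a minimal sketch of a subservice; the handler signature is inferred from the event_hld calls in this file, and the service below is hypothetical:

static int example_event_hld(struct adf_accel_dev *accel_dev,
                             enum adf_event event)
{
        switch (event) {
        case ADF_EVENT_INIT:    /* allocate per-device state */
        case ADF_EVENT_START:   /* device is usable from here on */
                return 0;
        case ADF_EVENT_STOP:    /* quiesce; -EAGAIN requests a grace period */
        case ADF_EVENT_SHUTDOWN:        /* release per-device state */
                return 0;
        default:
                return 0;
        }
}

static struct service_hndl example_service = {
        .name = "example_service",
        .event_hld = example_event_hld,
};

/* adf_service_register(&example_service) adds it to service_table, after
 * which adf_dev_init() and adf_dev_start() invoke the handler per device. */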
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
new file mode 100644 (file)
index 0000000..ad9e135
--- /dev/null
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
+
+#define ADF_MAX_NUM_VFS        32
+static struct workqueue_struct *adf_misc_wq;
+
+static int adf_enable_msix(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
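+       /* One MSI-X vector per ring bank plus one for the AE cluster */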
+       u32 msix_num_entries = hw_data->num_banks + 1;
+       int ret;
+
+       if (hw_data->set_msix_rttable)
+               hw_data->set_msix_rttable(accel_dev);
+
+       ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
+                                   msix_num_entries, PCI_IRQ_MSIX);
+       if (unlikely(ret < 0)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to allocate %d MSI-X vectors\n",
+                       msix_num_entries);
+               return ret;
+       }
+       return 0;
+}
+
+static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
+{
+       pci_free_irq_vectors(pci_dev_info->pci_dev);
+}
+
+static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
+{
+       struct adf_etr_bank_data *bank = bank_ptr;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
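+       /* Disable bank interrupts until the response tasklet has run */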
+       csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
+                                           0);
+       tasklet_hi_schedule(&bank->resp_handler);
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PCI_IOV
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+       GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
+       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+       GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr);
+       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       u32 pending;
+
+       spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+       pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
+       spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
+
+       return pending;
+}
+
+static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
+{
+       bool irq_handled = false;
+       unsigned long vf_mask;
+
+       /* Get the interrupt sources triggered by VFs, except for those already disabled */
+       vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
+       if (vf_mask) {
+               struct adf_accel_vf_info *vf_info;
+               int i;
+
+               /*
+                * Handle VF2PF interrupt unless the VF is malicious and
+                * is attempting to flood the host OS with VF2PF interrupts.
+                */
+               for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
+                       vf_info = accel_dev->pf.vf_info + i;
+
+                       if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+                               dev_info(&GET_DEV(accel_dev),
+                                        "Too many ints from VF%d\n",
+                                         vf_info->vf_nr);
+                               continue;
+                       }
+
+                       adf_schedule_vf2pf_handler(vf_info);
+                       irq_handled = true;
+               }
+       }
+       return irq_handled;
+}
+#endif /* CONFIG_PCI_IOV */
+
+static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+       if (hw_data->handle_pm_interrupt &&
+           hw_data->handle_pm_interrupt(accel_dev))
+               return true;
+
+       return false;
+}
+
+static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+{
+       struct adf_accel_dev *accel_dev = dev_ptr;
+
+#ifdef CONFIG_PCI_IOV
+       /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+       if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
+               return IRQ_HANDLED;
+#endif /* CONFIG_PCI_IOV */
+
+       if (adf_handle_pm_int(accel_dev))
+               return IRQ_HANDLED;
+
+       dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
+               accel_dev->accel_id);
+
+       return IRQ_NONE;
+}
+
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       int clust_irq = hw_data->num_banks;
+       int irq, i = 0;
+
+       if (pci_dev_info->msix_entries.num_entries > 1) {
+               for (i = 0; i < hw_data->num_banks; i++) {
+                       if (irqs[i].enabled) {
+                               irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+                               irq_set_affinity_hint(irq, NULL);
+                               free_irq(irq, &etr_data->banks[i]);
+                       }
+               }
+       }
+
+       if (irqs[i].enabled) {
+               irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+               free_irq(irq, accel_dev);
+       }
+}
+
+static int adf_request_irqs(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       int clust_irq = hw_data->num_banks;
+       int ret, irq, i = 0;
+       char *name;
+
+       /* Request MSI-X IRQs for all banks unless SR-IOV is enabled */
+       if (!accel_dev->pf.vf_info) {
+               for (i = 0; i < hw_data->num_banks; i++) {
+                       struct adf_etr_bank_data *bank = &etr_data->banks[i];
+                       unsigned int cpu, cpus = num_online_cpus();
+
+                       name = irqs[i].name;
+                       snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+                                "qat%d-bundle%d", accel_dev->accel_id, i);
+                       irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+                       if (unlikely(irq < 0)) {
+                               dev_err(&GET_DEV(accel_dev),
+                                       "Failed to get IRQ number of device vector %d - %s\n",
+                                       i, name);
+                               ret = irq;
+                               goto err;
+                       }
+                       ret = request_irq(irq, adf_msix_isr_bundle, 0,
+                                         &name[0], bank);
+                       if (ret) {
+                               dev_err(&GET_DEV(accel_dev),
+                                       "Failed to allocate IRQ %d for %s\n",
+                                       irq, name);
+                               goto err;
+                       }
+
+                       cpu = ((accel_dev->accel_id * hw_data->num_banks) +
+                              i) % cpus;
+                       irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+                       irqs[i].enabled = true;
+               }
+       }
+
+       /* Request msix irq for AE */
+       name = irqs[i].name;
+       snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+                "qat%d-ae-cluster", accel_dev->accel_id);
+       irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+       if (unlikely(irq < 0)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to get IRQ number of device vector %d - %s\n",
+                       i, name);
+               ret = irq;
+               goto err;
+       }
+       ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to allocate IRQ %d for %s\n", irq, name);
+               goto err;
+       }
+       irqs[i].enabled = true;
+       return ret;
+err:
+       adf_free_irqs(accel_dev);
+       return ret;
+}
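
The affinity hint above spreads bundle vectors round-robin across online CPUs, offsetting each device by its accel_id so multiple accelerators do not stack onto the same cores. A worked sketch with purely illustrative numbers:

    /* Illustrative only: 2 devices x 8 banks on a 16-CPU host.
     * device 0, bank 3 -> (0 * 8 + 3) % 16 = CPU 3
     * device 1, bank 3 -> (1 * 8 + 3) % 16 = CPU 11
     */
    cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % num_online_cpus();
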
+
+static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 msix_num_entries = 1;
+       struct adf_irq *irqs;
+
+       /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
+       if (!accel_dev->pf.vf_info)
+               msix_num_entries += hw_data->num_banks;
+
+       irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
+                           GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
+       if (!irqs)
+               return -ENOMEM;
+
+       accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
+       accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
+       return 0;
+}
+
+static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
+{
+       kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
+       accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int i;
+
+       for (i = 0; i < hw_data->num_banks; i++)
+               tasklet_init(&priv_data->banks[i].resp_handler,
+                            adf_response_handler,
+                            (unsigned long)&priv_data->banks[i]);
+       return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int i;
+
+       for (i = 0; i < hw_data->num_banks; i++) {
+               tasklet_disable(&priv_data->banks[i].resp_handler);
+               tasklet_kill(&priv_data->banks[i].resp_handler);
+       }
+}
+
+/**
+ * adf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function frees interrupts for acceleration device.
+ */
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+       adf_free_irqs(accel_dev);
+       adf_cleanup_bh(accel_dev);
+       adf_disable_msix(&accel_dev->accel_pci_dev);
+       adf_isr_free_msix_vectors_data(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_isr_resource_free);
+
+/**
+ * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function allocates interrupts for acceleration device.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       ret = adf_isr_alloc_msix_vectors_data(accel_dev);
+       if (ret)
+               goto err_out;
+
+       ret = adf_enable_msix(accel_dev);
+       if (ret)
+               goto err_free_msix_table;
+
+       ret = adf_setup_bh(accel_dev);
+       if (ret)
+               goto err_disable_msix;
+
+       ret = adf_request_irqs(accel_dev);
+       if (ret)
+               goto err_cleanup_bh;
+
+       return 0;
+
+err_cleanup_bh:
+       adf_cleanup_bh(accel_dev);
+
+err_disable_msix:
+       adf_disable_msix(&accel_dev->accel_pci_dev);
+
+err_free_msix_table:
+       adf_isr_free_msix_vectors_data(accel_dev);
+
+err_out:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
+
+/**
+ * adf_init_misc_wq() - Init misc workqueue
+ *
+ * Function initializes the general-purpose workqueue 'qat_misc_wq'.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int __init adf_init_misc_wq(void)
+{
+       adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
+
+       return !adf_misc_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_misc_wq(void)
+{
+       if (adf_misc_wq)
+               destroy_workqueue(adf_misc_wq);
+
+       adf_misc_wq = NULL;
+}
+
+bool adf_misc_wq_queue_work(struct work_struct *work)
+{
+       return queue_work(adf_misc_wq, work);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
new file mode 100644 (file)
index 0000000..204a424
--- /dev/null
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#ifndef ADF_PFVF_MSG_H
+#define ADF_PFVF_MSG_H
+
+#include <linux/bits.h>
+
+/*
+ * PF<->VF Gen2 Messaging format
+ *
+ * The PF has an array of 32-bit PF2VF registers, one for each VF. The
+ * PF can access all these registers while each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register functionally is split into two parts:
+ * The bottom half is for PF->VF messages. In particular when the first
+ * bit of this register (bit 0) gets set an interrupt will be triggered
+ * in the respective VF.
+ * The top half is for VF->PF messages. In particular when the first bit
+ * of this half of register (bit 16) gets set an interrupt will be triggered
+ * in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and implement a collision control mechanism to prevent concurrent use of
+ * the PF2VF register by both the PF and VF.
+ *
+ *  31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   VF2PF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ *  15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   PF2VF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see function
+ * adf_gen2_pfvf_send() in adf_pf2vf_msg.c).
+ *
+ *
+ * PF<->VF Gen4 Messaging format
+ *
+ * Similarly to the gen2 messaging format, 32-bit long registers are used for
+ * communication between PF and VFs. However, each VF and the PF share a pair
+ * of 32-bit registers to avoid collisions: one for PF to VF messages and one
+ * for VF to PF messages.
+ *
+ * Both the Interrupt bit and the Message Origin bit retain the same position
+ * and meaning, although non-system messages are now deprecated and not
+ * expected.
+ *
+ *  31 30              9  8  7  6  5  4  3  2  1  0
+ *  _______________________________________________
+ * |  |  |   . . .   |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \_____________________/ \_______________/  ^  ^
+ *             ^                     ^         |  |
+ *             |                     |         |  PF/VF Int
+ *             |                     |         Message Origin
+ *             |                     Message Type
+ *             Message-specific Data/Reserved
+ *
+ * For both formats, the message reception is acknowledged by lowering the
+ * interrupt bit on the register where the message was sent.
+ */
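
To make the Gen2 half-register layout concrete, here is a minimal packing sketch for a VF->PF word, assuming the illustrative bit positions from the diagram above (interrupt at bit 16, origin at bit 17, a 4-bit type, then data); the real per-generation masks are defined elsewhere and may differ:

    /* Hypothetical field layout, for illustration only */
    #define EX_VF2PF_INT        BIT(16)
    #define EX_VF2PF_ORIGIN     BIT(17)  /* "system" message origin */
    #define EX_VF2PF_TYPE_MASK  GENMASK(21, 18)
    #define EX_VF2PF_DATA_MASK  GENMASK(31, 22)

    static u32 ex_pack_vf2pf(u8 type, u32 data)
    {
            return EX_VF2PF_INT | EX_VF2PF_ORIGIN |
                   FIELD_PREP(EX_VF2PF_TYPE_MASK, type) |
                   FIELD_PREP(EX_VF2PF_DATA_MASK, data);
    }

Raising bit 16 is what triggers the interrupt in the PF; the receiver acknowledges by clearing that same bit.
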
+
+/* PFVF message common bits */
+#define ADF_PFVF_INT                           BIT(0)
+#define ADF_PFVF_MSGORIGIN_SYSTEM              BIT(1)
+
+/* Different generations have different CSR layouts; use this struct
+ * to abstract these differences away.
+ */
+struct pfvf_message {
+       u8 type;
+       u32 data;
+};
+
+/* PF->VF messages */
+enum pf2vf_msgtype {
+       ADF_PF2VF_MSGTYPE_RESTARTING            = 0x01,
+       ADF_PF2VF_MSGTYPE_VERSION_RESP          = 0x02,
+       ADF_PF2VF_MSGTYPE_BLKMSG_RESP           = 0x03,
+/* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
+       ADF_PF2VF_MSGTYPE_RP_RESET_RESP         = 0x10,
+};
+
+/* VF->PF messages */
+enum vf2pf_msgtype {
+       ADF_VF2PF_MSGTYPE_INIT                  = 0x03,
+       ADF_VF2PF_MSGTYPE_SHUTDOWN              = 0x04,
+       ADF_VF2PF_MSGTYPE_VERSION_REQ           = 0x05,
+       ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ        = 0x06,
+       ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ       = 0x07,
+       ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ      = 0x08,
+       ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ       = 0x09,
+/* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */
+       ADF_VF2PF_MSGTYPE_RP_RESET              = 0x10,
+};
+
+/* VF/PF compatibility version. */
+enum pfvf_compatibility_version {
+       /* Support for extended capabilities */
+       ADF_PFVF_COMPAT_CAPABILITIES            = 0x02,
+       /* In-use pattern cleared by receiver */
+       ADF_PFVF_COMPAT_FAST_ACK                = 0x03,
+       /* Ring to service mapping support for non-standard mappings */
+       ADF_PFVF_COMPAT_RING_TO_SVC_MAP         = 0x04,
+       /* Reference to the latest version */
+       ADF_PFVF_COMPAT_THIS_VERSION            = 0x04,
+};
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK       GENMASK(7, 0)
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK     GENMASK(9, 8)
+
+enum pf2vf_compat_response {
+       ADF_PF2VF_VF_COMPATIBLE                 = 0x01,
+       ADF_PF2VF_VF_INCOMPATIBLE               = 0x02,
+       ADF_PF2VF_VF_COMPAT_UNKNOWN             = 0x03,
+};
+
+enum ring_reset_result {
+       RPRESET_SUCCESS                         = 0x00,
+       RPRESET_NOT_SUPPORTED                   = 0x01,
+       RPRESET_INVAL_BANK                      = 0x02,
+       RPRESET_TIMEOUT                         = 0x03,
+};
+
+#define ADF_VF2PF_RNG_RESET_RP_MASK            GENMASK(1, 0)
+#define ADF_VF2PF_RNG_RESET_RSVD_MASK          GENMASK(25, 2)
+
+/* PF->VF Block Responses */
+#define ADF_PF2VF_BLKMSG_RESP_TYPE_MASK                GENMASK(1, 0)
+#define ADF_PF2VF_BLKMSG_RESP_DATA_MASK                GENMASK(9, 2)
+
+enum pf2vf_blkmsg_resp_type {
+       ADF_PF2VF_BLKMSG_RESP_TYPE_DATA         = 0x00,
+       ADF_PF2VF_BLKMSG_RESP_TYPE_CRC          = 0x01,
+       ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR        = 0x02,
+};
+
+/* PF->VF Block Error Code */
+enum pf2vf_blkmsg_error {
+       ADF_PF2VF_INVALID_BLOCK_TYPE            = 0x00,
+       ADF_PF2VF_INVALID_BYTE_NUM_REQ          = 0x01,
+       ADF_PF2VF_PAYLOAD_TRUNCATED             = 0x02,
+       ADF_PF2VF_UNSPECIFIED_ERROR             = 0x03,
+};
+
+/* VF->PF Block Requests */
+#define ADF_VF2PF_LARGE_BLOCK_TYPE_MASK                GENMASK(1, 0)
+#define ADF_VF2PF_LARGE_BLOCK_BYTE_MASK                GENMASK(8, 2)
+#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK       GENMASK(2, 0)
+#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK       GENMASK(8, 3)
+#define ADF_VF2PF_SMALL_BLOCK_TYPE_MASK                GENMASK(3, 0)
+#define ADF_VF2PF_SMALL_BLOCK_BYTE_MASK                GENMASK(8, 4)
+#define ADF_VF2PF_BLOCK_CRC_REQ_MASK           BIT(9)
+
+/* VF->PF Block Request Types
+ *  0..15 - 32 byte message
+ * 16..23 - 64 byte message
+ * 24..27 - 128 byte message
+ */
+enum vf2pf_blkmsg_req_type {
+       ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY        = 0x02,
+       ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP       = 0x03,
+};
+
+#define ADF_VF2PF_SMALL_BLOCK_TYPE_MAX \
+               (FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK))
+
+#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX \
+               (FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK) + \
+               ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1)
+
+#define ADF_VF2PF_LARGE_BLOCK_TYPE_MAX \
+               (FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK) + \
+               ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX)
+
+#define ADF_VF2PF_SMALL_BLOCK_BYTE_MAX \
+               FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK)
+
+#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX \
+               FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK)
+
+#define ADF_VF2PF_LARGE_BLOCK_BYTE_MAX \
+               FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK)
+
+struct pfvf_blkmsg_header {
+       u8 version;
+       u8 payload_size;
+} __packed;
+
+#define ADF_PFVF_BLKMSG_HEADER_SIZE            (sizeof(struct pfvf_blkmsg_header))
+#define ADF_PFVF_BLKMSG_PAYLOAD_SIZE(blkmsg)   (sizeof(blkmsg) - \
+                                                       ADF_PFVF_BLKMSG_HEADER_SIZE)
+#define ADF_PFVF_BLKMSG_MSG_SIZE(blkmsg)       (ADF_PFVF_BLKMSG_HEADER_SIZE + \
+                                                       (blkmsg)->hdr.payload_size)
+#define ADF_PFVF_BLKMSG_MSG_MAX_SIZE           128
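
Expanding the macro arithmetic above gives the concrete type and byte ranges. The following illustrative build-time checks (assuming <linux/bitfield.h> for FIELD_MAX) spell out the expected expansions:

    /* Expected expansions, per the GENMASK/FIELD_MAX arithmetic above:
     * small block types cover 0..15, medium 16..23, large 24..26.
     */
    static_assert(ADF_VF2PF_SMALL_BLOCK_TYPE_MAX == 15);
    static_assert(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX == 23);
    static_assert(ADF_VF2PF_LARGE_BLOCK_TYPE_MAX == 26);
    static_assert(ADF_VF2PF_SMALL_BLOCK_BYTE_MAX == 31);   /* 32-byte messages */
    static_assert(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX == 63);  /* 64-byte messages */
    static_assert(ADF_VF2PF_LARGE_BLOCK_BYTE_MAX == 127);  /* 128-byte messages */
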
+
+/* PF->VF Block message header bytes */
+#define ADF_PFVF_BLKMSG_VER_BYTE               0
+#define ADF_PFVF_BLKMSG_LEN_BYTE               1
+
+/* PF/VF Capabilities message values */
+enum blkmsg_capabilities_versions {
+       ADF_PFVF_CAPABILITIES_V1_VERSION        = 0x01,
+       ADF_PFVF_CAPABILITIES_V2_VERSION        = 0x02,
+       ADF_PFVF_CAPABILITIES_V3_VERSION        = 0x03,
+};
+
+struct capabilities_v1 {
+       struct pfvf_blkmsg_header hdr;
+       u32 ext_dc_caps;
+} __packed;
+
+struct capabilities_v2 {
+       struct pfvf_blkmsg_header hdr;
+       u32 ext_dc_caps;
+       u32 capabilities;
+} __packed;
+
+struct capabilities_v3 {
+       struct pfvf_blkmsg_header hdr;
+       u32 ext_dc_caps;
+       u32 capabilities;
+       u32 frequency;
+} __packed;
+
+/* PF/VF Ring to service mapping values */
+enum blkmsg_ring_to_svc_versions {
+       ADF_PFVF_RING_TO_SVC_VERSION            = 0x01,
+};
+
+struct ring_to_svc_map_v1 {
+       struct pfvf_blkmsg_header hdr;
+       u16 map;
+} __packed;
+
+#endif /* ADF_PFVF_MSG_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
new file mode 100644 (file)
index 0000000..14c069f
--- /dev/null
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_vf_info *vf;
+       struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
+       int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+       for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+               if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to send restarting msg to VF%d\n", i);
+       }
+}
+
+int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
+                                    u8 *buffer, u8 compat)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct capabilities_v2 caps_msg;
+
+       caps_msg.ext_dc_caps = hw_data->extended_dc_capabilities;
+       caps_msg.capabilities = hw_data->accel_capabilities_mask;
+
+       caps_msg.hdr.version = ADF_PFVF_CAPABILITIES_V2_VERSION;
+       caps_msg.hdr.payload_size =
+                       ADF_PFVF_BLKMSG_PAYLOAD_SIZE(struct capabilities_v2);
+
+       memcpy(buffer, &caps_msg, sizeof(caps_msg));
+
+       return 0;
+}
+
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+                                   u8 *buffer, u8 compat)
+{
+       struct ring_to_svc_map_v1 rts_map_msg;
+
+       rts_map_msg.map = accel_dev->hw_device->ring_to_svc_map;
+       rts_map_msg.hdr.version = ADF_PFVF_RING_TO_SVC_VERSION;
+       rts_map_msg.hdr.payload_size = ADF_PFVF_BLKMSG_PAYLOAD_SIZE(rts_map_msg);
+
+       memcpy(buffer, &rts_map_msg, sizeof(rts_map_msg));
+
+       return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
new file mode 100644 (file)
index 0000000..e8982d1
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_PF_MSG_H
+#define ADF_PFVF_PF_MSG_H
+
+#include "adf_accel_devices.h"
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+
+typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
+                                        u8 *buffer, u8 compat);
+
+int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
+                                    u8 *buffer, u8 compat);
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+                                   u8 *buffer, u8 compat);
+
+#endif /* ADF_PFVF_PF_MSG_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
new file mode 100644 (file)
index 0000000..388e58b
--- /dev/null
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_utils.h"
+
+typedef u8 (*pf2vf_blkmsg_data_getter_fn)(u8 const *blkmsg, u8 byte);
+
+static const adf_pf2vf_blkmsg_provider pf2vf_blkmsg_providers[] = {
+       NULL,                             /* no message type defined for value 0 */
+       NULL,                             /* no message type defined for value 1 */
+       adf_pf_capabilities_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY */
+       adf_pf_ring_to_svc_msg_provider,  /* ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP */
+};
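
Each entry in this table maps a block request type to the callback that serializes the corresponding message. A hypothetical additional provider would follow the same shape, writing a pfvf_blkmsg_header followed by its payload:

    /* Sketch of a provider for an imaginary future block type */
    static int ex_blkmsg_provider(struct adf_accel_dev *accel_dev,
                                  u8 *buffer, u8 compat)
    {
            struct pfvf_blkmsg_header *hdr = (struct pfvf_blkmsg_header *)buffer;

            hdr->version = 1;       /* illustrative version */
            hdr->payload_size = 0;  /* payload bytes would follow the header */
            return 0;
    }
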
+
+/**
+ * adf_send_pf2vf_msg() - send PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr:     VF number to which the message will be sent
+ * @msg:       Message to send
+ *
+ * This function allows the PF to send a message to a specific VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg)
+{
+       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+       u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(vf_nr);
+
+       return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
+                                 &accel_dev->pf.vf_info[vf_nr].pf2vf_lock);
+}
+
+/**
+ * adf_recv_vf2pf_msg() - receive a VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr:     Number of the VF from where the message will be received
+ *
+ * This function allows the PF to receive a message from a specific VF.
+ *
+ * Return: a valid message on success, zero otherwise.
+ */
+static struct pfvf_message adf_recv_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr)
+{
+       struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+       u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(vf_nr);
+
+       return pfvf_ops->recv_msg(accel_dev, pfvf_offset, vf_info->vf_compat_ver);
+}
+
+static adf_pf2vf_blkmsg_provider get_blkmsg_response_provider(u8 type)
+{
+       if (type >= ARRAY_SIZE(pf2vf_blkmsg_providers))
+               return NULL;
+
+       return pf2vf_blkmsg_providers[type];
+}
+
+/* Byte pf2vf_blkmsg_data_getter_fn callback */
+static u8 adf_pf2vf_blkmsg_get_byte(u8 const *blkmsg, u8 index)
+{
+       return blkmsg[index];
+}
+
+/* CRC pf2vf_blkmsg_data_getter_fn callback */
+static u8 adf_pf2vf_blkmsg_get_crc(u8 const *blkmsg, u8 count)
+{
+       /* count is 0-based, turn it into a length */
+       return adf_pfvf_calc_blkmsg_crc(blkmsg, count + 1);
+}
+
+static int adf_pf2vf_blkmsg_get_data(struct adf_accel_vf_info *vf_info,
+                                    u8 type, u8 byte, u8 max_size, u8 *data,
+                                    pf2vf_blkmsg_data_getter_fn data_getter)
+{
+       u8 blkmsg[ADF_PFVF_BLKMSG_MSG_MAX_SIZE] = { 0 };
+       struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+       adf_pf2vf_blkmsg_provider provider;
+       u8 msg_size;
+
+       provider = get_blkmsg_response_provider(type);
+
+       if (unlikely(!provider)) {
+               pr_err("QAT: No registered provider for message %d\n", type);
+               *data = ADF_PF2VF_INVALID_BLOCK_TYPE;
+               return -EINVAL;
+       }
+
+       if (unlikely((*provider)(accel_dev, blkmsg, vf_info->vf_compat_ver))) {
+               pr_err("QAT: unknown error from provider for message %d\n", type);
+               *data = ADF_PF2VF_UNSPECIFIED_ERROR;
+               return -EINVAL;
+       }
+
+       msg_size = ADF_PFVF_BLKMSG_HEADER_SIZE + blkmsg[ADF_PFVF_BLKMSG_LEN_BYTE];
+
+       if (unlikely(msg_size >= max_size)) {
+               pr_err("QAT: Invalid size %d provided for message type %d\n",
+                      msg_size, type);
+               *data = ADF_PF2VF_PAYLOAD_TRUNCATED;
+               return -EINVAL;
+       }
+
+       if (unlikely(byte >= msg_size)) {
+               pr_err("QAT: Out-of-bound byte number %d (msg size %d)\n",
+                      byte, msg_size);
+               *data = ADF_PF2VF_INVALID_BYTE_NUM_REQ;
+               return -EINVAL;
+       }
+
+       *data = data_getter(blkmsg, byte);
+       return 0;
+}
+
+static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info,
+                                            struct pfvf_message req)
+{
+       u8 resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR;
+       struct pfvf_message resp = { 0 };
+       u8 resp_data = 0;
+       u8 blk_type;
+       u8 blk_byte;
+       u8 byte_max;
+
+       switch (req.type) {
+       case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
+               blk_type = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK, req.data)
+                          + ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX + 1;
+               blk_byte = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, req.data);
+               byte_max = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
+               break;
+       case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
+               blk_type = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK, req.data)
+                          + ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1;
+               blk_byte = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, req.data);
+               byte_max = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
+               break;
+       case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
+               blk_type = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, req.data);
+               blk_byte = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, req.data);
+               byte_max = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
+               break;
+       }
+
+       /* Is this a request for CRC or data? */
+       if (FIELD_GET(ADF_VF2PF_BLOCK_CRC_REQ_MASK, req.data)) {
+               dev_dbg(&GET_DEV(vf_info->accel_dev),
+                       "BlockMsg of type %d for CRC over %d bytes received from VF%d\n",
+                       blk_type, blk_byte + 1, vf_info->vf_nr);
+
+               if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
+                                              byte_max, &resp_data,
+                                              adf_pf2vf_blkmsg_get_crc))
+                       resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_CRC;
+       } else {
+               dev_dbg(&GET_DEV(vf_info->accel_dev),
+                       "BlockMsg of type %d for data byte %d received from VF%d\n",
+                       blk_type, blk_byte, vf_info->vf_nr);
+
+               if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
+                                              byte_max, &resp_data,
+                                              adf_pf2vf_blkmsg_get_byte))
+                       resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_DATA;
+       }
+
+       resp.type = ADF_PF2VF_MSGTYPE_BLKMSG_RESP;
+       resp.data = FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp_type) |
+                   FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp_data);
+
+       return resp;
+}
+
+static struct pfvf_message handle_rp_reset_req(struct adf_accel_dev *accel_dev, u8 vf_nr,
+                                              struct pfvf_message req)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct pfvf_message resp = {
+               .type = ADF_PF2VF_MSGTYPE_RP_RESET_RESP,
+               .data = RPRESET_SUCCESS
+       };
+       u32 bank_number;
+       u32 rsvd_field;
+
+       bank_number = FIELD_GET(ADF_VF2PF_RNG_RESET_RP_MASK, req.data);
+       rsvd_field = FIELD_GET(ADF_VF2PF_RNG_RESET_RSVD_MASK, req.data);
+
+       dev_dbg(&GET_DEV(accel_dev),
+               "Ring Pair Reset Message received from VF%d for bank 0x%x\n",
+               vf_nr, bank_number);
+
+       if (!hw_data->ring_pair_reset || rsvd_field) {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Ring Pair Reset for VF%d is not supported\n", vf_nr);
+               resp.data = RPRESET_NOT_SUPPORTED;
+               goto out;
+       }
+
+       if (bank_number >= hw_data->num_banks_per_vf) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid bank number (0x%x) from VF%d for Ring Reset\n",
+                       bank_number, vf_nr);
+               resp.data = RPRESET_INVAL_BANK;
+               goto out;
+       }
+
+       /* Convert the VF provided value to PF bank number */
+       bank_number = vf_nr * hw_data->num_banks_per_vf + bank_number;
+       if (hw_data->ring_pair_reset(accel_dev, bank_number)) {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Ring pair reset for VF%d failure\n", vf_nr);
+               resp.data = RPRESET_TIMEOUT;
+               goto out;
+       }
+
+       dev_dbg(&GET_DEV(accel_dev),
+               "Ring pair reset for VF%d successfully\n", vf_nr);
+
+out:
+       return resp;
+}
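
The conversion above maps a VF-relative ring pair onto the PF's global bank numbering. With illustrative values, say num_banks_per_vf == 4:

    /* VF2 asking to reset its local bank 1 targets PF bank 2 * 4 + 1 == 9 */
    bank_number = vf_nr * hw_data->num_banks_per_vf + bank_number;
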
+
+static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
+                               struct pfvf_message msg, struct pfvf_message *resp)
+{
+       struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+
+       switch (msg.type) {
+       case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+               {
+               u8 vf_compat_ver = msg.data;
+               u8 compat;
+
+               dev_dbg(&GET_DEV(accel_dev),
+                       "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
+                       vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
+
+               if (vf_compat_ver == 0)
+                       compat = ADF_PF2VF_VF_INCOMPATIBLE;
+               else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
+                       compat = ADF_PF2VF_VF_COMPATIBLE;
+               else
+                       compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
+
+               vf_info->vf_compat_ver = vf_compat_ver;
+
+               resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
+               resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK,
+                                       ADF_PFVF_COMPAT_THIS_VERSION) |
+                            FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
+               }
+               break;
+       case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+               {
+               u8 compat;
+
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Legacy VersionRequest received from VF%d to PF (vers 1.1)\n",
+                       vf_nr);
+
+               /* legacy driver, VF compat_ver is 0 */
+               vf_info->vf_compat_ver = 0;
+
+               /* PF always newer than legacy VF */
+               compat = ADF_PF2VF_VF_COMPATIBLE;
+
+               /* Set legacy major and minor version to the latest, 1.1 */
+               resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
+               resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK, 0x11) |
+                            FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
+               }
+               break;
+       case ADF_VF2PF_MSGTYPE_INIT:
+               {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Init message received from VF%d\n", vf_nr);
+               vf_info->init = true;
+               }
+               break;
+       case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+               {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Shutdown message received from VF%d\n", vf_nr);
+               vf_info->init = false;
+               }
+               break;
+       case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
+       case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
+       case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
+               *resp = handle_blkmsg_req(vf_info, msg);
+               break;
+       case ADF_VF2PF_MSGTYPE_RP_RESET:
+               *resp = handle_rp_reset_req(accel_dev, vf_nr, msg);
+               break;
+       default:
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Unknown message from VF%d (type 0x%.4x, data: 0x%.4x)\n",
+                       vf_nr, msg.type, msg.data);
+               return -ENOMSG;
+       }
+
+       return 0;
+}
+
+bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+       struct pfvf_message req;
+       struct pfvf_message resp = {0};
+
+       req = adf_recv_vf2pf_msg(accel_dev, vf_nr);
+       if (!req.type)  /* Legacy or no message */
+               return true;
+
+       if (adf_handle_vf2pf_msg(accel_dev, vf_nr, req, &resp))
+               return false;
+
+       if (resp.type && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send response to VF%d\n", vf_nr);
+
+       return true;
+}
+
+/**
+ * adf_enable_pf2vf_comms() - Function enables communication from PF to VF
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function carries out the necessary steps to setup and start the PFVF
+ * communication channel, if any.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+{
+       adf_pfvf_crc_init();
+       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h
new file mode 100644 (file)
index 0000000..165d266
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_PF_PROTO_H
+#define ADF_PFVF_PF_PROTO_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg);
+
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_PFVF_PF_PROTO_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c
new file mode 100644 (file)
index 0000000..c5f6d77
--- /dev/null
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/crc8.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_utils.h"
+
+/* CRC Calculation */
+DECLARE_CRC8_TABLE(pfvf_crc8_table);
+#define ADF_PFVF_CRC8_POLYNOMIAL 0x97
+
+void adf_pfvf_crc_init(void)
+{
+       crc8_populate_msb(pfvf_crc8_table, ADF_PFVF_CRC8_POLYNOMIAL);
+}
+
+u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len)
+{
+       return crc8(pfvf_crc8_table, buf, buf_len, CRC8_INIT_VALUE);
+}
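
A short usage sketch (local names are illustrative; adf_pfvf_crc_init() must have populated the table first, as adf_enable_pf2vf_comms() and adf_enable_vf2pf_comms() both arrange):

    struct capabilities_v2 caps_msg;  /* assume a provider filled this in */
    u8 crc;

    /* CRC-8, polynomial 0x97, MSB-first, over header plus payload */
    crc = adf_pfvf_calc_blkmsg_crc((u8 *)&caps_msg,
                                   ADF_PFVF_BLKMSG_MSG_SIZE(&caps_msg));
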
+
+static bool set_value_on_csr_msg(struct adf_accel_dev *accel_dev, u32 *csr_msg,
+                                u32 value, const struct pfvf_field_format *fmt)
+{
+       if (unlikely((value & fmt->mask) != value)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "PFVF message value 0x%X out of range, %u max allowed\n",
+                       value, fmt->mask);
+               return false;
+       }
+
+       *csr_msg |= value << fmt->offset;
+
+       return true;
+}
+
+u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev,
+                       struct pfvf_message msg,
+                       const struct pfvf_csr_format *fmt)
+{
+       u32 csr_msg = 0;
+
+       if (!set_value_on_csr_msg(accel_dev, &csr_msg, msg.type, &fmt->type) ||
+           !set_value_on_csr_msg(accel_dev, &csr_msg, msg.data, &fmt->data))
+               return 0;
+
+       return csr_msg | ADF_PFVF_MSGORIGIN_SYSTEM;
+}
+
+struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 csr_msg,
+                                       const struct pfvf_csr_format *fmt)
+{
+       struct pfvf_message msg = { 0 };
+
+       msg.type = (csr_msg >> fmt->type.offset) & fmt->type.mask;
+       msg.data = (csr_msg >> fmt->data.offset) & fmt->data.mask;
+
+       if (unlikely(!msg.type))
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid PFVF msg with no type received\n");
+
+       return msg;
+}
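
Putting the two helpers together, a round trip through a hypothetical CSR layout (the field offsets and masks below are illustrative, not a real generation's format):

    static const struct pfvf_csr_format ex_fmt = {
            .type = { .offset = 2, .mask = 0x0f },   /* 4-bit type  */
            .data = { .offset = 6, .mask = 0x3ff },  /* 10-bit data */
    };

    static void ex_roundtrip(struct adf_accel_dev *accel_dev)
    {
            struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
            u32 csr = adf_pfvf_csr_msg_of(accel_dev, msg, &ex_fmt);
            struct pfvf_message back = adf_pfvf_message_of(accel_dev, csr, &ex_fmt);

            /* back.type == msg.type and back.data == msg.data, provided
             * both values fit within their masks.
             */
    }
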
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
new file mode 100644 (file)
index 0000000..2be048e
--- /dev/null
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_UTILS_H
+#define ADF_PFVF_UTILS_H
+
+#include <linux/types.h>
+#include "adf_pfvf_msg.h"
+
+/* How long to wait for far side to acknowledge receipt */
+#define ADF_PFVF_MSG_ACK_DELAY_US      4
+#define ADF_PFVF_MSG_ACK_MAX_DELAY_US  (1 * USEC_PER_SEC)
+
+u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len);
+void adf_pfvf_crc_init(void);
+
+struct pfvf_field_format {
+       u8  offset;
+       u32 mask;
+};
+
+struct pfvf_csr_format {
+       struct pfvf_field_format type;
+       struct pfvf_field_format data;
+};
+
+u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                       const struct pfvf_csr_format *fmt);
+struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg,
+                                       const struct pfvf_csr_format *fmt);
+
+#endif /* ADF_PFVF_UTILS_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
new file mode 100644 (file)
index 0000000..1141258
--- /dev/null
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_vf_msg.h"
+#include "adf_pfvf_vf_proto.h"
+
+/**
+ * adf_vf2pf_notify_init() - send init msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to a PF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
+{
+       struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
+
+       if (adf_send_vf2pf_msg(accel_dev, msg)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send Init event to PF\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
+
+/**
+ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to a PF
+ *
+ * Return: void
+ */
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
+{
+       struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_SHUTDOWN };
+
+       if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+               if (adf_send_vf2pf_msg(accel_dev, msg))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
+
+int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+       u8 pf_version;
+       int compat;
+       int ret;
+       struct pfvf_message resp;
+       struct pfvf_message msg = {
+               .type = ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ,
+               .data = ADF_PFVF_COMPAT_THIS_VERSION,
+       };
+
+       BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
+
+       ret = adf_send_vf2pf_req(accel_dev, msg, &resp);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send Compatibility Version Request.\n");
+               return ret;
+       }
+
+       pf_version = FIELD_GET(ADF_PF2VF_VERSION_RESP_VERS_MASK, resp.data);
+       compat = FIELD_GET(ADF_PF2VF_VERSION_RESP_RESULT_MASK, resp.data);
+
+       /* Response from PF received, check compatibility */
+       switch (compat) {
+       case ADF_PF2VF_VF_COMPATIBLE:
+               break;
+       case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+               /* VF is newer than PF - compatible for now */
+               break;
+       case ADF_PF2VF_VF_INCOMPATIBLE:
+               dev_err(&GET_DEV(accel_dev),
+                       "PF (vers %d) and VF (vers %d) are not compatible\n",
+                       pf_version, ADF_PFVF_COMPAT_THIS_VERSION);
+               return -EINVAL;
+       default:
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid response from PF; assume not compatible\n");
+               return -EINVAL;
+       }
+
+       accel_dev->vf.pf_compat_ver = pf_version;
+       return 0;
+}
+
+int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct capabilities_v3 cap_msg = { 0 };
+       unsigned int len = sizeof(cap_msg);
+
+       if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
+               /* The PF is too old to support the extended capabilities */
+               return 0;
+
+       if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY,
+                                     (u8 *)&cap_msg, &len)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "QAT: Failed to get block message response\n");
+               return -EFAULT;
+       }
+
+       switch (cap_msg.hdr.version) {
+       default:
+               /* Newer version received, handle only the known parts */
+               fallthrough;
+       case ADF_PFVF_CAPABILITIES_V3_VERSION:
+               if (likely(len >= sizeof(struct capabilities_v3)))
+                       hw_data->clock_frequency = cap_msg.frequency;
+               else
+                       dev_info(&GET_DEV(accel_dev), "Could not get frequency");
+               fallthrough;
+       case ADF_PFVF_CAPABILITIES_V2_VERSION:
+               if (likely(len >= sizeof(struct capabilities_v2)))
+                       hw_data->accel_capabilities_mask = cap_msg.capabilities;
+               else
+                       dev_info(&GET_DEV(accel_dev), "Could not get capabilities");
+               fallthrough;
+       case ADF_PFVF_CAPABILITIES_V1_VERSION:
+               if (likely(len >= sizeof(struct capabilities_v1))) {
+                       hw_data->extended_dc_capabilities = cap_msg.ext_dc_caps;
+               } else {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Capabilities message truncated to %d bytes\n", len);
+                       return -EFAULT;
+               }
+       }
+
+       return 0;
+}
+
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
+{
+       struct ring_to_svc_map_v1 rts_map_msg = { 0 };
+       unsigned int len = sizeof(rts_map_msg);
+
+       if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
+               /* Use already set default mappings */
+               return 0;
+
+       if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP,
+                                     (u8 *)&rts_map_msg, &len)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "QAT: Failed to get block message response\n");
+               return -EFAULT;
+       }
+
+       if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) {
+               dev_err(&GET_DEV(accel_dev),
+                       "RING_TO_SVC message truncated to %d bytes\n", len);
+               return -EFAULT;
+       }
+
+       /* Only v1 at present */
+       accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
+       return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h
new file mode 100644 (file)
index 0000000..71bc0e3
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_VF_MSG_H
+#define ADF_PFVF_VF_MSG_H
+
+#if defined(CONFIG_PCI_IOV)
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);
+#else
+static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
+static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+
+#endif /* ADF_PFVF_VF_MSG_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
new file mode 100644 (file)
index 0000000..1015155
--- /dev/null
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_utils.h"
+#include "adf_pfvf_vf_msg.h"
+#include "adf_pfvf_vf_proto.h"
+
+#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY    10
+#define ADF_PFVF_MSG_ACK_DELAY                 2
+#define ADF_PFVF_MSG_ACK_MAX_RETRY             100
+
+/* How often to retry if there is no response */
+#define ADF_PFVF_MSG_RESP_RETRIES      5
+#define ADF_PFVF_MSG_RESP_TIMEOUT      (ADF_PFVF_MSG_ACK_DELAY * \
+                                        ADF_PFVF_MSG_ACK_MAX_RETRY + \
+                                        ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
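
Plugging in the constants: each attempt can wait ADF_PFVF_MSG_ACK_DELAY * ADF_PFVF_MSG_ACK_MAX_RETRY + ADF_PFVF_MSG_COLLISION_DETECT_DELAY = 2 * 100 + 10 = 210 ms for a response, so with ADF_PFVF_MSG_RESP_RETRIES = 5 a request issued through adf_send_vf2pf_req() below can take roughly a second before failing with -EIO.
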
+
+/**
+ * adf_send_vf2pf_msg() - send VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @msg:       Message to send
+ *
+ * This function allows the VF to send a message to the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)
+{
+       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+       u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(0);
+
+       return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
+                                 &accel_dev->vf.vf2pf_lock);
+}
+
+/**
+ * adf_recv_pf2vf_msg() - receive a PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ *
+ * This function allows the VF to receive a message from the PF.
+ *
+ * Return: a valid message on success, zero otherwise.
+ */
+static struct pfvf_message adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev)
+{
+       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+       u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(0);
+
+       return pfvf_ops->recv_msg(accel_dev, pfvf_offset, accel_dev->vf.pf_compat_ver);
+}
+
+/**
+ * adf_send_vf2pf_req() - send VF2PF request message
+ * @accel_dev: Pointer to acceleration device.
+ * @msg:       Request message to send
+ * @resp:      Returned PF response
+ *
+ * This function sends a message that requires a response from the VF to the PF
+ * and waits for a reply.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                      struct pfvf_message *resp)
+{
+       unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
+       unsigned int retries = ADF_PFVF_MSG_RESP_RETRIES;
+       int ret;
+
+       reinit_completion(&accel_dev->vf.msg_received);
+
+       /* Send request from VF to PF */
+       do {
+               ret = adf_send_vf2pf_msg(accel_dev, msg);
+               if (ret) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to send request msg to PF\n");
+                       return ret;
+               }
+
+               /* Wait for response, if it times out retry */
+               ret = wait_for_completion_timeout(&accel_dev->vf.msg_received,
+                                                 timeout);
+               if (ret) {
+                       if (likely(resp))
+                               *resp = accel_dev->vf.response;
+
+                       /* Once copied, set to an invalid value */
+                       accel_dev->vf.response.type = 0;
+
+                       return 0;
+               }
+
+               dev_err(&GET_DEV(accel_dev), "PFVF response message timeout\n");
+       } while (--retries);
+
+       return -EIO;
+}
+
+static int adf_vf2pf_blkmsg_data_req(struct adf_accel_dev *accel_dev, bool crc,
+                                    u8 *type, u8 *data)
+{
+       struct pfvf_message req = { 0 };
+       struct pfvf_message resp = { 0 };
+       u8 blk_type;
+       u8 blk_byte;
+       u8 msg_type;
+       u8 max_data;
+       int err;
+
+       /* Convert the block type to {small, medium, large} size category */
+       if (*type <= ADF_VF2PF_SMALL_BLOCK_TYPE_MAX) {
+               msg_type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ;
+               blk_type = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, *type);
+               blk_byte = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, *data);
+               max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
+       } else if (*type <= ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) {
+               msg_type = ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ;
+               blk_type = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK,
+                                     *type - ADF_VF2PF_SMALL_BLOCK_TYPE_MAX);
+               blk_byte = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, *data);
+               max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
+       } else if (*type <= ADF_VF2PF_LARGE_BLOCK_TYPE_MAX) {
+               msg_type = ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ;
+               blk_type = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK,
+                                     *type - ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX);
+               blk_byte = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, *data);
+               max_data = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
+       } else {
+               dev_err(&GET_DEV(accel_dev), "Invalid message type %u\n", *type);
+               return -EINVAL;
+       }
+
+       /* Sanity check */
+       if (*data > max_data) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid byte %s %u for message type %u\n",
+                       crc ? "count" : "index", *data, *type);
+               return -EINVAL;
+       }
+
+       /* Build the block message */
+       req.type = msg_type;
+       req.data = blk_type | blk_byte | FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, crc);
+
+       err = adf_send_vf2pf_req(accel_dev, req, &resp);
+       if (err)
+               return err;
+
+       *type = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp.data);
+       *data = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp.data);
+
+       return 0;
+}
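
As a concrete case, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY (0x02) falls within the small range, so the request is sent as ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ with the block type in bits 3..0 of the message data, the byte index in bits 8..4, and the CRC request flag in bit 9.
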
+
+static int adf_vf2pf_blkmsg_get_byte(struct adf_accel_dev *accel_dev, u8 type,
+                                    u8 index, u8 *data)
+{
+       int ret;
+
+       ret = adf_vf2pf_blkmsg_data_req(accel_dev, false, &type, &index);
+       if (ret < 0)
+               return ret;
+
+       if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_DATA)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Unexpected BLKMSG response type %u, byte 0x%x\n",
+                       type, index);
+               return -EFAULT;
+       }
+
+       *data = index;
+       return 0;
+}
+
+static int adf_vf2pf_blkmsg_get_crc(struct adf_accel_dev *accel_dev, u8 type,
+                                   u8 bytes, u8 *crc)
+{
+       int ret;
+
+       /* The count of bytes refers to a length; however, shift it to a 0-based
+        * count to avoid overflows. Thus, a request for 0 bytes is technically
+        * valid.
+        */
+       --bytes;
+
+       ret = adf_vf2pf_blkmsg_data_req(accel_dev, true, &type, &bytes);
+       if (ret < 0)
+               return ret;
+
+       if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_CRC)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Unexpected CRC BLKMSG response type %u, crc 0x%x\n",
+                       type, bytes);
+               return  -EFAULT;
+       }
+
+       *crc = bytes;
+       return 0;
+}
+
+/**
+ * adf_send_vf2pf_blkmsg_req() - retrieve block message
+ * @accel_dev: Pointer to acceleration VF device.
+ * @type:      The block message type, see adf_pfvf_msg.h for allowed values
+ * @buffer:    input buffer where to place the received data
+ * @buffer_len:        buffer length on input, the number of bytes written on output
+ *
+ * Request a message of type 'type' over the block message transport.
+ * This function will send the required number of block message requests and
+ * return the overall content back to the caller through the provided buffer.
+ * The buffer should be large enough to contain the requested message type,
+ * otherwise the response will be truncated.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
+                             u8 *buffer, unsigned int *buffer_len)
+{
+       unsigned int index;
+       unsigned int msg_len;
+       int ret;
+       u8 remote_crc;
+       u8 local_crc;
+
+       if (unlikely(type > ADF_VF2PF_LARGE_BLOCK_TYPE_MAX)) {
+               dev_err(&GET_DEV(accel_dev), "Invalid block message type %d\n",
+                       type);
+               return -EINVAL;
+       }
+
+       if (unlikely(*buffer_len < ADF_PFVF_BLKMSG_HEADER_SIZE)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Buffer size too small for a block message\n");
+               return -EINVAL;
+       }
+
+       ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
+                                       ADF_PFVF_BLKMSG_VER_BYTE,
+                                       &buffer[ADF_PFVF_BLKMSG_VER_BYTE]);
+       if (unlikely(ret))
+               return ret;
+
+       if (unlikely(!buffer[ADF_PFVF_BLKMSG_VER_BYTE])) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid version 0 received for block request %u", type);
+               return -EFAULT;
+       }
+
+       ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
+                                       ADF_PFVF_BLKMSG_LEN_BYTE,
+                                       &buffer[ADF_PFVF_BLKMSG_LEN_BYTE]);
+       if (unlikely(ret))
+               return ret;
+
+       if (unlikely(!buffer[ADF_PFVF_BLKMSG_LEN_BYTE])) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid size 0 received for block request %u", type);
+               return -EFAULT;
+       }
+
+       /* We need to pick the minimum since there is no way to request a
+        * specific version. As a consequence any scenario is possible:
+        * - PF has a newer (longer) version which doesn't fit in the buffer
+        * - VF expects a newer (longer) version, so we must not ask for
+        *   bytes in excess
+        * - PF and VF share the same version, no problem
+        */
+       msg_len = ADF_PFVF_BLKMSG_HEADER_SIZE + buffer[ADF_PFVF_BLKMSG_LEN_BYTE];
+       msg_len = min(*buffer_len, msg_len);
+
+       /* Get the payload */
+       for (index = ADF_PFVF_BLKMSG_HEADER_SIZE; index < msg_len; index++) {
+               ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, index,
+                                               &buffer[index]);
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       ret = adf_vf2pf_blkmsg_get_crc(accel_dev, type, msg_len, &remote_crc);
+       if (unlikely(ret))
+               return ret;
+
+       local_crc = adf_pfvf_calc_blkmsg_crc(buffer, msg_len);
+       if (unlikely(local_crc != remote_crc)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "CRC error on msg type %d. Local %02X, remote %02X\n",
+                       type, local_crc, remote_crc);
+               return -EIO;
+       }
+
+       *buffer_len = msg_len;
+       return 0;
+}
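
A minimal caller sketch, mirroring what adf_vf2pf_get_capabilities() does (variable names are illustrative):

    struct capabilities_v2 caps = { 0 };
    unsigned int len = sizeof(caps);

    if (!adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY,
                                   (u8 *)&caps, &len))
            /* len now holds the number of bytes actually retrieved */
            dev_dbg(&GET_DEV(accel_dev), "capabilities 0x%x (%u bytes)\n",
                    caps.capabilities, len);
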
+
+static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
+                                struct pfvf_message msg)
+{
+       switch (msg.type) {
+       case ADF_PF2VF_MSGTYPE_RESTARTING:
+               dev_dbg(&GET_DEV(accel_dev), "Restarting message received from PF\n");
+
+               adf_pf2vf_handle_pf_restarting(accel_dev);
+               return false;
+       case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+       case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
+       case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Response Message received from PF (type 0x%.4x, data 0x%.4x)\n",
+                       msg.type, msg.data);
+               accel_dev->vf.response = msg;
+               complete(&accel_dev->vf.msg_received);
+               return true;
+       default:
+               dev_err(&GET_DEV(accel_dev),
+                       "Unknown message from PF (type 0x%.4x, data: 0x%.4x)\n",
+                       msg.type, msg.data);
+       }
+
+       return false;
+}
+
+bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev)
+{
+       struct pfvf_message msg;
+
+       msg = adf_recv_pf2vf_msg(accel_dev);
+       if (msg.type)
+               return adf_handle_pf2vf_msg(accel_dev, msg);
+
+       /*
+        * A message with type zero is invalid or absent. There are no
+        * replies for PF->VF messages at present, so report it as handled.
+        */
+
+       return true;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Enable communication from VF to PF
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       adf_pfvf_crc_init();
+       adf_enable_pf2vf_interrupts(accel_dev);
+
+       ret = adf_vf2pf_request_version(accel_dev);
+       if (ret)
+               return ret;
+
+       ret = adf_vf2pf_get_capabilities(accel_dev);
+       if (ret)
+               return ret;
+
+       ret = adf_vf2pf_get_ring_to_svc(accel_dev);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h
new file mode 100644 (file)
index 0000000..f6ee9b3
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_VF_PROTO_H
+#define ADF_PFVF_VF_PROTO_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg);
+int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                      struct pfvf_message *resp);
+int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
+                             u8 *buffer, unsigned int *buffer_len);
+
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_PFVF_VF_PROTO_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
new file mode 100644 (file)
index 0000000..f44025b
--- /dev/null
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_pfvf_pf_msg.h"
+
+#define ADF_VF2PF_RATELIMIT_INTERVAL   8
+#define ADF_VF2PF_RATELIMIT_BURST      130
+
+static struct workqueue_struct *pf2vf_resp_wq;
+
+struct adf_pf2vf_resp {
+       struct work_struct pf2vf_resp_work;
+       struct adf_accel_vf_info *vf_info;
+};
+
+static void adf_iov_send_resp(struct work_struct *work)
+{
+       struct adf_pf2vf_resp *pf2vf_resp =
+               container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+       struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
+       struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+       u32 vf_nr = vf_info->vf_nr;
+       bool ret;
+
+       ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
+       if (ret)
+               /* re-enable interrupt on PF from this VF */
+               adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
+
+       kfree(pf2vf_resp);
+}
+
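+/*
+ * Defer handling of a VF2PF message to the workqueue. The allocation
+ * uses GFP_ATOMIC since this is expected to be called from a context
+ * where sleeping is not allowed, such as the VF2PF interrupt handler.
+ */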
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
+{
+       struct adf_pf2vf_resp *pf2vf_resp;
+
+       pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
+       if (!pf2vf_resp)
+               return;
+
+       pf2vf_resp->vf_info = vf_info;
+       INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
+       queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
+}
+
+static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+       int totalvfs = pci_sriov_get_totalvfs(pdev);
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_accel_vf_info *vf_info;
+       int i;
+
+       for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+            i++, vf_info++) {
+               /* This ptr will be populated when VFs are created */
+               vf_info->accel_dev = accel_dev;
+               vf_info->vf_nr = i;
+               vf_info->vf_compat_ver = 0;
+
+               mutex_init(&vf_info->pf2vf_lock);
+               ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+                                    ADF_VF2PF_RATELIMIT_INTERVAL,
+                                    ADF_VF2PF_RATELIMIT_BURST);
+       }
+
+       /* Set Valid bits in AE Thread to PCIe Function Mapping */
+       if (hw_data->configure_iov_threads)
+               hw_data->configure_iov_threads(accel_dev, true);
+
+       /* Enable VF to PF interrupts for all VFs */
+       adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
+
+       /*
+        * Due to the hardware design, when SR-IOV and the ring arbiter
+        * are enabled all the VFs supported in hardware must be enabled in
+        * order for all the hardware resources (i.e. bundles) to be usable.
+        * When SR-IOV is enabled, each of the VFs will own one bundle.
+        */
+       return pci_enable_sriov(pdev, totalvfs);
+}
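+
+/*
+ * Note on the interrupt mask above: BIT_ULL(totalvfs) - 1 sets one bit
+ * per supported VF. For example, with totalvfs = 16 the mask is 0xFFFF,
+ * enabling VF to PF interrupts for VFs 0-15 in a single call.
+ */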
+
+/**
+ * adf_disable_sriov() - Disable SRIOV for the device
+ * @accel_dev:  Pointer to accel device.
+ *
+ * Function disables SRIOV for the accel device.
+ *
+ * Return: void.
+ */
+void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
+       struct adf_accel_vf_info *vf;
+       int i;
+
+       if (!accel_dev->pf.vf_info)
+               return;
+
+       adf_pf2vf_notify_restarting(accel_dev);
+       pci_disable_sriov(accel_to_pci_dev(accel_dev));
+
+       /* Disable VF to PF interrupts */
+       adf_disable_all_vf2pf_interrupts(accel_dev);
+
+       /* Clear Valid bits in AE Thread to PCIe Function Mapping */
+       if (hw_data->configure_iov_threads)
+               hw_data->configure_iov_threads(accel_dev, false);
+
+       for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
+               mutex_destroy(&vf->pf2vf_lock);
+
+       kfree(accel_dev->pf.vf_info);
+       accel_dev->pf.vf_info = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_disable_sriov);
+
+/**
+ * adf_sriov_configure() - Enable SRIOV for the device
+ * @pdev:  Pointer to PCI device.
+ * @numvfs: Number of virtual functions (VFs) to enable.
+ *
+ * Function enables SRIOV for the PCI device.
+ *
+ * Note that the @numvfs parameter is ignored and all VFs supported by the
+ * device are enabled due to the design of the hardware.
+ *
+ * Return: number of VFs enabled on success, error code otherwise.
+ */
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+       int totalvfs = pci_sriov_get_totalvfs(pdev);
+       unsigned long val;
+       int ret;
+
+       if (!accel_dev) {
+               dev_err(&pdev->dev, "Failed to find accel_dev\n");
+               return -EFAULT;
+       }
+
+       if (!device_iommu_mapped(&pdev->dev))
+               dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
+
+       if (accel_dev->pf.vf_info) {
+               dev_info(&pdev->dev, "Already enabled for this device\n");
+               return -EINVAL;
+       }
+
+       if (adf_dev_started(accel_dev)) {
+               if (adf_devmgr_in_reset(accel_dev) ||
+                   adf_dev_in_use(accel_dev)) {
+                       dev_err(&GET_DEV(accel_dev), "Device busy\n");
+                       return -EBUSY;
+               }
+
+               ret = adf_dev_down(accel_dev, true);
+               if (ret)
+                       return ret;
+       }
+
+       if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+               return -EFAULT;
+       val = 0;
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                       ADF_NUM_CY, (void *)&val, ADF_DEC))
+               return -EFAULT;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+                                         &val, ADF_DEC);
+       if (ret)
+               return ret;
+
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+       /* Allocate memory for VF info structs */
+       accel_dev->pf.vf_info = kcalloc(totalvfs,
+                                       sizeof(struct adf_accel_vf_info),
+                                       GFP_KERNEL);
+       if (!accel_dev->pf.vf_info)
+               return -ENOMEM;
+
+       if (adf_dev_up(accel_dev, false)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+                       accel_dev->accel_id);
+               return -EFAULT;
+       }
+
+       ret = adf_enable_sriov(accel_dev);
+       if (ret)
+               return ret;
+
+       return numvfs;
+}
+EXPORT_SYMBOL_GPL(adf_sriov_configure);
+
+int __init adf_init_pf_wq(void)
+{
+       /* Workqueue for PF2VF responses */
+       pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
+
+       return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+       if (pf2vf_resp_wq) {
+               destroy_workqueue(pf2vf_resp_wq);
+               pf2vf_resp_wq = NULL;
+       }
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
new file mode 100644 (file)
index 0000000..3eb6611
--- /dev/null
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static const char * const state_operations[] = {
+       [DEV_DOWN] = "down",
+       [DEV_UP] = "up",
+};
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct adf_accel_dev *accel_dev;
+       char *state;
+
+       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+       if (!accel_dev)
+               return -EINVAL;
+
+       state = adf_dev_started(accel_dev) ? "up" : "down";
+       return sysfs_emit(buf, "%s\n", state);
+}
+
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
+{
+       struct adf_accel_dev *accel_dev;
+       u32 accel_id;
+       int ret;
+
+       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+       if (!accel_dev)
+               return -EINVAL;
+
+       accel_id = accel_dev->accel_id;
+
+       if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
+               dev_info(dev, "Device qat_dev%d is busy\n", accel_id);
+               return -EBUSY;
+       }
+
+       ret = sysfs_match_string(state_operations, buf);
+       if (ret < 0)
+               return ret;
+
+       switch (ret) {
+       case DEV_DOWN:
+               dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
+
+               ret = adf_dev_down(accel_dev, true);
+               if (ret < 0)
+                       return -EINVAL;
+
+               break;
+       case DEV_UP:
+               dev_info(dev, "Starting device qat_dev%d\n", accel_id);
+
+               ret = adf_dev_up(accel_dev, true);
+               if (ret < 0) {
+                       dev_err(dev, "Failed to start device qat_dev%d\n",
+                               accel_id);
+                       adf_dev_down(accel_dev, true);
+                       return ret;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return count;
+}
+
+static const char * const services_operations[] = {
+       ADF_CFG_CY,
+       ADF_CFG_DC,
+};
+
+static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
+                                char *buf)
+{
+       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+       struct adf_accel_dev *accel_dev;
+       int ret;
+
+       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+       if (!accel_dev)
+               return -EINVAL;
+
+       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+                                     ADF_SERVICES_ENABLED, services);
+       if (ret)
+               return ret;
+
+       return sysfs_emit(buf, "%s\n", services);
+}
+
+static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
+                                      const char *services)
+{
+       return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+                                          ADF_SERVICES_ENABLED, services,
+                                          ADF_STR);
+}
+
+static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct adf_hw_device_data *hw_data;
+       struct adf_accel_dev *accel_dev;
+       int ret;
+
+       ret = sysfs_match_string(services_operations, buf);
+       if (ret < 0)
+               return ret;
+
+       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+       if (!accel_dev)
+               return -EINVAL;
+
+       if (adf_dev_started(accel_dev)) {
+               dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
+                        accel_dev->accel_id);
+               return -EINVAL;
+       }
+
+       ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]);
+       if (ret < 0)
+               return ret;
+
+       hw_data = GET_HW_DATA(accel_dev);
+
+       /*
+        * Update the capabilities mask after the change in configuration.
+        * A call to this function is required as capabilities are, at the
+        * moment, tied to the configuration.
+        */
+       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+       if (!hw_data->accel_capabilities_mask)
+               return -EINVAL;
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(state);
+static DEVICE_ATTR_RW(cfg_services);
+
+static struct attribute *qat_attrs[] = {
+       &dev_attr_state.attr,
+       &dev_attr_cfg_services.attr,
+       NULL,
+};
+
+static struct attribute_group qat_group = {
+       .attrs = qat_attrs,
+       .name = "qat",
+};
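+
+/*
+ * With this attribute group registered, the controls appear under the
+ * device's sysfs node (the PCI address below is illustrative):
+ *
+ *	# cat /sys/bus/pci/devices/0000:6b:00.0/qat/state
+ *	up
+ *	# echo down > /sys/bus/pci/devices/0000:6b:00.0/qat/state
+ *
+ * cfg_services accepts one of the strings in services_operations[] and,
+ * per cfg_services_store() above, can only be written while the device
+ * is down.
+ */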
+
+int adf_sysfs_init(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to create qat attribute group: %d\n", ret);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_sysfs_init);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c
new file mode 100644 (file)
index 0000000..630d048
--- /dev/null
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/delay.h>
+#include <linux/nospec.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+#define ADF_MAX_RING_THRESHOLD         80
+#define ADF_PERCENT(tot, percent)      (((tot) * (percent)) / 100)
+
+static inline u32 adf_modulo(u32 data, u32 shift)
+{
+       u32 div = data >> shift;
+       u32 mult = div << shift;
+
+       return data - mult;
+}
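+
+/*
+ * adf_modulo(data, shift) computes data % (1 << shift) without a divide,
+ * e.g. adf_modulo(300, 8) = 300 - 256 = 44. It is used below to wrap
+ * ring head and tail offsets, as the ring size is always a power of two.
+ */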
+
+static inline int adf_check_ring_alignment(u64 addr, u64 size)
+{
+       if (((size - 1) & addr) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
+{
+       int i = ADF_MIN_RING_SIZE;
+
+       for (; i <= ADF_MAX_RING_SIZE; i++)
+               if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
+                       return i;
+
+       return ADF_DEFAULT_RING_SIZE;
+}
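+
+/*
+ * Worked example: with 64 byte messages and 64 messages per ring,
+ * msg_size * msg_num = 4096 = ADF_SIZE_TO_RING_SIZE_IN_BYTES(0x06), so
+ * ADF_RING_SIZE_4K is returned. If no size matches exactly, the function
+ * falls back to ADF_DEFAULT_RING_SIZE (16K).
+ */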
+
+static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
+{
+       spin_lock(&bank->lock);
+       if (bank->ring_mask & (1 << ring)) {
+               spin_unlock(&bank->lock);
+               return -EFAULT;
+       }
+       bank->ring_mask |= (1 << ring);
+       spin_unlock(&bank->lock);
+       return 0;
+}
+
+static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
+{
+       spin_lock(&bank->lock);
+       bank->ring_mask &= ~(1 << ring);
+       spin_unlock(&bank->lock);
+}
+
+static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+       spin_lock_bh(&bank->lock);
+       bank->irq_mask |= (1 << ring);
+       spin_unlock_bh(&bank->lock);
+       csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
+                                     bank->irq_mask);
+       csr_ops->write_csr_int_col_ctl(bank->csr_addr, bank->bank_number,
+                                      bank->irq_coalesc_timer);
+}
+
+static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+       spin_lock_bh(&bank->lock);
+       bank->irq_mask &= ~(1 << ring);
+       spin_unlock_bh(&bank->lock);
+       csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
+                                     bank->irq_mask);
+}
+
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
+{
+       return atomic_read(ring->inflights) > ring->threshold;
+}
+
+int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+
+       if (atomic_add_return(1, ring->inflights) >
+           ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
+               atomic_dec(ring->inflights);
+               return -EAGAIN;
+       }
+       spin_lock_bh(&ring->lock);
+       memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
+              ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+
+       ring->tail = adf_modulo(ring->tail +
+                               ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+                               ADF_RING_SIZE_MODULO(ring->ring_size));
+       csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
+                                    ring->bank->bank_number, ring->ring_number,
+                                    ring->tail);
+       spin_unlock_bh(&ring->lock);
+
+       return 0;
+}
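+
+/*
+ * The tail advance above wraps via adf_modulo() with shift
+ * ADF_RING_SIZE_MODULO(ring_size) = ring_size + 6. Since the ring size
+ * in bytes is (1 << (ring_size - 1)) << 7 = 2^(ring_size + 6), the tail
+ * byte offset wraps exactly at the end of the ring buffer.
+ */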
+
+static int adf_handle_response(struct adf_etr_ring_data *ring)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+       u32 msg_counter = 0;
+       u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
+
+       while (*msg != ADF_RING_EMPTY_SIG) {
+               ring->callback((u32 *)msg);
+               atomic_dec(ring->inflights);
+               *msg = ADF_RING_EMPTY_SIG;
+               ring->head = adf_modulo(ring->head +
+                                       ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+                                       ADF_RING_SIZE_MODULO(ring->ring_size));
+               msg_counter++;
+               msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
+       }
+       if (msg_counter > 0) {
+               csr_ops->write_csr_ring_head(ring->bank->csr_addr,
+                                            ring->bank->bank_number,
+                                            ring->ring_number, ring->head);
+       }
+       return 0;
+}
+
+static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+       u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);
+
+       csr_ops->write_csr_ring_config(ring->bank->csr_addr,
+                                      ring->bank->bank_number,
+                                      ring->ring_number, ring_config);
+}
+
+static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+       u32 ring_config =
+                       BUILD_RESP_RING_CONFIG(ring->ring_size,
+                                              ADF_RING_NEAR_WATERMARK_512,
+                                              ADF_RING_NEAR_WATERMARK_0);
+
+       csr_ops->write_csr_ring_config(ring->bank->csr_addr,
+                                      ring->bank->bank_number,
+                                      ring->ring_number, ring_config);
+}
+
+static int adf_init_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_etr_bank_data *bank = ring->bank;
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+       u64 ring_base;
+       u32 ring_size_bytes =
+                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+
+       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+       ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+                                            ring_size_bytes, &ring->dma_addr,
+                                            GFP_KERNEL);
+       if (!ring->base_addr)
+               return -ENOMEM;
+
+       memset(ring->base_addr, 0x7F, ring_size_bytes);
+       /* The base_addr has to be aligned to the size of the buffer */
+       if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
+               dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
+               dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
+                                 ring->base_addr, ring->dma_addr);
+               ring->base_addr = NULL;
+               return -EFAULT;
+       }
+
+       if (hw_data->tx_rings_mask & (1 << ring->ring_number))
+               adf_configure_tx_ring(ring);
+       else
+               adf_configure_rx_ring(ring);
+
+       ring_base = csr_ops->build_csr_ring_base_addr(ring->dma_addr,
+                                                     ring->ring_size);
+
+       csr_ops->write_csr_ring_base(ring->bank->csr_addr,
+                                    ring->bank->bank_number, ring->ring_number,
+                                    ring_base);
+       spin_lock_init(&ring->lock);
+       return 0;
+}
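+
+/*
+ * The 0x7F fill above makes every 32-bit word of the ring read back as
+ * ADF_RING_EMPTY_SIG (0x7F7F7F7F), so the response handler treats a
+ * freshly initialized ring as empty.
+ */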
+
+static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
+{
+       u32 ring_size_bytes =
+                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+
+       if (ring->base_addr) {
+               memset(ring->base_addr, 0x7F, ring_size_bytes);
+               dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
+                                 ring_size_bytes, ring->base_addr,
+                                 ring->dma_addr);
+       }
+}
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+                   u32 bank_num, u32 num_msgs,
+                   u32 msg_size, const char *ring_name,
+                   adf_callback_fn callback, int poll_mode,
+                   struct adf_etr_ring_data **ring_ptr)
+{
+       struct adf_etr_data *transport_data = accel_dev->transport;
+       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
+       struct adf_etr_bank_data *bank;
+       struct adf_etr_ring_data *ring;
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       int max_inflights;
+       u32 ring_num;
+       int ret;
+
+       if (bank_num >= GET_MAX_BANKS(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
+               return -EFAULT;
+       }
+       if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+               dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
+               return -EFAULT;
+       }
+       if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
+                             ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid ring size for given msg size\n");
+               return -EFAULT;
+       }
+       if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
+               dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
+                       section, ring_name);
+               return -EFAULT;
+       }
+       if (kstrtouint(val, 10, &ring_num)) {
+               dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
+               return -EFAULT;
+       }
+       if (ring_num >= num_rings_per_bank) {
+               dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
+               return -EFAULT;
+       }
+
+       ring_num = array_index_nospec(ring_num, num_rings_per_bank);
+       bank = &transport_data->banks[bank_num];
+       if (adf_reserve_ring(bank, ring_num)) {
+               dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
+                       ring_num, ring_name);
+               return -EFAULT;
+       }
+       ring = &bank->rings[ring_num];
+       ring->ring_number = ring_num;
+       ring->bank = bank;
+       ring->callback = callback;
+       ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
+       ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
+       ring->head = 0;
+       ring->tail = 0;
+       max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
+       ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
+       atomic_set(ring->inflights, 0);
+       ret = adf_init_ring(ring);
+       if (ret)
+               goto err;
+
+       /* Enable HW arbitration for the given ring */
+       adf_update_ring_arb(ring);
+
+       if (adf_ring_debugfs_add(ring, ring_name)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Couldn't add ring debugfs entry\n");
+               ret = -EFAULT;
+               goto err;
+       }
+
+       /* Enable interrupts if needed */
+       if (callback && (!poll_mode))
+               adf_enable_ring_irq(bank, ring->ring_number);
+       *ring_ptr = ring;
+       return 0;
+err:
+       adf_cleanup_ring(ring);
+       adf_unreserve_ring(bank, ring_num);
+       adf_update_ring_arb(ring);
+       return ret;
+}
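+
+/*
+ * Minimal usage sketch for adf_create_ring(); the section and ring names
+ * below are hypothetical and must exist in the device configuration,
+ * which maps each ring name to a ring number within the bank:
+ *
+ *	struct adf_etr_ring_data *tx_ring, *rx_ring;
+ *	int ret;
+ *
+ *	ret = adf_create_ring(accel_dev, "KERNEL", bank_nr, 512, 64,
+ *			      "RingTx", NULL, 0, &tx_ring);
+ *	if (!ret)
+ *		ret = adf_create_ring(accel_dev, "KERNEL", bank_nr, 512, 64,
+ *				      "RingRx", resp_callback, 0, &rx_ring);
+ *
+ * Whether a ring is TX or RX is determined by the device's tx_rings_mask
+ * in adf_init_ring(), not by the arguments passed here.
+ */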
+
+void adf_remove_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_etr_bank_data *bank = ring->bank;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+       /* Disable interrupts for the given ring */
+       adf_disable_ring_irq(bank, ring->ring_number);
+
+       /* Clear the ring configuration and base registers */
+       csr_ops->write_csr_ring_config(bank->csr_addr, bank->bank_number,
+                                      ring->ring_number, 0);
+       csr_ops->write_csr_ring_base(bank->csr_addr, bank->bank_number,
+                                    ring->ring_number, 0);
+       adf_ring_debugfs_rm(ring);
+       adf_unreserve_ring(bank, ring->ring_number);
+       /* Disable HW arbitration for the given ring */
+       adf_update_ring_arb(ring);
+       adf_cleanup_ring(ring);
+}
+
+static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
+{
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+       unsigned long empty_rings;
+       int i;
+
+       empty_rings = csr_ops->read_csr_e_stat(bank->csr_addr,
+                                              bank->bank_number);
+       empty_rings = ~empty_rings & bank->irq_mask;
+
+       for_each_set_bit(i, &empty_rings, num_rings_per_bank)
+               adf_handle_response(&bank->rings[i]);
+}
+
+void adf_response_handler(uintptr_t bank_addr)
+{
+       struct adf_etr_bank_data *bank = (void *)bank_addr;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+       /* Handle all the responses and reenable IRQs */
+       adf_ring_response_handler(bank);
+
+       csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
+                                           bank->irq_mask);
+}
+
+static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
+                                 const char *section, const char *format,
+                                 u32 key, u32 *value)
+{
+       char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+       snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
+
+       if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
+               return -EFAULT;
+
+       if (kstrtouint(val_buf, 10, value))
+               return -EFAULT;
+       return 0;
+}
+
+static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
+                                 const char *section,
+                                 u32 bank_num_in_accel)
+{
+       if (adf_get_cfg_int(bank->accel_dev, section,
+                           ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+                           bank_num_in_accel, &bank->irq_coalesc_timer))
+               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+
+       if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
+           ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
+               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+}
+
+static int adf_init_bank(struct adf_accel_dev *accel_dev,
+                        struct adf_etr_bank_data *bank,
+                        u32 bank_num, void __iomem *csr_addr)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u8 num_rings_per_bank = hw_data->num_rings_per_bank;
+       struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
+       u32 irq_mask = BIT(num_rings_per_bank) - 1;
+       struct adf_etr_ring_data *ring;
+       struct adf_etr_ring_data *tx_ring;
+       u32 i, coalesc_enabled = 0;
+       unsigned long ring_mask;
+       int size;
+
+       memset(bank, 0, sizeof(*bank));
+       bank->bank_number = bank_num;
+       bank->csr_addr = csr_addr;
+       bank->accel_dev = accel_dev;
+       spin_lock_init(&bank->lock);
+
+       /* Allocate the rings in the bank */
+       size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
+       bank->rings = kzalloc_node(size, GFP_KERNEL,
+                                  dev_to_node(&GET_DEV(accel_dev)));
+       if (!bank->rings)
+               return -ENOMEM;
+
+       /*
+        * Always enable IRQ coalescing: this allows the use of the
+        * optimised flag and coalesce register. If coalescing is disabled
+        * in the config file, just use the minimum time value.
+        */
+       if ((adf_get_cfg_int(accel_dev, "Accelerator0",
+                            ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
+                            &coalesc_enabled) == 0) && coalesc_enabled)
+               adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
+       else
+               bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
+
+       for (i = 0; i < num_rings_per_bank; i++) {
+               csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
+               csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);
+
+               ring = &bank->rings[i];
+               if (hw_data->tx_rings_mask & (1 << i)) {
+                       ring->inflights =
+                               kzalloc_node(sizeof(atomic_t),
+                                            GFP_KERNEL,
+                                            dev_to_node(&GET_DEV(accel_dev)));
+                       if (!ring->inflights)
+                               goto err;
+               } else {
+                       if (i < hw_data->tx_rx_gap) {
+                               dev_err(&GET_DEV(accel_dev),
+                                       "Invalid tx rings mask config\n");
+                               goto err;
+                       }
+                       tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
+                       ring->inflights = tx_ring->inflights;
+               }
+       }
+       if (adf_bank_debugfs_add(bank)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to add bank debugfs entry\n");
+               goto err;
+       }
+
+       csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
+       csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
+
+       return 0;
+err:
+       ring_mask = hw_data->tx_rings_mask;
+       for_each_set_bit(i, &ring_mask, num_rings_per_bank) {
+               ring = &bank->rings[i];
+               kfree(ring->inflights);
+               ring->inflights = NULL;
+       }
+       kfree(bank->rings);
+       return -ENOMEM;
+}
+
+/**
+ * adf_init_etr_data() - Initialize transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function initializes the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *csr_addr;
+       u32 size;
+       u32 num_banks = 0;
+       int i, ret;
+
+       etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
+                               dev_to_node(&GET_DEV(accel_dev)));
+       if (!etr_data)
+               return -ENOMEM;
+
+       num_banks = GET_MAX_BANKS(accel_dev);
+       size = num_banks * sizeof(struct adf_etr_bank_data);
+       etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+                                      dev_to_node(&GET_DEV(accel_dev)));
+       if (!etr_data->banks) {
+               ret = -ENOMEM;
+               goto err_bank;
+       }
+
+       accel_dev->transport = etr_data;
+       i = hw_data->get_etr_bar_id(hw_data);
+       csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+
+       /* accel_dev->debugfs_dir should always be non-NULL here */
+       etr_data->debug = debugfs_create_dir("transport",
+                                            accel_dev->debugfs_dir);
+
+       for (i = 0; i < num_banks; i++) {
+               ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
+                                   csr_addr);
+               if (ret)
+                       goto err_bank_all;
+       }
+
+       return 0;
+
+err_bank_all:
+       debugfs_remove(etr_data->debug);
+       kfree(etr_data->banks);
+err_bank:
+       kfree(etr_data);
+       accel_dev->transport = NULL;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_init_etr_data);
+
+static void cleanup_bank(struct adf_etr_bank_data *bank)
+{
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u8 num_rings_per_bank = hw_data->num_rings_per_bank;
+       u32 i;
+
+       for (i = 0; i < num_rings_per_bank; i++) {
+               struct adf_etr_ring_data *ring = &bank->rings[i];
+
+               if (bank->ring_mask & (1 << i))
+                       adf_cleanup_ring(ring);
+
+               if (hw_data->tx_rings_mask & (1 << i))
+                       kfree(ring->inflights);
+       }
+       kfree(bank->rings);
+       adf_bank_debugfs_rm(bank);
+       memset(bank, 0, sizeof(*bank));
+}
+
+static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       u32 i, num_banks = GET_MAX_BANKS(accel_dev);
+
+       for (i = 0; i < num_banks; i++)
+               cleanup_bank(&etr_data->banks[i]);
+}
+
+/**
+ * adf_cleanup_etr_data() - Clear transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function clears the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data = accel_dev->transport;
+
+       if (etr_data) {
+               adf_cleanup_etr_handles(accel_dev);
+               debugfs_remove(etr_data->debug);
+               kfree(etr_data->banks->rings);
+               kfree(etr_data->banks);
+               kfree(etr_data);
+               accel_dev->transport = NULL;
+       }
+}
+EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.h b/drivers/crypto/intel/qat/qat_common/adf_transport.h
new file mode 100644 (file)
index 0000000..e6ef6f9
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_TRANSPORT_H
+#define ADF_TRANSPORT_H
+
+#include "adf_accel_devices.h"
+
+struct adf_etr_ring_data;
+
+typedef void (*adf_callback_fn)(void *resp_msg);
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+                   u32 bank_num, u32 num_msgs, u32 msg_size,
+                   const char *ring_name, adf_callback_fn callback,
+                   int poll_mode, struct adf_etr_ring_data **ring_ptr);
+
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
+int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
+void adf_remove_ring(struct adf_etr_ring_data *ring);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h
new file mode 100644 (file)
index 0000000..d3667db
--- /dev/null
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include "adf_accel_devices.h"
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
+       ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
+               ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+                               SIZE) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+       ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
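+/*
+ * Worked example: with RING_SIZE = ADF_RING_SIZE_16K (0x08) and
+ * MSG_SIZE = ADF_MSG_SIZE_64 (0x02), ADF_SIZE_TO_POW(0x02) = 2, so
+ * ADF_MAX_INFLIGHTS = ((128 << 3) >> 2) - 1 = 255: a 16384 byte ring
+ * holds 256 messages of 64 bytes, with one slot kept in reserve.
+ */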
+#define BUILD_RING_CONFIG(size)        \
+       ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+       | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+       | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+       ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+       | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+       | size)
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
new file mode 100644 (file)
index 0000000..08bca1c
--- /dev/null
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+
+static DEFINE_MUTEX(ring_read_lock);
+static DEFINE_MUTEX(bank_read_lock);
+
+static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+
+       mutex_lock(&ring_read_lock);
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+               return NULL;
+
+       return ring->base_addr +
+               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+
+       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+               return NULL;
+
+       return ring->base_addr +
+               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static int adf_ring_show(struct seq_file *sfile, void *v)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+       struct adf_etr_bank_data *bank = ring->bank;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+       void __iomem *csr = ring->bank->csr_addr;
+
+       if (v == SEQ_START_TOKEN) {
+               int head, tail, empty;
+
+               head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
+                                                  ring->ring_number);
+               tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
+                                                  ring->ring_number);
+               empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
+
+               seq_puts(sfile, "------- Ring configuration -------\n");
+               seq_printf(sfile, "ring name: %s\n",
+                          ring->ring_debug->ring_name);
+               seq_printf(sfile, "ring num %d, bank num %d\n",
+                          ring->ring_number, ring->bank->bank_number);
+               seq_printf(sfile, "head %x, tail %x, empty: %d\n",
+                          head, tail, (empty & 1 << ring->ring_number)
+                          >> ring->ring_number);
+               seq_printf(sfile, "ring size %lld, msg size %d\n",
+                          (long long)ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+                          ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+               seq_puts(sfile, "----------- Ring data ------------\n");
+               return 0;
+       }
+       seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4,
+                    v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
+       return 0;
+}
+
+static void adf_ring_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&ring_read_lock);
+}
+
+static const struct seq_operations adf_ring_debug_sops = {
+       .start = adf_ring_start,
+       .next = adf_ring_next,
+       .stop = adf_ring_stop,
+       .show = adf_ring_show
+};
+
+DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
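+
+/*
+ * The iterator above emits SEQ_START_TOKEN once for the configuration
+ * header, then one position per message slot, i.e. ring bytes divided by
+ * message bytes entries, each of which adf_ring_show() hex-dumps.
+ */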
+
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+{
+       struct adf_etr_ring_debug_entry *ring_debug;
+       char entry_name[8];
+
+       ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+       if (!ring_debug)
+               return -ENOMEM;
+
+       strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+       snprintf(entry_name, sizeof(entry_name), "ring_%02d",
+                ring->ring_number);
+
+       ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
+                                               ring->bank->bank_debug_dir,
+                                               ring, &adf_ring_debug_fops);
+       ring->ring_debug = ring_debug;
+       return 0;
+}
+
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
+{
+       if (ring->ring_debug) {
+               debugfs_remove(ring->ring_debug->debug);
+               kfree(ring->ring_debug);
+               ring->ring_debug = NULL;
+       }
+}
+
+static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
+{
+       struct adf_etr_bank_data *bank = sfile->private;
+       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
+
+       mutex_lock(&bank_read_lock);
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       if (*pos >= num_rings_per_bank)
+               return NULL;
+
+       return pos;
+}
+
+static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       struct adf_etr_bank_data *bank = sfile->private;
+       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
+
+       if (++(*pos) >= num_rings_per_bank)
+               return NULL;
+
+       return pos;
+}
+
+static int adf_bank_show(struct seq_file *sfile, void *v)
+{
+       struct adf_etr_bank_data *bank = sfile->private;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+       if (v == SEQ_START_TOKEN) {
+               seq_printf(sfile, "------- Bank %d configuration -------\n",
+                          bank->bank_number);
+       } else {
+               int ring_id = *((int *)v) - 1;
+               struct adf_etr_ring_data *ring = &bank->rings[ring_id];
+               void __iomem *csr = bank->csr_addr;
+               int head, tail, empty;
+
+               if (!(bank->ring_mask & 1 << ring_id))
+                       return 0;
+
+               head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
+                                                  ring->ring_number);
+               tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
+                                                  ring->ring_number);
+               empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
+
+               seq_printf(sfile,
+                          "ring num %02d, head %04x, tail %04x, empty: %d\n",
+                          ring->ring_number, head, tail,
+                          (empty & 1 << ring->ring_number) >>
+                          ring->ring_number);
+       }
+       return 0;
+}
+
+static void adf_bank_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&bank_read_lock);
+}
+
+static const struct seq_operations adf_bank_debug_sops = {
+       .start = adf_bank_start,
+       .next = adf_bank_next,
+       .stop = adf_bank_stop,
+       .show = adf_bank_show
+};
+
+DEFINE_SEQ_ATTRIBUTE(adf_bank_debug);
+
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       struct dentry *parent = accel_dev->transport->debug;
+       char name[8];
+
+       snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+       bank->bank_debug_dir = debugfs_create_dir(name, parent);
+       bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
+                                                  bank->bank_debug_dir, bank,
+                                                  &adf_bank_debug_fops);
+       return 0;
+}
+
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
+{
+       debugfs_remove(bank->bank_debug_cfg);
+       debugfs_remove(bank->bank_debug_dir);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_internal.h b/drivers/crypto/intel/qat/qat_common/adf_transport_internal.h
new file mode 100644 (file)
index 0000000..8b2c92b
--- /dev/null
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_TRANSPORT_INTRN_H
+#define ADF_TRANSPORT_INTRN_H
+
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include "adf_transport.h"
+
+struct adf_etr_ring_debug_entry {
+       char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       struct dentry *debug;
+};
+
+struct adf_etr_ring_data {
+       void *base_addr;
+       atomic_t *inflights;
+       adf_callback_fn callback;
+       struct adf_etr_bank_data *bank;
+       dma_addr_t dma_addr;
+       struct adf_etr_ring_debug_entry *ring_debug;
+       spinlock_t lock;        /* protects ring data struct */
+       u16 head;
+       u16 tail;
+       u32 threshold;
+       u8 ring_number;
+       u8 ring_size;
+       u8 msg_size;
+};
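+
+/*
+ * In adf_etr_ring_data, head and tail are byte offsets into base_addr,
+ * wrapped with adf_modulo() at the ring size. inflights is a pointer
+ * because an RX ring shares the counter of its paired TX ring.
+ */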
+
+struct adf_etr_bank_data {
+       struct adf_etr_ring_data *rings;
+       struct tasklet_struct resp_handler;
+       void __iomem *csr_addr;
+       u32 irq_coalesc_timer;
+       u32 bank_number;
+       u16 ring_mask;
+       u16 irq_mask;
+       spinlock_t lock;        /* protects bank data struct */
+       struct adf_accel_dev *accel_dev;
+       struct dentry *bank_debug_dir;
+       struct dentry *bank_debug_cfg;
+};
+
+struct adf_etr_data {
+       struct adf_etr_bank_data *banks;
+       struct dentry *debug;
+};
+
+void adf_response_handler(uintptr_t bank_addr);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
+#else
+static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+       return 0;
+}
+
+#define adf_bank_debugfs_rm(bank) do {} while (0)
+
+static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
+                                      const char *name)
+{
+       return 0;
+}
+
+#define adf_ring_debugfs_rm(ring) do {} while (0)
+#endif
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
new file mode 100644 (file)
index 0000000..b05c395
--- /dev/null
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
+
+#define ADF_VINTSOU_OFFSET     0x204
+#define ADF_VINTMSK_OFFSET     0x208
+#define ADF_VINTSOU_BUN                BIT(0)
+#define ADF_VINTSOU_PF2VF      BIT(1)
+
+static struct workqueue_struct *adf_vf_stop_wq;
+
+struct adf_vf_stop_data {
+       struct adf_accel_dev *accel_dev;
+       struct work_struct work;
+};
+
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+
+       ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x0);
+}
+
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+
+       ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
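+
+/*
+ * Assuming VINTMSK mirrors the VINTSOU layout above, writing 0x2 masks
+ * only the PF2VF source (BIT(1)) while leaving bundle interrupts (BIT(0))
+ * unmasked, and writing 0x0 unmasks both sources.
+ */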
+
+static int adf_enable_msi(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
+                                        PCI_IRQ_MSI);
+       if (unlikely(stat < 0)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to enable MSI interrupt: %d\n", stat);
+               return stat;
+       }
+
+       return 0;
+}
+
+static void adf_disable_msi(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       pci_free_irq_vectors(pdev);
+}
+
+static void adf_dev_stop_async(struct work_struct *work)
+{
+       struct adf_vf_stop_data *stop_data =
+               container_of(work, struct adf_vf_stop_data, work);
+       struct adf_accel_dev *accel_dev = stop_data->accel_dev;
+
+       adf_dev_restarting_notify(accel_dev);
+       adf_dev_down(accel_dev, false);
+
+       /* Re-enable PF2VF interrupts */
+       adf_enable_pf2vf_interrupts(accel_dev);
+       kfree(stop_data);
+}
+
+int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
+{
+       struct adf_vf_stop_data *stop_data;
+
+       clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+       stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+       if (!stop_data) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Couldn't schedule stop for vf_%d\n",
+                       accel_dev->accel_id);
+               return -ENOMEM;
+       }
+       stop_data->accel_dev = accel_dev;
+       INIT_WORK(&stop_data->work, adf_dev_stop_async);
+       queue_work(adf_vf_stop_wq, &stop_data->work);
+
+       return 0;
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+       struct adf_accel_dev *accel_dev = data;
+       bool ret;
+
+       ret = adf_recv_and_handle_pf2vf_msg(accel_dev);
+       if (ret)
+               /* Re-enable PF2VF interrupts */
+               adf_enable_pf2vf_interrupts(accel_dev);
+}
+
+static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+       tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
+                    (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
+
+       mutex_init(&accel_dev->vf.vf2pf_lock);
+       return 0;
+}
+
+static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+       tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
+       tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
+       mutex_destroy(&accel_dev->vf.vf2pf_lock);
+}
+
+static irqreturn_t adf_isr(int irq, void *privdata)
+{
+       struct adf_accel_dev *accel_dev = privdata;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
+       struct adf_bar *pmisc =
+                       &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+       void __iomem *pmisc_bar_addr = pmisc->virt_addr;
+       bool handled = false;
+       u32 v_int, v_mask;
+
+       /* Read VF INT source CSR to determine the source of VF interrupt */
+       v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
+
+       /* Read VF INT mask CSR to determine which sources are masked */
+       v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
+
+       /*
+        * Recompute v_int ignoring sources that are masked. This is to
+        * avoid rescheduling the tasklet for interrupts already handled
+        */
+       v_int &= ~v_mask;
+
+       /* Check for PF2VF interrupt */
+       if (v_int & ADF_VINTSOU_PF2VF) {
+               /* Disable PF to VF interrupt */
+               adf_disable_pf2vf_interrupts(accel_dev);
+
+               /* Schedule tasklet to handle interrupt BH */
+               tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
+               handled = true;
+       }
+
+       /* Check bundle interrupt */
+       if (v_int & ADF_VINTSOU_BUN) {
+               struct adf_etr_data *etr_data = accel_dev->transport;
+               struct adf_etr_bank_data *bank = &etr_data->banks[0];
+
+               /* Disable Flag and Coalesce Ring Interrupts */
+               csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
+                                                   bank->bank_number, 0);
+               tasklet_hi_schedule(&bank->resp_handler);
+               handled = true;
+       }
+
+       return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+       unsigned int cpu;
+       int ret;
+
+       snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
+                "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+                PCI_FUNC(pdev->devfn));
+       ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
+                         (void *)accel_dev);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
+                       accel_dev->vf.irq_name);
+               return ret;
+       }
+       cpu = accel_dev->accel_id % num_online_cpus();
+       irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+       accel_dev->vf.irq_enabled = true;
+
+       return ret;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+
+       tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
+                    (unsigned long)priv_data->banks);
+       return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+
+       tasklet_disable(&priv_data->banks[0].resp_handler);
+       tasklet_kill(&priv_data->banks[0].resp_handler);
+}
+
+/**
+ * adf_vf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function frees interrupts for acceleration device virtual function.
+ */
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       if (accel_dev->vf.irq_enabled) {
+               irq_set_affinity_hint(pdev->irq, NULL);
+               free_irq(pdev->irq, accel_dev);
+       }
+       adf_cleanup_bh(accel_dev);
+       adf_cleanup_pf2vf_bh(accel_dev);
+       adf_disable_msi(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);
+
+/**
+ * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function allocates interrupts for acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+       if (adf_enable_msi(accel_dev))
+               goto err_out;
+
+       if (adf_setup_pf2vf_bh(accel_dev))
+               goto err_disable_msi;
+
+       if (adf_setup_bh(accel_dev))
+               goto err_cleanup_pf2vf_bh;
+
+       if (adf_request_msi_irq(accel_dev))
+               goto err_cleanup_bh;
+
+       return 0;
+
+err_cleanup_bh:
+       adf_cleanup_bh(accel_dev);
+
+err_cleanup_pf2vf_bh:
+       adf_cleanup_pf2vf_bh(accel_dev);
+
+err_disable_msi:
+       adf_disable_msi(accel_dev);
+
+err_out:
+       return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+
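+/*
+ * Illustrative pairing (a sketch only; the exact call sites live outside
+ * this file): a VF driver init path would typically do
+ *
+ *	ret = adf_vf_isr_resource_alloc(accel_dev);
+ *	...
+ *	adf_vf_isr_resource_free(accel_dev);
+ *
+ * with the free path tearing down the IRQ, both tasklets and the MSI
+ * set up by the alloc path, in reverse order.
+ */
+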
+/**
+ * adf_flush_vf_wq() - Flush workqueue for VF
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables the PF/VF interrupts on the VF so that no new messages
+ * are received and flushes the workqueue 'adf_vf_stop_wq'.
+ *
+ * Return: void.
+ */
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
+{
+       adf_disable_pf2vf_interrupts(accel_dev);
+
+       flush_workqueue(adf_vf_stop_wq);
+}
+EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
+
+/**
+ * adf_init_vf_wq() - Init workqueue for VF
+ *
+ * Function initializes the workqueue 'adf_vf_stop_wq' for VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int __init adf_init_vf_wq(void)
+{
+       adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
+
+       return !adf_vf_stop_wq ? -EFAULT : 0;
+}
+
+void adf_exit_vf_wq(void)
+{
+       if (adf_vf_stop_wq)
+               destroy_workqueue(adf_vf_stop_wq);
+
+       adf_vf_stop_wq = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
new file mode 100644
index 0000000..c141160
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <linux/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+               (((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+       (((flags) >> (bitpos)) & (mask))
+
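+/*
+ * Illustrative usage (sketch, not part of the patch): QAT_FIELD_SET
+ * clears the field at 'bitpos' and ORs in the masked value;
+ * QAT_FIELD_GET shifts and masks it back out. With a 3-bit field at
+ * bit position 4:
+ *
+ *	u32 flags = 0, val;
+ *
+ *	QAT_FIELD_SET(flags, 0x5, 4, 0x7);	// flags == 0x50
+ *	val = QAT_FIELD_GET(flags, 4, 0x7);	// val == 0x5
+ */
+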
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+       ICP_QAT_FW_COMN_RESP_SERV_NULL,
+       ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+       ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+       ICP_QAT_FW_COMN_REQ_NULL = 0,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+       ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+       union {
+               struct {
+                       __u64 content_desc_addr;
+                       __u16 content_desc_resrvd1;
+                       __u8 content_desc_params_sz;
+                       __u8 content_desc_hdr_resrvd2;
+                       __u32 content_desc_resrvd3;
+               } s;
+               struct {
+                       __u32 serv_specif_fields[4];
+               } s1;
+       } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+       __u64 opaque_data;
+       __u64 src_data_addr;
+       __u64 dest_data_addr;
+       __u32 src_length;
+       __u32 dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+       __u32 content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+       __u8 resrvd1;
+       __u8 service_cmd_id;
+       __u8 service_type;
+       __u8 hdr_flags;
+       __u16 serv_specif_flags;
+       __u16 comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+       __u32 serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+       __u8 xlat_err_code;
+       __u8 cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+       __u8 resrvd1;
+       __u8 service_id;
+       __u8 response_type;
+       __u8 hdr_flags;
+       struct icp_qat_fw_comn_error comn_error;
+       __u8 comn_status;
+       __u8 cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_hdr;
+       __u64 opaque_data;
+       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6
+#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+       icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+       icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+       icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+       icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+       ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
+       QAT_FIELD_GET(hdr_flags, \
+       ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+       ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
+       QAT_FIELD_GET(hdr_flags, \
+       ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+       ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+       ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+       QAT_FIELD_GET(hdr_flags, \
+       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+       (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+       (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+        ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
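+/*
+ * Illustrative usage (sketch): a request header is marked valid before
+ * submission by building its flags byte, e.g.
+ *
+ *	req.comn_hdr.hdr_flags =
+ *		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ *	// hdr_flags == 0x80: valid bit 7 set, reserved bits 6:0 clear
+ */
+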
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+       ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+        | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
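+/*
+ * Illustrative usage (sketch): common request flags for scatter-gather
+ * buffers with a 64-bit content descriptor address:
+ *
+ *	__u16 flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
+ *			QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+ *			QAT_COMN_PTR_TYPE_SGL);
+ *	// flags == 0x1: bit 1 (cd field type) clear, bit 0 (ptr type) set
+ */
+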
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+                       QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+                       QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+                       QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+        & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
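+/*
+ * Illustrative usage (sketch): next_curr_id packs the current slice ID
+ * in the low nibble and the next slice ID in the high nibble, so a
+ * cipher slice chained to a DRAM write becomes:
+ *
+ *	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ *	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+ *	// cd_ctrl->next_curr_id == 0x41
+ */
+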
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+       ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+       QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+       (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+       QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+       (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+       QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+       (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
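+/*
+ * Illustrative decode (sketch): each service reports pass/fail in its
+ * own bit of comn_status, e.g. for a symmetric crypto response:
+ *
+ *	if (ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status) ==
+ *	    ICP_QAT_FW_COMN_STATUS_FLAG_ERROR)
+ *		return -EINVAL;	// hypothetical error handling
+ */
+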
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+       QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+       QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+       QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+       ICP_QAT_FW_SLICE_NULL = 0,
+       ICP_QAT_FW_SLICE_CIPHER = 1,
+       ICP_QAT_FW_SLICE_AUTH = 2,
+       ICP_QAT_FW_SLICE_DRAM_RD = 3,
+       ICP_QAT_FW_SLICE_DRAM_WR = 4,
+       ICP_QAT_FW_SLICE_COMP = 5,
+       ICP_QAT_FW_SLICE_XLAT = 6,
+       ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
new file mode 100644
index 0000000..a03d43f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
@@ -0,0 +1,404 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_FW_COMP_H_
+#define _ICP_QAT_FW_COMP_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_comp_cmd_id {
+       ICP_QAT_FW_COMP_CMD_STATIC = 0,
+       ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
+       ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+       ICP_QAT_FW_COMP_CMD_DELIMITER
+};
+
+enum icp_qat_fw_comp_20_cmd_id {
+       ICP_QAT_FW_COMP_20_CMD_LZ4_COMPRESS = 3,
+       ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4,
+       ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5,
+       ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6,
+       ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7,
+       ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8,
+       ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9,
+       ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10,
+       ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11,
+       ICP_QAT_FW_COMP_20_CMD_DELIMITER
+};
+
+#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
+#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
+#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
+#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
+#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
+#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+
+#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
+       ret_uncomp, secure_ram) \
+       ((((sesstype) & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) << \
+       ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
+       (((autoselect) & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) << \
+       ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
+       (((enhanced_asb) & ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) << \
+       ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
+       (((ret_uncomp) & ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) << \
+       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
+       (((secure_ram) & ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) << \
+       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
+
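+/*
+ * Illustrative usage (sketch): session flags for a stateless session
+ * with auto-select-best and its enhanced variant enabled:
+ *
+ *	__u16 flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
+ *			ICP_QAT_FW_COMP_STATELESS_SESSION,
+ *			ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
+ *			ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST,
+ *			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ *			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+ *	// bits 3 and 4 set -> flags == 0x18
+ */
+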
+#define ICP_QAT_FW_COMP_SESSION_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
+       ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
+       ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS, \
+       ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK)
+
+#define ICP_QAT_FW_COMP_EN_ASB_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS, \
+       ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK)
+
+#define ICP_QAT_FW_COMP_RET_UNCOMP_GET(flags) \
+       QAT_FIELD_GET(flags, \
+       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS, \
+       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK)
+
+#define ICP_QAT_FW_COMP_SECURE_RAM_USE_GET(flags) \
+       QAT_FIELD_GET(flags, \
+       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS, \
+       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK)
+
+struct icp_qat_fw_comp_req_hdr_cd_pars {
+       union {
+               struct {
+                       __u64 content_desc_addr;
+                       __u16 content_desc_resrvd1;
+                       __u8 content_desc_params_sz;
+                       __u8 content_desc_hdr_resrvd2;
+                       __u32 content_desc_resrvd3;
+               } s;
+               struct {
+                       __u32 comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
+                       __u32 content_desc_resrvd4;
+               } sl;
+       } u;
+};
+
+struct icp_qat_fw_comp_req_params {
+       __u32 comp_len;
+       __u32 out_buffer_sz;
+       union {
+               struct {
+                       __u32 initial_crc32;
+                       __u32 initial_adler;
+               } legacy;
+               __u64 crc_data_addr;
+       } crc;
+       __u32 req_par_flags;
+       __u32 rsrvd;
+};
+
+#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \
+                                             cnvdfx, crc, xxhash_acc, \
+                                             cnv_error_type, append_crc, \
+                                             drop_data) \
+       ((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \
+       ICP_QAT_FW_COMP_SOP_BITPOS) | \
+       (((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \
+       ICP_QAT_FW_COMP_EOP_BITPOS) | \
+       (((bfinal) & ICP_QAT_FW_COMP_BFINAL_MASK) \
+       << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
+       (((cnv) & ICP_QAT_FW_COMP_CNV_MASK) << \
+       ICP_QAT_FW_COMP_CNV_BITPOS) | \
+       (((cnvnr) & ICP_QAT_FW_COMP_CNVNR_MASK) \
+       << ICP_QAT_FW_COMP_CNVNR_BITPOS) | \
+       (((cnvdfx) & ICP_QAT_FW_COMP_CNV_DFX_MASK) \
+       << ICP_QAT_FW_COMP_CNV_DFX_BITPOS) | \
+       (((crc) & ICP_QAT_FW_COMP_CRC_MODE_MASK) \
+       << ICP_QAT_FW_COMP_CRC_MODE_BITPOS) | \
+       (((xxhash_acc) & ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) \
+       << ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS) | \
+       (((cnv_error_type) & ICP_QAT_FW_COMP_CNV_ERROR_MASK) \
+       << ICP_QAT_FW_COMP_CNV_ERROR_BITPOS) | \
+       (((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \
+       << ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \
+       (((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \
+       << ICP_QAT_FW_COMP_DROP_DATA_BITPOS))
+
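+/*
+ * Illustrative usage (sketch): request-parameter flags for a single-shot
+ * request (SOP, EOP and BFINAL in one submission) with compress-and-verify
+ * on and every other option at its default:
+ *
+ *	__u32 par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ *			ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ *			ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
+ *			ICP_QAT_FW_COMP_NO_CNV_RECOVERY,
+ *			ICP_QAT_FW_COMP_NO_CNV_DFX,
+ *			ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+ *			ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+ *			ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+ *			ICP_QAT_FW_COMP_NO_APPEND_CRC,
+ *			ICP_QAT_FW_COMP_NO_DROP_DATA);
+ *	// bits 0, 1, 6 and 16 set -> par_flags == 0x10043
+ */
+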
+#define ICP_QAT_FW_COMP_NOT_SOP 0
+#define ICP_QAT_FW_COMP_SOP 1
+#define ICP_QAT_FW_COMP_NOT_EOP 0
+#define ICP_QAT_FW_COMP_EOP 1
+#define ICP_QAT_FW_COMP_NOT_BFINAL 0
+#define ICP_QAT_FW_COMP_BFINAL 1
+#define ICP_QAT_FW_COMP_NO_CNV 0
+#define ICP_QAT_FW_COMP_CNV 1
+#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
+#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
+#define ICP_QAT_FW_COMP_NO_CNV_DFX 0
+#define ICP_QAT_FW_COMP_CNV_DFX 1
+#define ICP_QAT_FW_COMP_CRC_MODE_LEGACY 0
+#define ICP_QAT_FW_COMP_CRC_MODE_E2E 1
+#define ICP_QAT_FW_COMP_NO_XXHASH_ACC 0
+#define ICP_QAT_FW_COMP_XXHASH_ACC 1
+#define ICP_QAT_FW_COMP_APPEND_CRC 1
+#define ICP_QAT_FW_COMP_NO_APPEND_CRC 0
+#define ICP_QAT_FW_COMP_DROP_DATA 1
+#define ICP_QAT_FW_COMP_NO_DROP_DATA 0
+#define ICP_QAT_FW_COMP_SOP_BITPOS 0
+#define ICP_QAT_FW_COMP_SOP_MASK 0x1
+#define ICP_QAT_FW_COMP_EOP_BITPOS 1
+#define ICP_QAT_FW_COMP_EOP_MASK 0x1
+#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
+#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_BITPOS 16
+#define ICP_QAT_FW_COMP_CNV_MASK 0x1
+#define ICP_QAT_FW_COMP_CNVNR_BITPOS 17
+#define ICP_QAT_FW_COMP_CNVNR_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_DFX_BITPOS 18
+#define ICP_QAT_FW_COMP_CNV_DFX_MASK 0x1
+#define ICP_QAT_FW_COMP_CRC_MODE_BITPOS 19
+#define ICP_QAT_FW_COMP_CRC_MODE_MASK 0x1
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS 20
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_ERROR_BITPOS 21
+#define ICP_QAT_FW_COMP_CNV_ERROR_MASK 0b111
+#define ICP_QAT_FW_COMP_CNV_ERROR_NONE 0b000
+#define ICP_QAT_FW_COMP_CNV_ERROR_CHECKSUM 0b001
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_OBC_DIFF 0b010
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR 0b011
+#define ICP_QAT_FW_COMP_CNV_ERROR_XLT 0b100
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_IBC_DIFF 0b101
+#define ICP_QAT_FW_COMP_APPEND_CRC_BITPOS 24
+#define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1
+#define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25
+#define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1
+
+#define ICP_QAT_FW_COMP_SOP_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \
+       ICP_QAT_FW_COMP_SOP_MASK)
+
+#define ICP_QAT_FW_COMP_SOP_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SOP_BITPOS, \
+       ICP_QAT_FW_COMP_SOP_MASK)
+
+#define ICP_QAT_FW_COMP_EOP_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_EOP_BITPOS, \
+       ICP_QAT_FW_COMP_EOP_MASK)
+
+#define ICP_QAT_FW_COMP_EOP_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_EOP_BITPOS, \
+       ICP_QAT_FW_COMP_EOP_MASK)
+
+#define ICP_QAT_FW_COMP_BFINAL_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
+       ICP_QAT_FW_COMP_BFINAL_MASK)
+
+#define ICP_QAT_FW_COMP_BFINAL_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
+       ICP_QAT_FW_COMP_BFINAL_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_BITPOS, \
+       ICP_QAT_FW_COMP_CNV_MASK)
+
+#define ICP_QAT_FW_COMP_CNVNR_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNVNR_BITPOS, \
+       ICP_QAT_FW_COMP_CNVNR_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_DFX_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
+       ICP_QAT_FW_COMP_CNV_DFX_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_DFX_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
+       ICP_QAT_FW_COMP_CNV_DFX_MASK)
+
+#define ICP_QAT_FW_COMP_CRC_MODE_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CRC_MODE_BITPOS, \
+       ICP_QAT_FW_COMP_CRC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
+       ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
+       ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
+       ICP_QAT_FW_COMP_CNV_ERROR_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
+       ICP_QAT_FW_COMP_CNV_ERROR_MASK)
+
+struct icp_qat_fw_xlt_req_params {
+       __u64 inter_buff_ptr;
+};
+
+struct icp_qat_fw_comp_cd_hdr {
+       __u16 ram_bank_flags;
+       __u8 comp_cfg_offset;
+       __u8 next_curr_id;
+       __u32 resrvd;
+       __u64 comp_state_addr;
+       __u64 ram_banks_addr;
+};
+
+#define COMP_CPR_INITIAL_CRC 0
+#define COMP_CPR_INITIAL_ADLER 1
+
+struct icp_qat_fw_xlt_cd_hdr {
+       __u16 resrvd1;
+       __u8 resrvd2;
+       __u8 next_curr_id;
+       __u32 resrvd3;
+};
+
+struct icp_qat_fw_comp_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comp_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comp_req_params comp_pars;
+       union {
+               struct icp_qat_fw_xlt_req_params xlt_pars;
+               __u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+       } u1;
+       __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+       struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
+       union {
+               struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
+               __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];
+       } u2;
+};
+
+struct icp_qat_fw_resp_comp_pars {
+       __u32 input_byte_counter;
+       __u32 output_byte_counter;
+       union {
+               struct {
+                       __u32 curr_crc32;
+                       __u32 curr_adler_32;
+               } legacy;
+               __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_2];
+       } crc;
+};
+
+struct icp_qat_fw_comp_state {
+       __u32 rd8_counter;
+       __u32 status_flags;
+       __u32 in_counter;
+       __u32 out_counter;
+       __u64 intermediate_state;
+       __u32 lobc;
+       __u32 replaybc;
+       __u64 pcrc64_poly;
+       __u32 crc32;
+       __u32 adler_xxhash32;
+       __u64 pcrc64_xorout;
+       __u32 out_buf_size;
+       __u32 in_buf_size;
+       __u64 in_pcrc64;
+       __u64 out_pcrc64;
+       __u32 lobs;
+       __u32 libc;
+       __u64 reserved;
+       __u32 xxhash_state[4];
+       __u32 cleartext[4];
+};
+
+struct icp_qat_fw_comp_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_resp;
+       __u64 opaque_data;
+       struct icp_qat_fw_resp_comp_pars comp_resp_pars;
+};
+
+#define QAT_FW_COMP_BANK_FLAG_MASK 0x1
+#define QAT_FW_COMP_BANK_I_BITPOS 8
+#define QAT_FW_COMP_BANK_H_BITPOS 7
+#define QAT_FW_COMP_BANK_G_BITPOS 6
+#define QAT_FW_COMP_BANK_F_BITPOS 5
+#define QAT_FW_COMP_BANK_E_BITPOS 4
+#define QAT_FW_COMP_BANK_D_BITPOS 3
+#define QAT_FW_COMP_BANK_C_BITPOS 2
+#define QAT_FW_COMP_BANK_B_BITPOS 1
+#define QAT_FW_COMP_BANK_A_BITPOS 0
+
+enum icp_qat_fw_comp_bank_enabled {
+       ICP_QAT_FW_COMP_BANK_DISABLED = 0,
+       ICP_QAT_FW_COMP_BANK_ENABLED = 1,
+       ICP_QAT_FW_COMP_BANK_DELIMITER = 2
+};
+
+#define ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(bank_i_enable, bank_h_enable, \
+                                       bank_g_enable, bank_f_enable, \
+                                       bank_e_enable, bank_d_enable, \
+                                       bank_c_enable, bank_b_enable, \
+                                       bank_a_enable) \
+       ((((bank_i_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_I_BITPOS) | \
+       (((bank_h_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_H_BITPOS) | \
+       (((bank_g_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_G_BITPOS) | \
+       (((bank_f_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_F_BITPOS) | \
+       (((bank_e_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_E_BITPOS) | \
+       (((bank_d_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_D_BITPOS) | \
+       (((bank_c_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_C_BITPOS) | \
+       (((bank_b_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_B_BITPOS) | \
+       (((bank_a_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_A_BITPOS))
+
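+/*
+ * Illustrative usage (sketch): enable only ram banks A and B and leave
+ * the remaining seven disabled:
+ *
+ *	__u16 ram_flags = ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,	// bank I
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,	// bank H
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,	// bank G
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,	// bank F
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,	// bank E
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,	// bank D
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,	// bank C
+ *			ICP_QAT_FW_COMP_BANK_ENABLED,	// bank B
+ *			ICP_QAT_FW_COMP_BANK_ENABLED);	// bank A
+ *	// ram_flags == 0x3
+ */
+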
+struct icp_qat_fw_comp_crc_data_struct {
+       __u32 crc32;
+       union {
+               __u32 adler;
+               __u32 xxhash;
+       } adler_xxhash_u;
+       __u32 cpr_in_crc_lo;
+       __u32 cpr_in_crc_hi;
+       __u32 cpr_out_crc_lo;
+       __u32 cpr_out_crc_hi;
+       __u32 xlt_in_crc_lo;
+       __u32 xlt_in_crc_hi;
+       __u32 xlt_out_crc_lo;
+       __u32 xlt_out_crc_hi;
+       __u32 prog_crc_poly_lo;
+       __u32 prog_crc_poly_hi;
+       __u32 xor_out_lo;
+       __u32 xor_out_hi;
+       __u32 append_crc_lo;
+       __u32 append_crc_hi;
+};
+
+struct xxhash_acc_state_buff {
+       __u32 in_counter;
+       __u32 out_counter;
+       __u32 xxhash_state[4];
+       __u32 clear_txt[4];
+};
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
new file mode 100644
index 0000000..56cb827
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
+#define _ICP_QAT_FW_INIT_ADMIN_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_init_admin_cmd_id {
+       ICP_QAT_FW_INIT_AE = 0,
+       ICP_QAT_FW_TRNG_ENABLE = 1,
+       ICP_QAT_FW_TRNG_DISABLE = 2,
+       ICP_QAT_FW_CONSTANTS_CFG = 3,
+       ICP_QAT_FW_STATUS_GET = 4,
+       ICP_QAT_FW_COUNTERS_GET = 5,
+       ICP_QAT_FW_LOOPBACK = 6,
+       ICP_QAT_FW_HEARTBEAT_SYNC = 7,
+       ICP_QAT_FW_HEARTBEAT_GET = 8,
+       ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
+       ICP_QAT_FW_PM_STATE_CONFIG = 128,
+};
+
+enum icp_qat_fw_init_admin_resp_status {
+       ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
+       ICP_QAT_FW_INIT_RESP_STATUS_FAIL
+};
+
+struct icp_qat_fw_init_admin_req {
+       __u16 init_cfg_sz;
+       __u8 resrvd1;
+       __u8 cmd_id;
+       __u32 resrvd2;
+       __u64 opaque_data;
+       __u64 init_cfg_ptr;
+
+       union {
+               struct {
+                       __u16 ibuf_size_in_kb;
+                       __u16 resrvd3;
+               };
+               __u32 idle_filter;
+       };
+
+       __u32 resrvd4;
+} __packed;
+
+struct icp_qat_fw_init_admin_resp {
+       __u8 flags;
+       __u8 resrvd1;
+       __u8 status;
+       __u8 cmd_id;
+       union {
+               __u32 resrvd2;
+               struct {
+                       __u16 version_minor_num;
+                       __u16 version_major_num;
+               };
+               __u32 extended_features;
+       };
+       __u64 opaque_data;
+       union {
+               __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_4];
+               struct {
+                       __u32 version_patch_num;
+                       __u8 context_id;
+                       __u8 ae_id;
+                       __u16 resrvd4;
+                       __u64 resrvd5;
+               };
+               struct {
+                       __u64 req_rec_count;
+                       __u64 resp_sent_count;
+               };
+               struct {
+                       __u16 compression_algos;
+                       __u16 checksum_algos;
+                       __u32 deflate_capabilities;
+                       __u32 resrvd6;
+                       __u32 lzs_capabilities;
+               };
+               struct {
+                       __u32 cipher_algos;
+                       __u32 hash_algos;
+                       __u16 keygen_algos;
+                       __u16 other;
+                       __u16 public_key_algos;
+                       __u16 prime_algos;
+               };
+               struct {
+                       __u64 timestamp;
+                       __u64 resrvd7;
+               };
+               struct {
+                       __u32 successful_count;
+                       __u32 unsuccessful_count;
+                       __u64 resrvd8;
+               };
+       };
+} __packed;
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
+       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
+       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, \
+                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
+                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h
new file mode 100644
index 0000000..28fa17f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+       ICP_QAT_FW_LA_CMD_CIPHER = 0,
+       ICP_QAT_FW_LA_CMD_AUTH = 1,
+       ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+       ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+       ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+       ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+       ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+       ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+       ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+       ICP_QAT_FW_LA_CMD_MGF1 = 9,
+       ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+       ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+       ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE 1
+#define QAT_LA_SLICE_TYPE_BITPOS 14
+#define QAT_LA_SLICE_TYPE_MASK 0x3
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO        2
+#define ICP_QAT_FW_LA_CCM_PROTO        1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK   0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+       cmp_auth, ret_auth, update_state, \
+       ciph_iv, ciphcfg, partial) \
+       (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+       ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+       QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+       ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+       QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+       ((proto & QAT_LA_PROTO_MASK) << \
+       QAT_LA_PROTO_BITPOS)    | \
+       ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+       QAT_LA_CMP_AUTH_RES_BITPOS) | \
+       ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+       QAT_LA_RET_AUTH_RES_BITPOS) | \
+       ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+       QAT_LA_UPDATE_STATE_BITPOS) | \
+       ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+       QAT_LA_CIPH_IV_FLD_BITPOS) | \
+       ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+       ((partial & QAT_LA_PARTIAL_MASK) << \
+       QAT_LA_PARTIAL_BITPOS))
+
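+/*
+ * Illustrative usage (sketch): serv_specif_flags for an AES-GCM request
+ * with a 12-byte IV passed by 64-bit pointer, digest placed in the
+ * buffer, auth result returned and no partials:
+ *
+ *	__u16 flags = ICP_QAT_FW_LA_FLAGS_BUILD(0,
+ *			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS,
+ *			ICP_QAT_FW_LA_DIGEST_IN_BUFFER,
+ *			ICP_QAT_FW_LA_GCM_PROTO,
+ *			ICP_QAT_FW_LA_NO_CMP_AUTH_RES,
+ *			ICP_QAT_FW_LA_RET_AUTH_RES,
+ *			ICP_QAT_FW_LA_NO_UPDATE_STATE,
+ *			ICP_QAT_FW_CIPH_IV_64BIT_PTR,
+ *			ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP,
+ *			ICP_QAT_FW_LA_PARTIAL_NONE);
+ *	// flags == 0xd20
+ */
+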
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+       QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+       QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+       QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+       QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+       QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+       QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+       QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+       QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+       QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+       QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+       QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+       QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+       QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+       QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+       QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_SLICE_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_SLICE_TYPE_BITPOS, \
+       QAT_LA_SLICE_TYPE_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+       union {
+               struct {
+                       __u64 content_desc_addr;
+                       __u16 content_desc_resrvd1;
+                       __u8 content_desc_params_sz;
+                       __u8 content_desc_hdr_resrvd2;
+                       __u32 content_desc_resrvd3;
+               } s;
+               struct {
+                       __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               } s1;
+       } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+       union {
+               struct {
+                       __u64 content_desc_addr;
+                       __u16 content_desc_resrvd1;
+                       __u8 content_desc_params_sz;
+                       __u8 content_desc_hdr_resrvd2;
+                       __u32 content_desc_resrvd3;
+               } s;
+               struct {
+                       __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               } sl;
+       } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+       __u8 cipher_state_sz;
+       __u8 cipher_key_sz;
+       __u8 cipher_cfg_offset;
+       __u8 next_curr_id;
+       __u8 cipher_padding_sz;
+       __u8 resrvd1;
+       __u16 resrvd2;
+       __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+       __u32 resrvd1;
+       __u8 resrvd2;
+       __u8 hash_flags;
+       __u8 hash_cfg_offset;
+       __u8 next_curr_id;
+       __u8 resrvd3;
+       __u8 outer_prefix_sz;
+       __u8 final_sz;
+       __u8 inner_res_sz;
+       __u8 resrvd4;
+       __u8 inner_state1_sz;
+       __u8 inner_state2_offset;
+       __u8 inner_state2_sz;
+       __u8 outer_config_offset;
+       __u8 outer_state1_sz;
+       __u8 outer_res_sz;
+       __u8 outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+       __u8 cipher_state_sz;
+       __u8 cipher_key_sz;
+       __u8 cipher_cfg_offset;
+       __u8 next_curr_id_cipher;
+       __u8 cipher_padding_sz;
+       __u8 hash_flags;
+       __u8 hash_cfg_offset;
+       __u8 next_curr_id_auth;
+       __u8 resrvd1;
+       __u8 outer_prefix_sz;
+       __u8 final_sz;
+       __u8 inner_res_sz;
+       __u8 resrvd2;
+       __u8 inner_state1_sz;
+       __u8 inner_state2_offset;
+       __u8 inner_state2_sz;
+       __u8 outer_config_offset;
+       __u8 outer_state1_sz;
+       __u8 outer_res_sz;
+       __u8 outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX  240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+       (sizeof(struct icp_qat_fw_la_cipher_req_params_t))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+       __u32 cipher_offset;
+       __u32 cipher_length;
+       union {
+               __u32 cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               struct {
+                       __u64 cipher_IV_ptr;
+                       __u64 resrvd1;
+               } s;
+       } u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+       __u32 auth_off;
+       __u32 auth_len;
+       union {
+               __u64 auth_partial_st_prefix;
+               __u64 aad_adr;
+       } u1;
+       __u64 auth_res_addr;
+       union {
+               __u8 inner_prefix_sz;
+               __u8 aad_sz;
+       } u2;
+       __u8 resrvd1;
+       __u8 hash_state_sz;
+       __u8 auth_res_sz;
+} __packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+       union {
+               __u8 inner_prefix_sz;
+               __u8 aad_sz;
+       } u2;
+       __u8 resrvd1;
+       __u16 resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_resp;
+       __u64 opaque_data;
+       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+         ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
new file mode 100644
index 0000000..7eb5dae
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
+#define __ICP_QAT_FW_LOADER_HANDLE_H__
+#include "icp_qat_uclo.h"
+
+struct icp_qat_fw_loader_ae_data {
+       unsigned int state;
+       unsigned int ustore_size;
+       unsigned int free_addr;
+       unsigned int free_size;
+       unsigned int live_ctx_mask;
+};
+
+struct icp_qat_fw_loader_hal_handle {
+       struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
+       unsigned int ae_mask;
+       unsigned int admin_ae_mask;
+       unsigned int slice_mask;
+       unsigned int revision_id;
+       unsigned int ae_max_num;
+       unsigned int upc_mask;
+       unsigned int max_ustore;
+};
+
+struct icp_qat_fw_loader_chip_info {
+       int mmp_sram_size;
+       bool nn;
+       bool lm2lm3;
+       u32 lm_size;
+       u32 icp_rst_csr;
+       u32 icp_rst_mask;
+       u32 glb_clk_enable_csr;
+       u32 misc_ctl_csr;
+       u32 wakeup_event_val;
+       bool fw_auth;
+       bool css_3k;
+       bool tgroup_share_ustore;
+       u32 fcu_ctl_csr;
+       u32 fcu_sts_csr;
+       u32 fcu_dram_addr_hi;
+       u32 fcu_dram_addr_lo;
+       u32 fcu_loaded_ae_csr;
+       u8 fcu_loaded_ae_pos;
+};
+
+struct icp_qat_fw_loader_handle {
+       struct icp_qat_fw_loader_hal_handle *hal_handle;
+       struct icp_qat_fw_loader_chip_info *chip_info;
+       struct pci_dev *pci_dev;
+       void *obj_handle;
+       void *sobj_handle;
+       void *mobj_handle;
+       unsigned int cfg_ae_mask;
+       void __iomem *hal_sram_addr_v;
+       void __iomem *hal_cap_g_ctl_csr_addr_v;
+       void __iomem *hal_cap_ae_xfer_csr_addr_v;
+       void __iomem *hal_cap_ae_local_csr_addr_v;
+       void __iomem *hal_ep_csr_addr_v;
+};
+
+struct icp_firml_dram_desc {
+       void __iomem *dram_base_addr;
+       void *dram_base_addr_v;
+       dma_addr_t dram_bus_addr;
+       u64 dram_size;
+};
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h
new file mode 100644
index 0000000..9dddae0
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_PKE_
+#define _ICP_QAT_FW_PKE_
+
+#include "icp_qat_fw.h"
+
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+       __u64 content_desc_addr;
+       __u32 content_desc_resrvd;
+       __u32 func_id;
+};
+
+struct icp_qat_fw_req_pke_mid {
+       __u64 opaque;
+       __u64 src_data_addr;
+       __u64 dest_data_addr;
+};
+
+struct icp_qat_fw_req_pke_hdr {
+       __u8 resrvd1;
+       __u8 resrvd2;
+       __u8 service_type;
+       __u8 hdr_flags;
+       __u16 comn_req_flags;
+       __u16 resrvd4;
+       struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+};
+
+struct icp_qat_fw_pke_request {
+       struct icp_qat_fw_req_pke_hdr pke_hdr;
+       struct icp_qat_fw_req_pke_mid pke_mid;
+       __u8 output_param_count;
+       __u8 input_param_count;
+       __u16 resrvd1;
+       __u32 resrvd2;
+       __u64 next_req_adr;
+};
+
+struct icp_qat_fw_resp_pke_hdr {
+       __u8 resrvd1;
+       __u8 resrvd2;
+       __u8 response_type;
+       __u8 hdr_flags;
+       __u16 comn_resp_flags;
+       __u16 resrvd4;
+};
+
+struct icp_qat_fw_pke_resp {
+       struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+       __u64 opaque;
+       __u64 src_data_addr;
+       __u64 dest_data_addr;
+};
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS              7
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK                0x1
+#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
+       QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
+               ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
+               QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+               QAT_COMN_RESP_PKE_STATUS_MASK)
+
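+/*
+ * Illustrative decode (sketch): the PKE status bit lives in byte 1 of
+ * the response flags word, so a failed request can be spotted with:
+ *
+ *	if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(resp.pke_resp_hdr.comn_resp_flags))
+ *		return -EINVAL;	// hypothetical error handling
+ */
+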
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+               ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+               ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h
new file mode 100644
index 0000000..20b2ee1
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef __ICP_QAT_HAL_H
+#define __ICP_QAT_HAL_H
+#include "icp_qat_fw_loader_handle.h"
+
+enum hal_global_csr {
+       MISC_CONTROL = 0xA04,
+       ICP_RESET = 0xA0c,
+       ICP_GLOBAL_CLK_ENABLE = 0xA50
+};
+
+enum {
+       MISC_CONTROL_C4XXX = 0xAA0,
+       ICP_RESET_CPP0 = 0x938,
+       ICP_RESET_CPP1 = 0x93c,
+       ICP_GLOBAL_CLK_ENABLE_CPP0 = 0x964,
+       ICP_GLOBAL_CLK_ENABLE_CPP1 = 0x968
+};
+
+enum hal_ae_csr {
+       USTORE_ADDRESS = 0x000,
+       USTORE_DATA_LOWER = 0x004,
+       USTORE_DATA_UPPER = 0x008,
+       ALU_OUT = 0x010,
+       CTX_ARB_CNTL = 0x014,
+       CTX_ENABLES = 0x018,
+       CC_ENABLE = 0x01c,
+       CSR_CTX_POINTER = 0x020,
+       CTX_STS_INDIRECT = 0x040,
+       ACTIVE_CTX_STATUS = 0x044,
+       CTX_SIG_EVENTS_INDIRECT = 0x048,
+       CTX_SIG_EVENTS_ACTIVE = 0x04c,
+       CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
+       LM_ADDR_0_INDIRECT = 0x060,
+       LM_ADDR_1_INDIRECT = 0x068,
+       LM_ADDR_2_INDIRECT = 0x0cc,
+       LM_ADDR_3_INDIRECT = 0x0d4,
+       INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
+       INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
+       INDIRECT_LM_ADDR_2_BYTE_INDEX = 0x10c,
+       INDIRECT_LM_ADDR_3_BYTE_INDEX = 0x114,
+       INDIRECT_T_INDEX = 0x0f8,
+       INDIRECT_T_INDEX_BYTE_INDEX = 0x0fc,
+       FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
+       TIMESTAMP_LOW = 0x0c0,
+       TIMESTAMP_HIGH = 0x0c4,
+       PROFILE_COUNT = 0x144,
+       SIGNATURE_ENABLE = 0x150,
+       AE_MISC_CONTROL = 0x160,
+       LOCAL_CSR_STATUS = 0x180,
+};
+
+enum fcu_csr {
+       FCU_CONTROL           = 0x8c0,
+       FCU_STATUS            = 0x8c4,
+       FCU_STATUS1           = 0x8c8,
+       FCU_DRAM_ADDR_LO      = 0x8cc,
+       FCU_DRAM_ADDR_HI      = 0x8d0,
+       FCU_RAMBASE_ADDR_HI   = 0x8d4,
+       FCU_RAMBASE_ADDR_LO   = 0x8d8
+};
+
+enum fcu_csr_4xxx {
+       FCU_CONTROL_4XXX           = 0x1000,
+       FCU_STATUS_4XXX            = 0x1004,
+       FCU_ME_BROADCAST_MASK_TYPE = 0x1008,
+       FCU_AE_LOADED_4XXX         = 0x1010,
+       FCU_DRAM_ADDR_LO_4XXX      = 0x1014,
+       FCU_DRAM_ADDR_HI_4XXX      = 0x1018,
+};
+
+enum fcu_cmd {
+       FCU_CTRL_CMD_NOOP  = 0,
+       FCU_CTRL_CMD_AUTH  = 1,
+       FCU_CTRL_CMD_LOAD  = 2,
+       FCU_CTRL_CMD_START = 3
+};
+
+enum fcu_sts {
+       FCU_STS_NO_STS    = 0,
+       FCU_STS_VERI_DONE = 1,
+       FCU_STS_LOAD_DONE = 2,
+       FCU_STS_VERI_FAIL = 3,
+       FCU_STS_LOAD_FAIL = 4,
+       FCU_STS_BUSY      = 5
+};
+
+#define ALL_AE_MASK                 0xFFFFFFFF
+#define UA_ECS                      (0x1 << 31)
+#define ACS_ABO_BITPOS              31
+#define ACS_ACNO                    0x7
+#define CE_ENABLE_BITPOS            0x8
+#define CE_LMADDR_0_GLOBAL_BITPOS   16
+#define CE_LMADDR_1_GLOBAL_BITPOS   17
+#define CE_LMADDR_2_GLOBAL_BITPOS   22
+#define CE_LMADDR_3_GLOBAL_BITPOS   23
+#define CE_T_INDEX_GLOBAL_BITPOS    21
+#define CE_NN_MODE_BITPOS           20
+#define CE_REG_PAR_ERR_BITPOS       25
+#define CE_BREAKPOINT_BITPOS        27
+#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
+#define CE_INUSE_CONTEXTS_BITPOS    31
+#define CE_NN_MODE                  (0x1 << CE_NN_MODE_BITPOS)
+#define CE_INUSE_CONTEXTS           (0x1 << CE_INUSE_CONTEXTS_BITPOS)
+#define XCWE_VOLUNTARY              (0x1)
+#define LCS_STATUS          (0x1)
+#define MMC_SHARE_CS_BITPOS         2
+#define WAKEUP_EVENT 0x10000
+#define FCU_CTRL_BROADCAST_POS   0x4
+#define FCU_CTRL_AE_POS     0x8
+#define FCU_AUTH_STS_MASK   0x7
+#define FCU_STS_DONE_POS    0x9
+#define FCU_STS_AUTHFWLD_POS 0X8
+#define FCU_LOADED_AE_POS   0x16
+#define FW_AUTH_WAIT_PERIOD 10
+#define FW_AUTH_MAX_RETRY   300
+#define ICP_QAT_AE_OFFSET 0x20000
+#define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000)
+#define LOCAL_TO_XFER_REG_OFFSET 0x800
+#define ICP_QAT_EP_OFFSET 0x3a000
+#define ICP_QAT_EP_OFFSET_4XXX   0x200000 /* HI MMIO CSRs */
+#define ICP_QAT_AE_OFFSET_4XXX   0x600000
+#define ICP_QAT_CAP_OFFSET_4XXX  0x640000
+#define SET_CAP_CSR(handle, csr, val) \
+       ADF_CSR_WR((handle)->hal_cap_g_ctl_csr_addr_v, csr, val)
+#define GET_CAP_CSR(handle, csr) \
+       ADF_CSR_RD((handle)->hal_cap_g_ctl_csr_addr_v, csr)
+#define AE_CSR(handle, ae) \
+       ((char __iomem *)(handle)->hal_cap_ae_local_csr_addr_v + ((ae) << 12))
+#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & (csr)))
+#define SET_AE_CSR(handle, ae, csr, val) \
+       ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
+#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
+#define AE_XFER(handle, ae) \
+       ((char __iomem *)(handle)->hal_cap_ae_xfer_csr_addr_v + ((ae) << 12))
+#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
+       (((reg) & 0xff) << 2))
+#define SET_AE_XFER(handle, ae, reg, val) \
+       ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
+#define SRAM_WRITE(handle, addr, val) \
+       ADF_CSR_WR((handle)->hal_sram_addr_v, addr, val)
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
new file mode 100644 (file)
index 0000000..4042739
--- /dev/null
@@ -0,0 +1,376 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+       ICP_QAT_HW_AE_0 = 0,
+       ICP_QAT_HW_AE_1 = 1,
+       ICP_QAT_HW_AE_2 = 2,
+       ICP_QAT_HW_AE_3 = 3,
+       ICP_QAT_HW_AE_4 = 4,
+       ICP_QAT_HW_AE_5 = 5,
+       ICP_QAT_HW_AE_6 = 6,
+       ICP_QAT_HW_AE_7 = 7,
+       ICP_QAT_HW_AE_8 = 8,
+       ICP_QAT_HW_AE_9 = 9,
+       ICP_QAT_HW_AE_10 = 10,
+       ICP_QAT_HW_AE_11 = 11,
+       ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+       ICP_QAT_HW_QAT_0 = 0,
+       ICP_QAT_HW_QAT_1 = 1,
+       ICP_QAT_HW_QAT_2 = 2,
+       ICP_QAT_HW_QAT_3 = 3,
+       ICP_QAT_HW_QAT_4 = 4,
+       ICP_QAT_HW_QAT_5 = 5,
+       ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+       ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+       ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+       ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+       ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+       ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+       ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+       ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+       ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+       ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+       ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+       ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+       ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+       ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+       ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+       ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+       ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+       ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+       ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+       ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+       ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+       ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+       ICP_QAT_HW_AUTH_MODE0 = 0,
+       ICP_QAT_HW_AUTH_MODE1 = 1,
+       ICP_QAT_HW_AUTH_MODE2 = 2,
+       ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+       __u32 config;
+       __u32 reserved;
+};
+
+struct icp_qat_hw_ucs_cipher_config {
+       __u32 val;
+       __u32 reserved[3];
+};
+
+enum icp_qat_slice_mask {
+       ICP_ACCEL_MASK_CIPHER_SLICE = BIT(0),
+       ICP_ACCEL_MASK_AUTH_SLICE = BIT(1),
+       ICP_ACCEL_MASK_PKE_SLICE = BIT(2),
+       ICP_ACCEL_MASK_COMPRESS_SLICE = BIT(3),
+       ICP_ACCEL_MASK_LZS_SLICE = BIT(4),
+       ICP_ACCEL_MASK_EIA3_SLICE = BIT(5),
+       ICP_ACCEL_MASK_SHA3_SLICE = BIT(6),
+};
+
+enum icp_qat_capabilities_mask {
+       ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = BIT(0),
+       ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = BIT(1),
+       ICP_ACCEL_CAPABILITIES_CIPHER = BIT(2),
+       ICP_ACCEL_CAPABILITIES_AUTHENTICATION = BIT(3),
+       ICP_ACCEL_CAPABILITIES_RESERVED_1 = BIT(4),
+       ICP_ACCEL_CAPABILITIES_COMPRESSION = BIT(5),
+       ICP_ACCEL_CAPABILITIES_LZS_COMPRESSION = BIT(6),
+       ICP_ACCEL_CAPABILITIES_RAND = BIT(7),
+       ICP_ACCEL_CAPABILITIES_ZUC = BIT(8),
+       ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9),
+       /* Bits 10-11 are currently reserved */
+       ICP_ACCEL_CAPABILITIES_HKDF = BIT(12),
+       ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13),
+       /* Bit 14 is currently reserved */
+       ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
+       ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
+       ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
+       /* Bits 18-21 are currently reserved */
+       ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22),
+       ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
+       ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
+       ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25),
+       ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26)
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+       ((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+       (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+       ((((algo) >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+        QAT_AUTH_ALGO_SHA3_BITPOS) | \
+       ((((((algo) == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+       ((algo) == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+       & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+       (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
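+
+/*
+ * Illustrative use: the config word for HMAC-SHA256 (MODE1) with a full
+ * 32-byte digest compare would be built as
+ *   ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ *                                ICP_QAT_HW_AUTH_ALGO_SHA256,
+ *                                SHA256_DIGEST_SIZE);
+ * (SHA256_DIGEST_SIZE from <crypto/sha2.h>.)
+ */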
+
+struct icp_qat_hw_auth_counter {
+       __be32 counter;
+       __u32 reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+       (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+       struct icp_qat_hw_auth_config auth_config;
+       struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & ~((n) - 1))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+       ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+       struct icp_qat_hw_auth_setup inner_setup;
+       __u8 state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+       struct icp_qat_hw_auth_setup outer_setup;
+       __u8 state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+       struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+       ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+       ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+       ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+       ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+       ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+       ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+       ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+       ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+       ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+       ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+       ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+       ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+       ICP_QAT_HW_CIPHER_F8_MODE = 3,
+       ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+       ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+       __u32 val;
+       __u32 reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+       ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+       ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+       ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+       ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+       ((((mode) & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+       (((algo) & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+       (((convert) & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+       (((dir) & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
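+
+/*
+ * Illustrative use: an AES-256-CBC encrypt config word would be built as
+ *   ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE,
+ *                                  ICP_QAT_HW_CIPHER_ALGO_AES256,
+ *                                  ICP_QAT_HW_CIPHER_NO_CONVERT,
+ *                                  ICP_QAT_HW_CIPHER_ENCRYPT);
+ */
+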
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
+
+struct icp_qat_hw_cipher_aes256_f8 {
+       struct icp_qat_hw_cipher_config cipher_config;
+       __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_ucs_cipher_aes256_f8 {
+       struct icp_qat_hw_ucs_cipher_config cipher_config;
+       __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_cipher_algo_blk {
+       union {
+               struct icp_qat_hw_cipher_aes256_f8 aes;
+               struct icp_qat_hw_ucs_cipher_aes256_f8 ucs_aes;
+       };
+} __aligned(64);
+
+enum icp_qat_hw_compression_direction {
+       ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
+       ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
+       ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_delayed_match {
+       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
+       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
+       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_algo {
+       ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
+       ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
+       ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_depth {
+       ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
+       ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
+       ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
+       ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
+       ICP_QAT_HW_COMPRESSION_DEPTH_128 = 4,
+       ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 5
+};
+
+enum icp_qat_hw_compression_file_type {
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
+};
+
+struct icp_qat_hw_compression_config {
+       __u32 lower_val;
+       __u32 upper_val;
+};
+
+#define QAT_COMPRESSION_DIR_BITPOS 4
+#define QAT_COMPRESSION_DIR_MASK 0x7
+#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
+#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
+#define QAT_COMPRESSION_ALGO_BITPOS 31
+#define QAT_COMPRESSION_ALGO_MASK 0x1
+#define QAT_COMPRESSION_DEPTH_BITPOS 28
+#define QAT_COMPRESSION_DEPTH_MASK 0x7
+#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
+#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF
+
+#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(dir, delayed, \
+       algo, depth, filetype) \
+       ((((dir) & QAT_COMPRESSION_DIR_MASK) << \
+       QAT_COMPRESSION_DIR_BITPOS) | \
+       (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) << \
+       QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
+       (((algo) & QAT_COMPRESSION_ALGO_MASK) << \
+       QAT_COMPRESSION_ALGO_BITPOS) | \
+       (((depth) & QAT_COMPRESSION_DEPTH_MASK) << \
+       QAT_COMPRESSION_DEPTH_BITPOS) | \
+       (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) << \
+       QAT_COMPRESSION_FILE_TYPE_BITPOS))
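+/*
+ * Illustrative use: a DEFLATE compression config with delayed match
+ * disabled, search depth 1 and file type 0 would be built as
+ *   ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+ *             ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ *             ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ *             ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ *             ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+ */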
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
new file mode 100644 (file)
index 0000000..7ea8962
--- /dev/null
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_HW_20_COMP_H_
+#define _ICP_QAT_HW_20_COMP_H_
+
+#include "icp_qat_hw_20_comp_defs.h"
+#include "icp_qat_fw.h"
+
+struct icp_qat_hw_comp_20_config_csr_lower {
+       enum icp_qat_hw_comp_20_extended_delay_match_mode edmm;
+       enum icp_qat_hw_comp_20_hw_comp_format algo;
+       enum icp_qat_hw_comp_20_search_depth sd;
+       enum icp_qat_hw_comp_20_hbs_control hbs;
+       enum icp_qat_hw_comp_20_abd abd;
+       enum icp_qat_hw_comp_20_lllbd_ctrl lllbd;
+       enum icp_qat_hw_comp_20_min_match_control mmctrl;
+       enum icp_qat_hw_comp_20_skip_hash_collision hash_col;
+       enum icp_qat_hw_comp_20_skip_hash_update hash_update;
+       enum icp_qat_hw_comp_20_byte_skip skip_ctrl;
+};
+
+static inline __u32
+ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_20_config_csr_lower csr)
+{
+       u32 val32 = 0;
+
+       QAT_FIELD_SET(val32, csr.algo,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK);
+       QAT_FIELD_SET(val32, csr.sd,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK);
+       QAT_FIELD_SET(val32, csr.edmm,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK);
+       QAT_FIELD_SET(val32, csr.hbs,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.lllbd,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
+       QAT_FIELD_SET(val32, csr.mmctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.hash_col,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK);
+       QAT_FIELD_SET(val32, csr.hash_update,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK);
+       QAT_FIELD_SET(val32, csr.skip_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK);
+       QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
+
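+       /* Byte-swap the assembled CSR into the ordering the HW expects. */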
+       return __builtin_bswap32(val32);
+}
+
+struct icp_qat_hw_comp_20_config_csr_upper {
+       enum icp_qat_hw_comp_20_scb_control scb_ctrl;
+       enum icp_qat_hw_comp_20_rmb_control rmb_ctrl;
+       enum icp_qat_hw_comp_20_som_control som_ctrl;
+       enum icp_qat_hw_comp_20_skip_hash_rd_control skip_hash_ctrl;
+       enum icp_qat_hw_comp_20_scb_unload_control scb_unload_ctrl;
+       enum icp_qat_hw_comp_20_disable_token_fusion_control disable_token_fusion_ctrl;
+       enum icp_qat_hw_comp_20_lbms lbms;
+       enum icp_qat_hw_comp_20_scb_mode_reset_mask scb_mode_reset;
+       __u16 lazy;
+       __u16 nice;
+};
+
+static inline __u32
+ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_20_config_csr_upper csr)
+{
+       u32 val32 = 0;
+
+       QAT_FIELD_SET(val32, csr.scb_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.rmb_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.som_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.skip_hash_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.scb_unload_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.disable_token_fusion_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.lbms,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK);
+       QAT_FIELD_SET(val32, csr.scb_mode_reset,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
+       QAT_FIELD_SET(val32, csr.lazy,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK);
+       QAT_FIELD_SET(val32, csr.nice,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
+
+       return __builtin_bswap32(val32);
+}
+
+struct icp_qat_hw_decomp_20_config_csr_lower {
+       enum icp_qat_hw_decomp_20_hbs_control hbs;
+       enum icp_qat_hw_decomp_20_lbms lbms;
+       enum icp_qat_hw_decomp_20_hw_comp_format algo;
+       enum icp_qat_hw_decomp_20_min_match_control mmctrl;
+       enum icp_qat_hw_decomp_20_lz4_block_checksum_present lbc;
+};
+
+static inline __u32
+ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_20_config_csr_lower csr)
+{
+       u32 val32 = 0;
+
+       QAT_FIELD_SET(val32, csr.hbs,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.lbms,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK);
+       QAT_FIELD_SET(val32, csr.algo,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK);
+       QAT_FIELD_SET(val32, csr.mmctrl,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.lbc,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK);
+
+       return __builtin_bswap32(val32);
+}
+
+struct icp_qat_hw_decomp_20_config_csr_upper {
+       enum icp_qat_hw_decomp_20_speculative_decoder_control sdc;
+       enum icp_qat_hw_decomp_20_mini_cam_control mcc;
+};
+
+static inline __u32
+ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_20_config_csr_upper csr)
+{
+       u32 val32 = 0;
+
+       QAT_FIELD_SET(val32, csr.sdc,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.mcc,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
+
+       return __builtin_bswap32(val32);
+}
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h
new file mode 100644 (file)
index 0000000..208d455
--- /dev/null
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_HW_20_COMP_DEFS_H
+#define _ICP_QAT_HW_20_COMP_DEFS_H
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS 31
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_control {
+       ICP_QAT_HW_COMP_20_SCB_CONTROL_ENABLE = 0x0,
+       ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS 30
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_rmb_control {
+       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL = 0x0,
+       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_FC_ONLY = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS 28
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK 0x3
+
+enum icp_qat_hw_comp_20_som_control {
+       ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE = 0x0,
+       ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE = 0x1,
+       ICP_QAT_HW_COMP_20_SOM_CONTROL_INPUT_CRC = 0x2,
+       ICP_QAT_HW_COMP_20_SOM_CONTROL_RESERVED_MODE = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_rd_control {
+       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
+       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS 26
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_unload_control {
+       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD = 0x0,
+       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_NO_UNLOAD = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS 21
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_disable_token_fusion_control {
+       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE = 0x0,
+       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS 19
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK 0x3
+
+enum icp_qat_hw_comp_20_lbms {
+       ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB = 0x0,
+       ICP_QAT_HW_COMP_20_LBMS_LBMS_256KB = 0x1,
+       ICP_QAT_HW_COMP_20_LBMS_LBMS_1MB = 0x2,
+       ICP_QAT_HW_COMP_20_LBMS_LBMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_mode_reset_mask {
+       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS = 0x0,
+       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS_AND_HISTORY = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS 9
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK 0x1ff
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL 258
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS 0
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK 0x1ff
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL 259
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
+
+enum icp_qat_hw_comp_20_hbs_control {
+       ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
+       ICP_QAT_HW_COMP_23_HBS_CONTROL_HBS_IS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS 13
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK 0x1
+
+enum icp_qat_hw_comp_20_abd {
+       ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED = 0x0,
+       ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK 0x1
+
+enum icp_qat_hw_comp_20_lllbd_ctrl {
+       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
+       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK 0xf
+
+enum icp_qat_hw_comp_20_search_depth {
+       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 = 0x1,
+       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6 = 0x3,
+       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9 = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS 5
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK 0x7
+
+enum icp_qat_hw_comp_20_hw_comp_format {
+       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77 = 0x0,
+       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE = 0x1,
+       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4 = 0x2,
+       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S = 0x3,
+       ICP_QAT_HW_COMP_23_HW_COMP_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_min_match_control {
+       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_collision {
+       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW = 0x0,
+       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_update {
+       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW = 0x0,
+       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS 1
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK 0x1
+
+enum icp_qat_hw_comp_20_byte_skip {
+       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN = 0x0,
+       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS 0
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK 0x1
+
+enum icp_qat_hw_comp_20_extended_delay_match_mode {
+       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED = 0x0,
+       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS 31
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_speculative_decoder_control {
+       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE = 0x0,
+       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS 30
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_mini_cam_control {
+       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE = 0x0,
+       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
+
+enum icp_qat_hw_decomp_20_hbs_control {
+       ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS 8
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK 0x3
+
+enum icp_qat_hw_decomp_20_lbms {
+       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB = 0x0,
+       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_256KB = 0x1,
+       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_1MB = 0x2,
+       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS 5
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK 0x7
+
+enum icp_qat_hw_decomp_20_hw_comp_format {
+       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE = 0x1,
+       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4 = 0x2,
+       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S = 0x3,
+       ICP_QAT_HW_DECOMP_23_HW_DECOMP_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_min_match_control {
+       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS 3
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK 0x1
+
+enum icp_qat_hw_decomp_20_lz4_block_checksum_present {
+       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT = 0x0,
+       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
new file mode 100644 (file)
index 0000000..69482ab
--- /dev/null
@@ -0,0 +1,585 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef __ICP_QAT_UCLO_H__
+#define __ICP_QAT_UCLO_H__
+
+#define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000
+#define ICP_QAT_AC_C62X_DEV_TYPE   0x01000000
+#define ICP_QAT_AC_C3XXX_DEV_TYPE  0x02000000
+#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
+#define ICP_QAT_UCLO_MAX_AE       12
+#define ICP_QAT_UCLO_MAX_CTX      8
+#define ICP_QAT_UCLO_MAX_UIMAGE   (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
+#define ICP_QAT_UCLO_MAX_USTORE   0x4000
+#define ICP_QAT_UCLO_MAX_XFER_REG 128
+#define ICP_QAT_UCLO_MAX_GPR_REG  128
+#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
+#define ICP_QAT_UCLO_MAX_LMEM_REG_2X 1280
+#define ICP_QAT_UCLO_AE_ALL_CTX   0xff
+#define ICP_QAT_UOF_OBJID_LEN     8
+#define ICP_QAT_UOF_FID 0xc6c2
+#define ICP_QAT_UOF_MAJVER 0x4
+#define ICP_QAT_UOF_MINVER 0x11
+#define ICP_QAT_UOF_OBJS        "UOF_OBJS"
+#define ICP_QAT_UOF_STRT        "UOF_STRT"
+#define ICP_QAT_UOF_IMAG        "UOF_IMAG"
+#define ICP_QAT_UOF_IMEM        "UOF_IMEM"
+#define ICP_QAT_UOF_LOCAL_SCOPE     1
+#define ICP_QAT_UOF_INIT_EXPR               0
+#define ICP_QAT_UOF_INIT_REG                1
+#define ICP_QAT_UOF_INIT_REG_CTX            2
+#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP   3
+#define ICP_QAT_SUOF_OBJ_ID_LEN             8
+#define ICP_QAT_SUOF_FID  0x53554f46
+#define ICP_QAT_SUOF_MAJVER 0x0
+#define ICP_QAT_SUOF_MINVER 0x1
+#define ICP_QAT_SUOF_OBJ_NAME_LEN 128
+#define ICP_QAT_MOF_OBJ_ID_LEN 8
+#define ICP_QAT_MOF_OBJ_CHUNKID_LEN 8
+#define ICP_QAT_MOF_FID 0x00666f6d
+#define ICP_QAT_MOF_MAJVER 0x0
+#define ICP_QAT_MOF_MINVER 0x1
+#define ICP_QAT_MOF_SYM_OBJS "SYM_OBJS"
+#define ICP_QAT_SUOF_OBJS "SUF_OBJS"
+#define ICP_QAT_SUOF_IMAG "SUF_IMAG"
+#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN    (50 * sizeof(unsigned long long))
+#define ICP_QAT_SIMG_AE_INSTS_LEN       (0x4000 * sizeof(unsigned long long))
+
+#define DSS_FWSK_MODULUS_LEN    384 /* RSA3K */
+#define DSS_FWSK_EXPONENT_LEN   4
+#define DSS_FWSK_PADDING_LEN    380
+#define DSS_SIGNATURE_LEN       384 /* RSA3K */
+
+#define CSS_FWSK_MODULUS_LEN    256 /* RSA2K */
+#define CSS_FWSK_EXPONENT_LEN   4
+#define CSS_FWSK_PADDING_LEN    252
+#define CSS_SIGNATURE_LEN       256 /* RSA2K */
+
+#define ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)   ((handle)->chip_info->css_3k ? \
+                                               DSS_FWSK_MODULUS_LEN  : \
+                                               CSS_FWSK_MODULUS_LEN)
+
+#define ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)  ((handle)->chip_info->css_3k ? \
+                                               DSS_FWSK_EXPONENT_LEN : \
+                                               CSS_FWSK_EXPONENT_LEN)
+
+#define ICP_QAT_CSS_FWSK_PAD_LEN(handle)       ((handle)->chip_info->css_3k ? \
+                                               DSS_FWSK_PADDING_LEN : \
+                                               CSS_FWSK_PADDING_LEN)
+
+#define ICP_QAT_CSS_FWSK_PUB_LEN(handle)       (ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
+                                               ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
+                                               ICP_QAT_CSS_FWSK_PAD_LEN(handle))
+
+#define ICP_QAT_CSS_SIGNATURE_LEN(handle)      ((handle)->chip_info->css_3k ? \
+                                               DSS_SIGNATURE_LEN : \
+                                               CSS_SIGNATURE_LEN)
+
+#define ICP_QAT_CSS_AE_IMG_LEN     (sizeof(struct icp_qat_simg_ae_mode) + \
+                                   ICP_QAT_SIMG_AE_INIT_SEQ_LEN +         \
+                                   ICP_QAT_SIMG_AE_INSTS_LEN)
+#define ICP_QAT_CSS_AE_SIMG_LEN(handle) (sizeof(struct icp_qat_css_hdr) + \
+                                       ICP_QAT_CSS_FWSK_PUB_LEN(handle) + \
+                                       ICP_QAT_CSS_SIGNATURE_LEN(handle) + \
+                                       ICP_QAT_CSS_AE_IMG_LEN)
+#define ICP_QAT_AE_IMG_OFFSET(handle) (sizeof(struct icp_qat_css_hdr) + \
+                                       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
+                                       ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
+                                       ICP_QAT_CSS_SIGNATURE_LEN(handle))
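+/*
+ * Layout implied by the offsets above: CSS header, FW signing public key
+ * (modulus, exponent, padding), signature, then the AE image itself
+ * (AE mode data, init sequence, instructions).
+ */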
+#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN    0x40000
+#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN    0x30000
+
+#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
+#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
+#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
+#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
+
+#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
+#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
+#define ICP_QAT_LOC_MEM2_MODE(ae_mode) (((ae_mode) >> 0x6) & 0x1)
+#define ICP_QAT_LOC_MEM3_MODE(ae_mode) (((ae_mode) >> 0x7) & 0x1)
+#define ICP_QAT_LOC_TINDEX_MODE(ae_mode) (((ae_mode) >> 0xe) & 0x1)
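+/*
+ * ae_mode bit layout implied by the extractors above: ctx mode in [3:0],
+ * NN mode in [7:4], local-memory modes in bits 6-9 (LM2, LM3, LM0, LM1),
+ * shared ustore in bit 11, reloadable shared ctx in bit 12 and T-index
+ * mode in bit 14.
+ */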
+
+enum icp_qat_uof_mem_region {
+       ICP_QAT_UOF_SRAM_REGION = 0x0,
+       ICP_QAT_UOF_LMEM_REGION = 0x3,
+       ICP_QAT_UOF_UMEM_REGION = 0x5
+};
+
+enum icp_qat_uof_regtype {
+       ICP_NO_DEST     = 0,
+       ICP_GPA_REL     = 1,
+       ICP_GPA_ABS     = 2,
+       ICP_GPB_REL     = 3,
+       ICP_GPB_ABS     = 4,
+       ICP_SR_REL      = 5,
+       ICP_SR_RD_REL   = 6,
+       ICP_SR_WR_REL   = 7,
+       ICP_SR_ABS      = 8,
+       ICP_SR_RD_ABS   = 9,
+       ICP_SR_WR_ABS   = 10,
+       ICP_DR_REL      = 19,
+       ICP_DR_RD_REL   = 20,
+       ICP_DR_WR_REL   = 21,
+       ICP_DR_ABS      = 22,
+       ICP_DR_RD_ABS   = 23,
+       ICP_DR_WR_ABS   = 24,
+       ICP_LMEM        = 26,
+       ICP_LMEM0       = 27,
+       ICP_LMEM1       = 28,
+       ICP_NEIGH_REL   = 31,
+       ICP_LMEM2       = 61,
+       ICP_LMEM3       = 62,
+};
+
+enum icp_qat_css_fwtype {
+       CSS_AE_FIRMWARE = 0,
+       CSS_MMP_FIRMWARE = 1
+};
+
+struct icp_qat_uclo_page {
+       struct icp_qat_uclo_encap_page *encap_page;
+       struct icp_qat_uclo_region *region;
+       unsigned int flags;
+};
+
+struct icp_qat_uclo_region {
+       struct icp_qat_uclo_page *loaded;
+       struct icp_qat_uclo_page *page;
+};
+
+struct icp_qat_uclo_aeslice {
+       struct icp_qat_uclo_region *region;
+       struct icp_qat_uclo_page *page;
+       struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
+       struct icp_qat_uclo_encapme *encap_image;
+       unsigned int ctx_mask_assigned;
+       unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uclo_aedata {
+       unsigned int slice_num;
+       unsigned int eff_ustore_size;
+       struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uof_encap_obj {
+       char *beg_uof;
+       struct icp_qat_uof_objhdr *obj_hdr;
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+       struct icp_qat_uof_varmem_seg *var_mem_seg;
+};
+
+struct icp_qat_uclo_encap_uwblock {
+       unsigned int start_addr;
+       unsigned int words_num;
+       u64 micro_words;
+};
+
+struct icp_qat_uclo_encap_page {
+       unsigned int def_page;
+       unsigned int page_region;
+       unsigned int beg_addr_v;
+       unsigned int beg_addr_p;
+       unsigned int micro_words_num;
+       unsigned int uwblock_num;
+       struct icp_qat_uclo_encap_uwblock *uwblock;
+};
+
+struct icp_qat_uclo_encapme {
+       struct icp_qat_uof_image *img_ptr;
+       struct icp_qat_uclo_encap_page *page;
+       unsigned int ae_reg_num;
+       struct icp_qat_uof_ae_reg *ae_reg;
+       unsigned int init_regsym_num;
+       struct icp_qat_uof_init_regsym *init_regsym;
+       unsigned int sbreak_num;
+       struct icp_qat_uof_sbreak *sbreak;
+       unsigned int uwords_num;
+};
+
+struct icp_qat_uclo_init_mem_table {
+       unsigned int entry_num;
+       struct icp_qat_uof_initmem *init_mem;
+};
+
+struct icp_qat_uclo_objhdr {
+       char *file_buff;
+       unsigned int checksum;
+       unsigned int size;
+};
+
+struct icp_qat_uof_strtable {
+       unsigned int table_len;
+       unsigned int reserved;
+       u64 strings;
+};
+
+struct icp_qat_uclo_objhandle {
+       unsigned int prod_type;
+       unsigned int prod_rev;
+       struct icp_qat_uclo_objhdr *obj_hdr;
+       struct icp_qat_uof_encap_obj encap_uof_obj;
+       struct icp_qat_uof_strtable str_table;
+       struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
+       struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
+       struct icp_qat_uclo_init_mem_table init_mem_tab;
+       struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
+       struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
+       int uimage_num;
+       int uword_in_bytes;
+       int global_inited;
+       unsigned int ae_num;
+       unsigned int ustore_phy_size;
+       void *obj_buf;
+       u64 *uword_buf;
+};
+
+struct icp_qat_uof_uword_block {
+       unsigned int start_addr;
+       unsigned int words_num;
+       unsigned int uword_offset;
+       unsigned int reserved;
+};
+
+struct icp_qat_uof_filehdr {
+       unsigned short file_id;
+       unsigned short reserved1;
+       char min_ver;
+       char maj_ver;
+       unsigned short reserved2;
+       unsigned short max_chunks;
+       unsigned short num_chunks;
+};
+
+struct icp_qat_uof_filechunkhdr {
+       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+       unsigned int checksum;
+       unsigned int offset;
+       unsigned int size;
+};
+
+struct icp_qat_uof_objhdr {
+       unsigned int ac_dev_type;
+       unsigned short min_cpu_ver;
+       unsigned short max_cpu_ver;
+       short max_chunks;
+       short num_chunks;
+       unsigned int reserved1;
+       unsigned int reserved2;
+};
+
+struct icp_qat_uof_chunkhdr {
+       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+       unsigned int offset;
+       unsigned int size;
+};
+
+struct icp_qat_uof_memvar_attr {
+       unsigned int offset_in_byte;
+       unsigned int value;
+};
+
+struct icp_qat_uof_initmem {
+       unsigned int sym_name;
+       char region;
+       char scope;
+       unsigned short reserved1;
+       unsigned int addr;
+       unsigned int num_in_bytes;
+       unsigned int val_attr_num;
+};
+
+struct icp_qat_uof_init_regsym {
+       unsigned int sym_name;
+       char init_type;
+       char value_type;
+       char reg_type;
+       unsigned char ctx;
+       unsigned int reg_addr;
+       unsigned int value;
+};
+
+struct icp_qat_uof_varmem_seg {
+       unsigned int sram_base;
+       unsigned int sram_size;
+       unsigned int sram_alignment;
+       unsigned int sdram_base;
+       unsigned int sdram_size;
+       unsigned int sdram_alignment;
+       unsigned int sdram1_base;
+       unsigned int sdram1_size;
+       unsigned int sdram1_alignment;
+       unsigned int scratch_base;
+       unsigned int scratch_size;
+       unsigned int scratch_alignment;
+};
+
+struct icp_qat_uof_gtid {
+       char tool_id[ICP_QAT_UOF_OBJID_LEN];
+       int tool_ver;
+       unsigned int reserved1;
+       unsigned int reserved2;
+};
+
+struct icp_qat_uof_sbreak {
+       unsigned int page_num;
+       unsigned int virt_uaddr;
+       unsigned char sbreak_type;
+       unsigned char reg_type;
+       unsigned short reserved1;
+       unsigned int addr_offset;
+       unsigned int reg_addr;
+};
+
+struct icp_qat_uof_code_page {
+       unsigned int page_region;
+       unsigned int page_num;
+       unsigned char def_page;
+       unsigned char reserved2;
+       unsigned short reserved1;
+       unsigned int beg_addr_v;
+       unsigned int beg_addr_p;
+       unsigned int neigh_reg_tab_offset;
+       unsigned int uc_var_tab_offset;
+       unsigned int imp_var_tab_offset;
+       unsigned int imp_expr_tab_offset;
+       unsigned int code_area_offset;
+};
+
+struct icp_qat_uof_image {
+       unsigned int img_name;
+       unsigned int ae_assigned;
+       unsigned int ctx_assigned;
+       unsigned int ac_dev_type;
+       unsigned int entry_address;
+       unsigned int fill_pattern[2];
+       unsigned int reloadable_size;
+       unsigned char sensitivity;
+       unsigned char reserved;
+       unsigned short ae_mode;
+       unsigned short max_ver;
+       unsigned short min_ver;
+       unsigned short image_attrib;
+       unsigned short reserved2;
+       unsigned short page_region_num;
+       unsigned short numpages;
+       unsigned int reg_tab_offset;
+       unsigned int init_reg_sym_tab;
+       unsigned int sbreak_tab;
+       unsigned int app_metadata;
+};
+
+struct icp_qat_uof_objtable {
+       unsigned int entry_num;
+};
+
+struct icp_qat_uof_ae_reg {
+       unsigned int name;
+       unsigned int vis_name;
+       unsigned short type;
+       unsigned short addr;
+       unsigned short access_mode;
+       unsigned char visible;
+       unsigned char reserved1;
+       unsigned short ref_count;
+       unsigned short reserved2;
+       unsigned int xo_id;
+};
+
+struct icp_qat_uof_code_area {
+       unsigned int micro_words_num;
+       unsigned int uword_block_tab;
+};
+
+struct icp_qat_uof_batch_init {
+       unsigned int ae;
+       unsigned int addr;
+       unsigned int *value;
+       unsigned int size;
+       struct icp_qat_uof_batch_init *next;
+};
+
+struct icp_qat_suof_img_hdr {
+       char          *simg_buf;
+       unsigned long simg_len;
+       char          *css_header;
+       char          *css_key;
+       char          *css_signature;
+       char          *css_simg;
+       unsigned long simg_size;
+       unsigned int  ae_num;
+       unsigned int  ae_mask;
+       unsigned int  fw_type;
+       unsigned long simg_name;
+       unsigned long appmeta_data;
+};
+
+struct icp_qat_suof_img_tbl {
+       unsigned int num_simgs;
+       struct icp_qat_suof_img_hdr *simg_hdr;
+};
+
+struct icp_qat_suof_handle {
+       unsigned int  file_id;
+       unsigned int  check_sum;
+       char          min_ver;
+       char          maj_ver;
+       char          fw_type;
+       char          *suof_buf;
+       unsigned int  suof_size;
+       char          *sym_str;
+       unsigned int  sym_size;
+       struct icp_qat_suof_img_tbl img_table;
+};
+
+struct icp_qat_fw_auth_desc {
+       unsigned int   img_len;
+       unsigned int   ae_mask;
+       unsigned int   css_hdr_high;
+       unsigned int   css_hdr_low;
+       unsigned int   img_high;
+       unsigned int   img_low;
+       unsigned int   signature_high;
+       unsigned int   signature_low;
+       unsigned int   fwsk_pub_high;
+       unsigned int   fwsk_pub_low;
+       unsigned int   img_ae_mode_data_high;
+       unsigned int   img_ae_mode_data_low;
+       unsigned int   img_ae_init_data_high;
+       unsigned int   img_ae_init_data_low;
+       unsigned int   img_ae_insts_high;
+       unsigned int   img_ae_insts_low;
+};
+
+struct icp_qat_auth_chunk {
+       struct icp_qat_fw_auth_desc fw_auth_desc;
+       u64 chunk_size;
+       u64 chunk_bus_addr;
+};
+
+struct icp_qat_css_hdr {
+       unsigned int module_type;
+       unsigned int header_len;
+       unsigned int header_ver;
+       unsigned int module_id;
+       unsigned int module_vendor;
+       unsigned int date;
+       unsigned int size;
+       unsigned int key_size;
+       unsigned int module_size;
+       unsigned int exponent_size;
+       unsigned int fw_type;
+       unsigned int reserved[21];
+};
+
+struct icp_qat_simg_ae_mode {
+       unsigned int     file_id;
+       unsigned short   maj_ver;
+       unsigned short   min_ver;
+       unsigned int     dev_type;
+       unsigned short   devmax_ver;
+       unsigned short   devmin_ver;
+       unsigned int     ae_mask;
+       unsigned int     ctx_enables;
+       char             fw_type;
+       char             ctx_mode;
+       char             nn_mode;
+       char             lm0_mode;
+       char             lm1_mode;
+       char             scs_mode;
+       char             lm2_mode;
+       char             lm3_mode;
+       char             tindex_mode;
+       unsigned char    reserved[7];
+       char             simg_name[256];
+       char             appmeta_data[256];
+};
+
+struct icp_qat_suof_filehdr {
+       unsigned int     file_id;
+       unsigned int     check_sum;
+       char             min_ver;
+       char             maj_ver;
+       char             fw_type;
+       char             reserved;
+       unsigned short   max_chunks;
+       unsigned short   num_chunks;
+};
+
+struct icp_qat_suof_chunk_hdr {
+       char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN];
+       u64 offset;
+       u64 size;
+};
+
+struct icp_qat_suof_strtable {
+       unsigned int tab_length;
+       unsigned int strings;
+};
+
+struct icp_qat_suof_objhdr {
+       unsigned int img_length;
+       unsigned int reserved;
+};
+
+struct icp_qat_mof_file_hdr {
+       unsigned int file_id;
+       unsigned int checksum;
+       char min_ver;
+       char maj_ver;
+       unsigned short reserved;
+       unsigned short max_chunks;
+       unsigned short num_chunks;
+};
+
+struct icp_qat_mof_chunkhdr {
+       char chunk_id[ICP_QAT_MOF_OBJ_ID_LEN];
+       u64 offset;
+       u64 size;
+};
+
+struct icp_qat_mof_str_table {
+       unsigned int tab_len;
+       unsigned int strings;
+};
+
+struct icp_qat_mof_obj_hdr {
+       unsigned short max_chunks;
+       unsigned short num_chunks;
+       unsigned int reserved;
+};
+
+struct icp_qat_mof_obj_chunkhdr {
+       char chunk_id[ICP_QAT_MOF_OBJ_CHUNKID_LEN];
+       u64 offset;
+       u64 size;
+       unsigned int name;
+       unsigned int reserved;
+};
+
+struct icp_qat_mof_objhdr {
+       char *obj_name;
+       char *obj_buf;
+       unsigned int obj_size;
+};
+
+struct icp_qat_mof_table {
+       unsigned int num_objs;
+       struct icp_qat_mof_objhdr *obj_hdr;
+};
+
+struct icp_qat_mof_handle {
+       unsigned int file_id;
+       unsigned int checksum;
+       char min_ver;
+       char maj_ver;
+       char *mof_buf;
+       u32 mof_size;
+       char *sym_str;
+       unsigned int sym_size;
+       char *uobjs_hdr;
+       char *sobjs_hdr;
+       struct icp_qat_mof_table obj_table;
+};
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
new file mode 100644 (file)
index 0000000..538dcbf
--- /dev/null
@@ -0,0 +1,1424 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+#include "qat_bl.h"
+
+#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+                                      ICP_QAT_HW_CIPHER_NO_CONVERT, \
+                                      ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+                                      ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+                                      ICP_QAT_HW_CIPHER_DECRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+                                      ICP_QAT_HW_CIPHER_NO_CONVERT, \
+                                      ICP_QAT_HW_CIPHER_DECRYPT)
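+/*
+ * KEY_CONVERT asks the hardware to derive the decryption key schedule
+ * from the encryption key; the NO_CONV decrypt variant suits modes such
+ * as CTR that run the forward cipher in both directions.
+ */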
+
+#define HW_CAP_AES_V2(accel_dev) \
+       (GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
+        ICP_ACCEL_CAPABILITIES_AES_V2)
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+/* Common content descriptor */
+struct qat_alg_cd {
+       union {
+               struct qat_enc { /* Encrypt content desc */
+                       struct icp_qat_hw_cipher_algo_blk cipher;
+                       struct icp_qat_hw_auth_algo_blk hash;
+               } qat_enc_cd;
+               struct qat_dec { /* Decrypt content desc */
+                       struct icp_qat_hw_auth_algo_blk hash;
+                       struct icp_qat_hw_cipher_algo_blk cipher;
+               } qat_dec_cd;
+       };
+} __aligned(64);
+
+struct qat_alg_aead_ctx {
+       struct qat_alg_cd *enc_cd;
+       struct qat_alg_cd *dec_cd;
+       dma_addr_t enc_cd_paddr;
+       dma_addr_t dec_cd_paddr;
+       struct icp_qat_fw_la_bulk_req enc_fw_req;
+       struct icp_qat_fw_la_bulk_req dec_fw_req;
+       struct crypto_shash *hash_tfm;
+       enum icp_qat_hw_auth_algo qat_hash_alg;
+       struct qat_crypto_instance *inst;
+       union {
+               struct sha1_state sha1;
+               struct sha256_state sha256;
+               struct sha512_state sha512;
+       };
+       char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
+       char opad[SHA512_BLOCK_SIZE];
+};
+
+struct qat_alg_skcipher_ctx {
+       struct icp_qat_hw_cipher_algo_blk *enc_cd;
+       struct icp_qat_hw_cipher_algo_blk *dec_cd;
+       dma_addr_t enc_cd_paddr;
+       dma_addr_t dec_cd_paddr;
+       struct icp_qat_fw_la_bulk_req enc_fw_req;
+       struct icp_qat_fw_la_bulk_req dec_fw_req;
+       struct qat_crypto_instance *inst;
+       struct crypto_skcipher *ftfm;
+       struct crypto_cipher *tweak;
+       bool fallback;
+       int mode;
+};
+
+static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+       switch (qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               return ICP_QAT_HW_SHA1_STATE1_SZ;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               return ICP_QAT_HW_SHA256_STATE1_SZ;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               return ICP_QAT_HW_SHA512_STATE1_SZ;
+       default:
+               return -EFAULT;
+       }
+}
+
+static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+                                 struct qat_alg_aead_ctx *ctx,
+                                 const u8 *auth_key,
+                                 unsigned int auth_keylen)
+{
+       SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
+       int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+       int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+       __be32 *hash_state_out;
+       __be64 *hash512_state_out;
+       int i, offset;
+
+       memset(ctx->ipad, 0, block_size);
+       memset(ctx->opad, 0, block_size);
+       shash->tfm = ctx->hash_tfm;
+
+       if (auth_keylen > block_size) {
+               int ret = crypto_shash_digest(shash, auth_key,
+                                             auth_keylen, ctx->ipad);
+               if (ret)
+                       return ret;
+
+               memcpy(ctx->opad, ctx->ipad, digest_size);
+       } else {
+               memcpy(ctx->ipad, auth_key, auth_keylen);
+               memcpy(ctx->opad, auth_key, auth_keylen);
+       }
+
+       for (i = 0; i < block_size; i++) {
+               char *ipad_ptr = ctx->ipad + i;
+               char *opad_ptr = ctx->opad + i;
+               *ipad_ptr ^= HMAC_IPAD_VALUE;
+               *opad_ptr ^= HMAC_OPAD_VALUE;
+       }
+
+       if (crypto_shash_init(shash))
+               return -EFAULT;
+
+       if (crypto_shash_update(shash, ctx->ipad, block_size))
+               return -EFAULT;
+
+       hash_state_out = (__be32 *)hash->sha.state1;
+       hash512_state_out = (__be64 *)hash_state_out;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               if (crypto_shash_export(shash, &ctx->sha1))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               if (crypto_shash_export(shash, &ctx->sha256))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               if (crypto_shash_export(shash, &ctx->sha512))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+                       *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
+               break;
+       default:
+               return -EFAULT;
+       }
+
+       if (crypto_shash_init(shash))
+               return -EFAULT;
+
+       if (crypto_shash_update(shash, ctx->opad, block_size))
+               return -EFAULT;
+
+       offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+       if (offset < 0)
+               return -EFAULT;
+
+       hash_state_out = (__be32 *)(hash->sha.state1 + offset);
+       hash512_state_out = (__be64 *)hash_state_out;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               if (crypto_shash_export(shash, &ctx->sha1))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               if (crypto_shash_export(shash, &ctx->sha256))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               if (crypto_shash_export(shash, &ctx->sha512))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+                       *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
+               break;
+       default:
+               return -EFAULT;
+       }
+       memzero_explicit(ctx->ipad, block_size);
+       memzero_explicit(ctx->opad, block_size);
+       return 0;
+}
+
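+/*
+ * Initialize the header fields shared by all LA requests: 64-bit CD
+ * address, SGL buffers, a 16-byte IV field, no partial packets and no
+ * state updates between requests.
+ */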
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+{
+       header->hdr_flags =
+               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+       header->comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+                                           QAT_COMN_PTR_TYPE_SGL);
+       ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+                                 ICP_QAT_FW_LA_PARTIAL_NONE);
+       ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+       ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+                               ICP_QAT_FW_LA_NO_PROTO);
+       ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+                                      ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
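+/*
+ * Build the encrypt session. The content descriptor lays out the cipher
+ * config and key first with the hash setup behind it, and the slice
+ * chain runs cipher then auth (ICP_QAT_FW_LA_CMD_CIPHER_HASH), so the
+ * digest is computed over the ciphertext.
+ */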
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
+                                        int alg,
+                                        struct crypto_authenc_keys *keys,
+                                        int mode)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
+       struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
+       struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
+       struct icp_qat_hw_auth_algo_blk *hash =
+               (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
+               sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
+       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       void *ptr = &req_tmpl->cd_ctrl;
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+
+       /* CD setup */
+       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
+       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+       hash->sha.inner_setup.auth_config.config =
+               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+                                            ctx->qat_hash_alg, digestsize);
+       hash->sha.inner_setup.auth_counter.counter =
+               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
+               return -EFAULT;
+
+       /* Request setup */
+       qat_alg_init_common_hdr(header);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_RET_AUTH_RES);
+       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+       cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+       /* Cipher CD config setup */
+       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+       cipher_cd_ctrl->cipher_cfg_offset = 0;
+       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       /* Auth CD config setup */
+       hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
+       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+       hash_cd_ctrl->inner_res_sz = digestsize;
+       hash_cd_ctrl->final_sz = digestsize;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               hash_cd_ctrl->inner_state1_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+               hash_cd_ctrl->inner_state2_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+               break;
+       default:
+               break;
+       }
+       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+                       ((sizeof(struct icp_qat_hw_auth_setup) +
+                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+       return 0;
+}
+
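+/*
+ * Build the decrypt session. Here the hash setup and its precomputed
+ * inner/outer states lead the content descriptor and the cipher block
+ * follows; the firmware runs auth before cipher
+ * (ICP_QAT_FW_LA_CMD_HASH_CIPHER) and compares the digest in hardware
+ * (ICP_QAT_FW_LA_CMP_AUTH_RES).
+ */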
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
+                                        int alg,
+                                        struct crypto_authenc_keys *keys,
+                                        int mode)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
+       struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
+       struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
+       struct icp_qat_hw_cipher_algo_blk *cipher =
+               (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
+               sizeof(struct icp_qat_hw_auth_setup) +
+               roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       void *ptr = &req_tmpl->cd_ctrl;
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+       struct icp_qat_fw_la_auth_req_params *auth_param =
+               (struct icp_qat_fw_la_auth_req_params *)
+               ((char *)&req_tmpl->serv_specif_rqpars +
+               sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+       /* CD setup */
+       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
+       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+       hash->sha.inner_setup.auth_config.config =
+               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+                                            ctx->qat_hash_alg,
+                                            digestsize);
+       hash->sha.inner_setup.auth_counter.counter =
+               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
+               return -EFAULT;
+
+       /* Request setup */
+       qat_alg_init_common_hdr(header);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_CMP_AUTH_RES);
+       cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+       /* Cipher CD config setup */
+       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+       cipher_cd_ctrl->cipher_cfg_offset =
+               (sizeof(struct icp_qat_hw_auth_setup) +
+                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+
+       /* Auth CD config setup */
+       hash_cd_ctrl->hash_cfg_offset = 0;
+       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+       hash_cd_ctrl->inner_res_sz = digestsize;
+       hash_cd_ctrl->final_sz = digestsize;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               hash_cd_ctrl->inner_state1_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+               hash_cd_ctrl->inner_state2_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+               break;
+       default:
+               break;
+       }
+
+       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+                       ((sizeof(struct icp_qat_hw_auth_setup) +
+                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+       auth_param->auth_res_sz = digestsize;
+       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       return 0;
+}
+
+static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
+                                     struct icp_qat_fw_la_bulk_req *req,
+                                     struct icp_qat_hw_cipher_algo_blk *cd,
+                                     const u8 *key, unsigned int keylen)
+{
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+       struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
+       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+       int mode = ctx->mode;
+
+       qat_alg_init_common_hdr(header);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+       cd_pars->u.s.content_desc_params_sz =
+                               sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
+
+       if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+               ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+                                            ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+
+               /* Store both XTS keys in the CD; only the first key is
+                * sent to the HW, while the second is used for tweak
+                * calculation
+                */
+               memcpy(cd->ucs_aes.key, key, keylen);
+               keylen = keylen / 2;
+       } else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+               ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+                                            ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+               memcpy(cd->ucs_aes.key, key, keylen);
+               keylen = round_up(keylen, 16);
+       } else {
+               memcpy(cd->aes.key, key, keylen);
+       }
+
+       /* Cipher CD config setup */
+       cd_ctrl->cipher_key_sz = keylen >> 3;
+       cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+       cd_ctrl->cipher_cfg_offset = 0;
+       ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+}
+
+static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
+                                     int alg, const u8 *key,
+                                     unsigned int keylen, int mode)
+{
+       struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
+       struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+       qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
+       cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+       enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
+}
+
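+/*
+ * Expand the forward AES key and copy out its final round key(s): the
+ * last one for a 128-bit key, the last two for a 256-bit key. This
+ * "reversed" key is what gets programmed for decryption when the
+ * hardware cannot convert the key itself.
+ */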
+static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
+                                   u8 *key_reverse)
+{
+       struct crypto_aes_ctx aes_expanded;
+       int nrounds;
+       u8 *key;
+
+       aes_expandkey(&aes_expanded, key_forward, keylen);
+       if (keylen == AES_KEYSIZE_128) {
+               nrounds = 10;
+               key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+               memcpy(key_reverse, key, AES_BLOCK_SIZE);
+       } else {
+               /* AES_KEYSIZE_256 */
+               nrounds = 14;
+               key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+               memcpy(key_reverse, key, AES_BLOCK_SIZE);
+               memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
+                      AES_BLOCK_SIZE);
+       }
+}
+
+static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
+                                     int alg, const u8 *key,
+                                     unsigned int keylen, int mode)
+{
+       struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
+       struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+
+       qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
+       cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+
+       if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+               /* Key reversing not supported, set no convert */
+               dec_cd->aes.cipher_config.val =
+                               QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);
+
+               /* In-place key reversal */
+               qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
+                                       dec_cd->ucs_aes.key);
+       } else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
+               dec_cd->aes.cipher_config.val =
+                                       QAT_AES_HW_CONFIG_DEC(alg, mode);
+       } else {
+               dec_cd->aes.cipher_config.val =
+                                       QAT_AES_HW_CONFIG_ENC(alg, mode);
+       }
+}
+
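+/*
+ * Map the key length onto the firmware AES algorithm id. XTS keys are
+ * double length, and AES-192 is not offered in XTS mode.
+ */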
+static int qat_alg_validate_key(int key_len, int *alg, int mode)
+{
+       if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
+               switch (key_len) {
+               case AES_KEYSIZE_128:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+                       break;
+               case AES_KEYSIZE_192:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+                       break;
+               case AES_KEYSIZE_256:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       } else {
+               switch (key_len) {
+               case AES_KEYSIZE_128 << 1:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+                       break;
+               case AES_KEYSIZE_256 << 1:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
+                                     unsigned int keylen,  int mode)
+{
+       struct crypto_authenc_keys keys;
+       int alg;
+
+       if (crypto_authenc_extractkeys(&keys, key, keylen))
+               goto bad_key;
+
+       if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
+               goto bad_key;
+
+       if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
+               goto error;
+
+       if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
+               goto error;
+
+       memzero_explicit(&keys, sizeof(keys));
+       return 0;
+bad_key:
+       memzero_explicit(&keys, sizeof(keys));
+       return -EINVAL;
+error:
+       memzero_explicit(&keys, sizeof(keys));
+       return -EFAULT;
+}
+
+static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
+                                         const u8 *key,
+                                         unsigned int keylen,
+                                         int mode)
+{
+       int alg;
+
+       if (qat_alg_validate_key(keylen, &alg, mode))
+               return -EINVAL;
+
+       qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
+       qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
+       return 0;
+}
+
+static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
+                             unsigned int keylen)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+       memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+       memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+
+       return qat_alg_aead_init_sessions(tfm, key, keylen,
+                                         ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
+                              unsigned int keylen)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+       struct qat_crypto_instance *inst = NULL;
+       int node = numa_node_id();
+       struct device *dev;
+       int ret;
+
+       inst = qat_crypto_get_instance_node(node);
+       if (!inst)
+               return -EINVAL;
+       dev = &GET_DEV(inst->accel_dev);
+       ctx->inst = inst;
+       ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+                                        &ctx->enc_cd_paddr,
+                                        GFP_ATOMIC);
+       if (!ctx->enc_cd) {
+               ret = -ENOMEM;
+               goto out_free_inst;
+       }
+       ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+                                        &ctx->dec_cd_paddr,
+                                        GFP_ATOMIC);
+       if (!ctx->dec_cd) {
+               ret = -ENOMEM;
+               goto out_free_enc;
+       }
+
+       ret = qat_alg_aead_init_sessions(tfm, key, keylen,
+                                        ICP_QAT_HW_CIPHER_CBC_MODE);
+       if (ret)
+               goto out_free_all;
+
+       return 0;
+
+out_free_all:
+       memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                         ctx->dec_cd, ctx->dec_cd_paddr);
+       ctx->dec_cd = NULL;
+out_free_enc:
+       memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                         ctx->enc_cd, ctx->enc_cd_paddr);
+       ctx->enc_cd = NULL;
+out_free_inst:
+       ctx->inst = NULL;
+       qat_crypto_put_instance(inst);
+       return ret;
+}
+
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+                              unsigned int keylen)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+       if (ctx->enc_cd)
+               return qat_alg_aead_rekey(tfm, key, keylen);
+       else
+               return qat_alg_aead_newkey(tfm, key, keylen);
+}
+
+static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+                                 struct qat_crypto_request *qat_req)
+{
+       struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct aead_request *areq = qat_req->aead_req;
+       u8 stat_field = qat_resp->comn_resp.comn_status;
+       int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+       qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+       if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+               res = -EBADMSG;
+       aead_request_complete(areq, res);
+}
+
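+/*
+ * CTR mode IV update: treat the 16-byte IV as a big-endian 128-bit
+ * counter and advance it by the number of AES blocks just processed,
+ * carrying from the low into the high 64 bits.
+ */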
+static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
+{
+       struct skcipher_request *sreq = qat_req->skcipher_req;
+       u64 iv_lo_prev;
+       u64 iv_lo;
+       u64 iv_hi;
+
+       memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
+
+       iv_lo = be64_to_cpu(qat_req->iv_lo);
+       iv_hi = be64_to_cpu(qat_req->iv_hi);
+
+       iv_lo_prev = iv_lo;
+       iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
+       if (iv_lo < iv_lo_prev)
+               iv_hi++;
+
+       qat_req->iv_lo = cpu_to_be64(iv_lo);
+       qat_req->iv_hi = cpu_to_be64(iv_hi);
+}
+
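+/*
+ * CBC mode IV update: the next IV is the last ciphertext block, taken
+ * from the destination on encryption and from the source on decryption.
+ */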
+static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
+{
+       struct skcipher_request *sreq = qat_req->skcipher_req;
+       int offset = sreq->cryptlen - AES_BLOCK_SIZE;
+       struct scatterlist *sgl;
+
+       if (qat_req->encryption)
+               sgl = sreq->dst;
+       else
+               sgl = sreq->src;
+
+       scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
+}
+
+static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
+{
+       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+       switch (ctx->mode) {
+       case ICP_QAT_HW_CIPHER_CTR_MODE:
+               qat_alg_update_iv_ctr_mode(qat_req);
+               break;
+       case ICP_QAT_HW_CIPHER_CBC_MODE:
+               qat_alg_update_iv_cbc_mode(qat_req);
+               break;
+       case ICP_QAT_HW_CIPHER_XTS_MODE:
+               break;
+       default:
+               dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
+                        ctx->mode);
+       }
+}
+
+static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+                                     struct qat_crypto_request *qat_req)
+{
+       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct skcipher_request *sreq = qat_req->skcipher_req;
+       u8 stat_field = qat_resp->comn_resp.comn_status;
+       int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+       qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+       if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+               res = -EINVAL;
+
+       if (qat_req->encryption)
+               qat_alg_update_iv(qat_req);
+
+       memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
+
+       skcipher_request_complete(sreq, res);
+}
+
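+/*
+ * Response ring handler: recover the request from the opaque data, run
+ * its completion callback, then try to drain backlogged requests now
+ * that ring space has been freed.
+ */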
+void qat_alg_callback(void *resp)
+{
+       struct icp_qat_fw_la_resp *qat_resp = resp;
+       struct qat_crypto_request *qat_req =
+                               (void *)(__force long)qat_resp->opaque_data;
+       struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
+
+       qat_req->cb(qat_resp, qat_req);
+
+       qat_alg_send_backlog(backlog);
+}
+
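+/*
+ * Fill in the transport-level request and hand it to the send helper,
+ * which may queue it on the instance backlog if the ring is busy.
+ */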
+static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
+                                   struct qat_crypto_instance *inst,
+                                   struct crypto_async_request *base)
+{
+       struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+       alg_req->fw_req = (u32 *)&qat_req->req;
+       alg_req->tx_ring = inst->sym_tx;
+       alg_req->base = base;
+       alg_req->backlog = &inst->backlog;
+
+       return qat_alg_send_message(alg_req);
+}
+
+static int qat_alg_aead_dec(struct aead_request *areq)
+{
+       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct icp_qat_fw_la_auth_req_params *auth_param;
+       struct icp_qat_fw_la_bulk_req *msg;
+       int digest_size = crypto_aead_authsize(aead_tfm);
+       gfp_t f = qat_algs_alloc_flags(&areq->base);
+       int ret;
+       u32 cipher_len;
+
+       cipher_len = areq->cryptlen - digest_size;
+       if (cipher_len % AES_BLOCK_SIZE != 0)
+               return -EINVAL;
+
+       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+                                &qat_req->buf, NULL, f);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->dec_fw_req;
+       qat_req->aead_ctx = ctx;
+       qat_req->aead_req = areq;
+       qat_req->cb = qat_aead_alg_callback;
+       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       cipher_param->cipher_length = cipher_len;
+       cipher_param->cipher_offset = areq->assoclen;
+       memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
+       auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
+       auth_param->auth_off = 0;
+       auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
+
+       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+       if (ret == -ENOSPC)
+               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+       return ret;
+}
+
+static int qat_alg_aead_enc(struct aead_request *areq)
+{
+       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct icp_qat_fw_la_auth_req_params *auth_param;
+       gfp_t f = qat_algs_alloc_flags(&areq->base);
+       struct icp_qat_fw_la_bulk_req *msg;
+       u8 *iv = areq->iv;
+       int ret;
+
+       if (areq->cryptlen % AES_BLOCK_SIZE != 0)
+               return -EINVAL;
+
+       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+                                &qat_req->buf, NULL, f);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->enc_fw_req;
+       qat_req->aead_ctx = ctx;
+       qat_req->aead_req = areq;
+       qat_req->cb = qat_aead_alg_callback;
+       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
+
+       memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+       cipher_param->cipher_length = areq->cryptlen;
+       cipher_param->cipher_offset = areq->assoclen;
+
+       auth_param->auth_off = 0;
+       auth_param->auth_len = areq->assoclen + areq->cryptlen;
+
+       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+       if (ret == -ENOSPC)
+               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+       return ret;
+}
+
+static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
+                                 const u8 *key, unsigned int keylen,
+                                 int mode)
+{
+       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+       memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+       memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+
+       return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
+}
+
+static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
+                                  const u8 *key, unsigned int keylen,
+                                  int mode)
+{
+       struct qat_crypto_instance *inst = NULL;
+       struct device *dev;
+       int node = numa_node_id();
+       int ret;
+
+       inst = qat_crypto_get_instance_node(node);
+       if (!inst)
+               return -EINVAL;
+       dev = &GET_DEV(inst->accel_dev);
+       ctx->inst = inst;
+       ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+                                        &ctx->enc_cd_paddr,
+                                        GFP_ATOMIC);
+       if (!ctx->enc_cd) {
+               ret = -ENOMEM;
+               goto out_free_instance;
+       }
+       ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+                                        &ctx->dec_cd_paddr,
+                                        GFP_ATOMIC);
+       if (!ctx->dec_cd) {
+               ret = -ENOMEM;
+               goto out_free_enc;
+       }
+
+       ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
+       if (ret)
+               goto out_free_all;
+
+       return 0;
+
+out_free_all:
+       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+       dma_free_coherent(dev, sizeof(*ctx->dec_cd),
+                         ctx->dec_cd, ctx->dec_cd_paddr);
+       ctx->dec_cd = NULL;
+out_free_enc:
+       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+       dma_free_coherent(dev, sizeof(*ctx->enc_cd),
+                         ctx->enc_cd, ctx->enc_cd_paddr);
+       ctx->enc_cd = NULL;
+out_free_instance:
+       ctx->inst = NULL;
+       qat_crypto_put_instance(inst);
+       return ret;
+}
+
+static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+                                  const u8 *key, unsigned int keylen,
+                                  int mode)
+{
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       ctx->mode = mode;
+
+       if (ctx->enc_cd)
+               return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
+       else
+               return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
+}
+
+static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
+                                      const u8 *key, unsigned int keylen)
+{
+       return qat_alg_skcipher_setkey(tfm, key, keylen,
+                                      ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
+                                      const u8 *key, unsigned int keylen)
+{
+       return qat_alg_skcipher_setkey(tfm, key, keylen,
+                                      ICP_QAT_HW_CIPHER_CTR_MODE);
+}
+
+static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+                                      const u8 *key, unsigned int keylen)
+{
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+       int ret;
+
+       ret = xts_verify_key(tfm, key, keylen);
+       if (ret)
+               return ret;
+
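+       /*
+        * The driver only supports 2 x 128-bit and 2 x 256-bit XTS keys,
+        * so AES-192 requests are handled entirely by the software
+        * fallback tfm.
+        */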
+       if (keylen >> 1 == AES_KEYSIZE_192) {
+               ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
+               if (ret)
+                       return ret;
+
+               ctx->fallback = true;
+
+               return 0;
+       }
+
+       ctx->fallback = false;
+
+       ret = qat_alg_skcipher_setkey(tfm, key, keylen,
+                                     ICP_QAT_HW_CIPHER_XTS_MODE);
+       if (ret)
+               return ret;
+
+       if (HW_CAP_AES_V2(ctx->inst->accel_dev))
+               ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
+                                          keylen / 2);
+
+       return ret;
+}
+
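+/*
+ * On AES-v2 capable devices the XTS tweak is computed in software by
+ * encrypting the IV with the second half of the XTS key; otherwise the
+ * IV is passed through to the firmware unchanged.
+ */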
+static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
+{
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+       u8 *iv = qat_req->skcipher_req->iv;
+
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+
+       if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
+               crypto_cipher_encrypt_one(ctx->tweak,
+                                         (u8 *)cipher_param->u.cipher_IV_array,
+                                         iv);
+       else
+               memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+}
+
+static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+       struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+       struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       gfp_t f = qat_algs_alloc_flags(&req->base);
+       struct icp_qat_fw_la_bulk_req *msg;
+       int ret;
+
+       if (req->cryptlen == 0)
+               return 0;
+
+       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
+                                &qat_req->buf, NULL, f);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->enc_fw_req;
+       qat_req->skcipher_ctx = ctx;
+       qat_req->skcipher_req = req;
+       qat_req->cb = qat_skcipher_alg_callback;
+       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       qat_req->encryption = true;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       cipher_param->cipher_length = req->cryptlen;
+       cipher_param->cipher_offset = 0;
+
+       qat_alg_set_req_iv(qat_req);
+
+       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+       if (ret == -ENOSPC)
+               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+       return ret;
+}
+
+static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
+{
+       if (req->cryptlen % AES_BLOCK_SIZE != 0)
+               return -EINVAL;
+
+       return qat_alg_skcipher_encrypt(req);
+}
+
+static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+       struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+       if (req->cryptlen < XTS_BLOCK_SIZE)
+               return -EINVAL;
+
+       if (ctx->fallback) {
+               memcpy(nreq, req, sizeof(*req));
+               skcipher_request_set_tfm(nreq, ctx->ftfm);
+               return crypto_skcipher_encrypt(nreq);
+       }
+
+       return qat_alg_skcipher_encrypt(req);
+}
+
+static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+       struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+       struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       gfp_t f = qat_algs_alloc_flags(&req->base);
+       struct icp_qat_fw_la_bulk_req *msg;
+       int ret;
+
+       if (req->cryptlen == 0)
+               return 0;
+
+       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
+                                &qat_req->buf, NULL, f);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->dec_fw_req;
+       qat_req->skcipher_ctx = ctx;
+       qat_req->skcipher_req = req;
+       qat_req->cb = qat_skcipher_alg_callback;
+       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       qat_req->encryption = false;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       cipher_param->cipher_length = req->cryptlen;
+       cipher_param->cipher_offset = 0;
+
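+       /*
+        * For decryption the chained IV (the last ciphertext block in CBC
+        * mode) must be captured before an in-place operation can
+        * overwrite the source, so the IV update happens here rather than
+        * in the completion callback as it does for encryption.
+        */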
+       qat_alg_set_req_iv(qat_req);
+       qat_alg_update_iv(qat_req);
+
+       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+       if (ret == -ENOSPC)
+               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+       return ret;
+}
+
+static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
+{
+       if (req->cryptlen % AES_BLOCK_SIZE != 0)
+               return -EINVAL;
+
+       return qat_alg_skcipher_decrypt(req);
+}
+
+static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+       struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+       if (req->cryptlen < XTS_BLOCK_SIZE)
+               return -EINVAL;
+
+       if (ctx->fallback) {
+               memcpy(nreq, req, sizeof(*req));
+               skcipher_request_set_tfm(nreq, ctx->ftfm);
+               return crypto_skcipher_decrypt(nreq);
+       }
+
+       return qat_alg_skcipher_decrypt(req);
+}
+
+static int qat_alg_aead_init(struct crypto_aead *tfm,
+                            enum icp_qat_hw_auth_algo hash,
+                            const char *hash_name)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+       ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+       if (IS_ERR(ctx->hash_tfm))
+               return PTR_ERR(ctx->hash_tfm);
+       ctx->qat_hash_alg = hash;
+       crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+       return 0;
+}
+
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
+{
+       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+}
+
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
+{
+       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+}
+
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
+{
+       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+}
+
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev;
+
+       crypto_free_shash(ctx->hash_tfm);
+
+       if (!inst)
+               return;
+
+       dev = &GET_DEV(inst->accel_dev);
+       if (ctx->enc_cd) {
+               memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                                 ctx->enc_cd, ctx->enc_cd_paddr);
+       }
+       if (ctx->dec_cd) {
+               memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                                 ctx->dec_cd, ctx->dec_cd_paddr);
+       }
+       qat_crypto_put_instance(inst);
+}
+
+static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
+{
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+       return 0;
+}
+
+static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
+{
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+       int reqsize;
+
+       ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
+                                         CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(ctx->ftfm))
+               return PTR_ERR(ctx->ftfm);
+
+       ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
+       if (IS_ERR(ctx->tweak)) {
+               crypto_free_skcipher(ctx->ftfm);
+               return PTR_ERR(ctx->tweak);
+       }
+
+       reqsize = max(sizeof(struct qat_crypto_request),
+                     sizeof(struct skcipher_request) +
+                     crypto_skcipher_reqsize(ctx->ftfm));
+       crypto_skcipher_set_reqsize(tfm, reqsize);
+
+       return 0;
+}
+
+static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
+{
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev;
+
+       if (!inst)
+               return;
+
+       dev = &GET_DEV(inst->accel_dev);
+       if (ctx->enc_cd) {
+               memset(ctx->enc_cd, 0,
+                      sizeof(struct icp_qat_hw_cipher_algo_blk));
+               dma_free_coherent(dev,
+                                 sizeof(struct icp_qat_hw_cipher_algo_blk),
+                                 ctx->enc_cd, ctx->enc_cd_paddr);
+       }
+       if (ctx->dec_cd) {
+               memset(ctx->dec_cd, 0,
+                      sizeof(struct icp_qat_hw_cipher_algo_blk));
+               dma_free_coherent(dev,
+                                 sizeof(struct icp_qat_hw_cipher_algo_blk),
+                                 ctx->dec_cd, ctx->dec_cd_paddr);
+       }
+       qat_crypto_put_instance(inst);
+}
+
+static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
+{
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       if (ctx->ftfm)
+               crypto_free_skcipher(ctx->ftfm);
+
+       if (ctx->tweak)
+               crypto_free_cipher(ctx->tweak);
+
+       qat_alg_skcipher_exit_tfm(tfm);
+}
+
+static struct aead_alg qat_aeads[] = { {
+       .base = {
+               .cra_name = "authenc(hmac(sha1),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha1",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_alg_aead_sha1_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+       .base = {
+               .cra_name = "authenc(hmac(sha256),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_alg_aead_sha256_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+       .base = {
+               .cra_name = "authenc(hmac(sha512),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_alg_aead_sha512_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA512_DIGEST_SIZE,
+} };
+
+static struct skcipher_alg qat_skciphers[] = { {
+       .base.cra_name = "cbc(aes)",
+       .base.cra_driver_name = "qat_aes_cbc",
+       .base.cra_priority = 4001,
+       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+       .base.cra_blocksize = AES_BLOCK_SIZE,
+       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+       .base.cra_alignmask = 0,
+       .base.cra_module = THIS_MODULE,
+
+       .init = qat_alg_skcipher_init_tfm,
+       .exit = qat_alg_skcipher_exit_tfm,
+       .setkey = qat_alg_skcipher_cbc_setkey,
+       .decrypt = qat_alg_skcipher_blk_decrypt,
+       .encrypt = qat_alg_skcipher_blk_encrypt,
+       .min_keysize = AES_MIN_KEY_SIZE,
+       .max_keysize = AES_MAX_KEY_SIZE,
+       .ivsize = AES_BLOCK_SIZE,
+}, {
+       .base.cra_name = "ctr(aes)",
+       .base.cra_driver_name = "qat_aes_ctr",
+       .base.cra_priority = 4001,
+       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+       .base.cra_blocksize = 1,
+       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+       .base.cra_alignmask = 0,
+       .base.cra_module = THIS_MODULE,
+
+       .init = qat_alg_skcipher_init_tfm,
+       .exit = qat_alg_skcipher_exit_tfm,
+       .setkey = qat_alg_skcipher_ctr_setkey,
+       .decrypt = qat_alg_skcipher_decrypt,
+       .encrypt = qat_alg_skcipher_encrypt,
+       .min_keysize = AES_MIN_KEY_SIZE,
+       .max_keysize = AES_MAX_KEY_SIZE,
+       .ivsize = AES_BLOCK_SIZE,
+}, {
+       .base.cra_name = "xts(aes)",
+       .base.cra_driver_name = "qat_aes_xts",
+       .base.cra_priority = 4001,
+       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
+                         CRYPTO_ALG_ALLOCATES_MEMORY,
+       .base.cra_blocksize = AES_BLOCK_SIZE,
+       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+       .base.cra_alignmask = 0,
+       .base.cra_module = THIS_MODULE,
+
+       .init = qat_alg_skcipher_init_xts_tfm,
+       .exit = qat_alg_skcipher_exit_xts_tfm,
+       .setkey = qat_alg_skcipher_xts_setkey,
+       .decrypt = qat_alg_skcipher_xts_decrypt,
+       .encrypt = qat_alg_skcipher_xts_encrypt,
+       .min_keysize = 2 * AES_MIN_KEY_SIZE,
+       .max_keysize = 2 * AES_MAX_KEY_SIZE,
+       .ivsize = AES_BLOCK_SIZE,
+} };
+
+int qat_algs_register(void)
+{
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
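+       /* Register the algorithms only once, when the first device comes up */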
+       if (++active_devs != 1)
+               goto unlock;
+
+       ret = crypto_register_skciphers(qat_skciphers,
+                                       ARRAY_SIZE(qat_skciphers));
+       if (ret)
+               goto unlock;
+
+       ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+       if (ret)
+               goto unreg_algs;
+
+unlock:
+       mutex_unlock(&algs_lock);
+       return ret;
+
+unreg_algs:
+       crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
+       goto unlock;
+}
+
+void qat_algs_unregister(void)
+{
+       mutex_lock(&algs_lock);
+       if (--active_devs != 0)
+               goto unlock;
+
+       crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+       crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
+
+unlock:
+       mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
new file mode 100644 (file)
index 0000000..bb80455
--- /dev/null
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <crypto/algapi.h>
+#include "adf_transport.h"
+#include "qat_algs_send.h"
+#include "qat_crypto.h"
+
+#define ADF_MAX_RETRIES                20
+
+static int qat_alg_send_message_retry(struct qat_alg_req *req)
+{
+       int ret = 0, ctr = 0;
+
+       do {
+               ret = adf_send_message(req->tx_ring, req->fw_req);
+       } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);
+
+       if (ret == -EAGAIN)
+               return -ENOSPC;
+
+       return -EINPROGRESS;
+}
+
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+{
+       struct qat_alg_req *req, *tmp;
+
+       spin_lock_bh(&backlog->lock);
+       list_for_each_entry_safe(req, tmp, &backlog->list, list) {
+               if (adf_send_message(req->tx_ring, req->fw_req)) {
+                       /* The HW ring is full. Do nothing.
+                        * qat_alg_send_backlog() will be invoked again by
+                        * another callback.
+                        */
+                       break;
+               }
+               list_del(&req->list);
+               crypto_request_complete(req->base, -EINPROGRESS);
+       }
+       spin_unlock_bh(&backlog->lock);
+}
+
+static void qat_alg_backlog_req(struct qat_alg_req *req,
+                               struct qat_instance_backlog *backlog)
+{
+       INIT_LIST_HEAD(&req->list);
+
+       spin_lock_bh(&backlog->lock);
+       list_add_tail(&req->list, &backlog->list);
+       spin_unlock_bh(&backlog->lock);
+}
+
+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
+{
+       struct qat_instance_backlog *backlog = req->backlog;
+       struct adf_etr_ring_data *tx_ring = req->tx_ring;
+       u32 *fw_req = req->fw_req;
+
+       /* If any request is already backlogged, then add to backlog list */
+       if (!list_empty(&backlog->list))
+               goto enqueue;
+
+       /* If ring is nearly full, then add to backlog list */
+       if (adf_ring_nearly_full(tx_ring))
+               goto enqueue;
+
+       /* If adding request to HW ring fails, then add to backlog list */
+       if (adf_send_message(tx_ring, fw_req))
+               goto enqueue;
+
+       return -EINPROGRESS;
+
+enqueue:
+       qat_alg_backlog_req(req, backlog);
+
+       return -EBUSY;
+}
+
+int qat_alg_send_message(struct qat_alg_req *req)
+{
+       u32 flags = req->base->flags;
+
+       if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+               return qat_alg_send_message_maybacklog(req);
+       else
+               return qat_alg_send_message_retry(req);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs_send.h b/drivers/crypto/intel/qat/qat_common/qat_algs_send.h
new file mode 100644 (file)
index 0000000..0baca16
--- /dev/null
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef QAT_ALGS_SEND_H
+#define QAT_ALGS_SEND_H
+
+#include <linux/list.h>
+#include "adf_transport_internal.h"
+
+struct qat_instance_backlog {
+       struct list_head list;
+       spinlock_t lock; /* protects backlog list */
+};
+
+struct qat_alg_req {
+       u32 *fw_req;
+       struct adf_etr_ring_data *tx_ring;
+       struct crypto_async_request *base;
+       struct list_head list;
+       struct qat_instance_backlog *backlog;
+};
+
+int qat_alg_send_message(struct qat_alg_req *req);
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
new file mode 100644 (file)
index 0000000..935a7e0
--- /dev/null
@@ -0,0 +1,1309 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/dh.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include <crypto/scatterwalk.h>
+#include "icp_qat_fw_pke.h"
+#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_rsa_input_params {
+       union {
+               struct {
+                       dma_addr_t m;
+                       dma_addr_t e;
+                       dma_addr_t n;
+               } enc;
+               struct {
+                       dma_addr_t c;
+                       dma_addr_t d;
+                       dma_addr_t n;
+               } dec;
+               struct {
+                       dma_addr_t c;
+                       dma_addr_t p;
+                       dma_addr_t q;
+                       dma_addr_t dp;
+                       dma_addr_t dq;
+                       dma_addr_t qinv;
+               } dec_crt;
+               u64 in_tab[8];
+       };
+} __packed __aligned(64);
+
+struct qat_rsa_output_params {
+       union {
+               struct {
+                       dma_addr_t c;
+               } enc;
+               struct {
+                       dma_addr_t m;
+               } dec;
+               u64 out_tab[8];
+       };
+} __packed __aligned(64);
+
+struct qat_rsa_ctx {
+       char *n;
+       char *e;
+       char *d;
+       char *p;
+       char *q;
+       char *dp;
+       char *dq;
+       char *qinv;
+       dma_addr_t dma_n;
+       dma_addr_t dma_e;
+       dma_addr_t dma_d;
+       dma_addr_t dma_p;
+       dma_addr_t dma_q;
+       dma_addr_t dma_dp;
+       dma_addr_t dma_dq;
+       dma_addr_t dma_qinv;
+       unsigned int key_sz;
+       bool crt_mode;
+       struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_dh_input_params {
+       union {
+               struct {
+                       dma_addr_t b;
+                       dma_addr_t xa;
+                       dma_addr_t p;
+               } in;
+               struct {
+                       dma_addr_t xa;
+                       dma_addr_t p;
+               } in_g2;
+               u64 in_tab[8];
+       };
+} __packed __aligned(64);
+
+struct qat_dh_output_params {
+       union {
+               dma_addr_t r;
+               u64 out_tab[8];
+       };
+} __packed __aligned(64);
+
+struct qat_dh_ctx {
+       char *g;
+       char *xa;
+       char *p;
+       dma_addr_t dma_g;
+       dma_addr_t dma_xa;
+       dma_addr_t dma_p;
+       unsigned int p_size;
+       bool g2;
+       struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_asym_request {
+       union {
+               struct qat_rsa_input_params rsa;
+               struct qat_dh_input_params dh;
+       } in;
+       union {
+               struct qat_rsa_output_params rsa;
+               struct qat_dh_output_params dh;
+       } out;
+       dma_addr_t phy_in;
+       dma_addr_t phy_out;
+       char *src_align;
+       char *dst_align;
+       struct icp_qat_fw_pke_request req;
+       union {
+               struct qat_rsa_ctx *rsa;
+               struct qat_dh_ctx *dh;
+       } ctx;
+       union {
+               struct akcipher_request *rsa;
+               struct kpp_request *dh;
+       } areq;
+       int err;
+       void (*cb)(struct icp_qat_fw_pke_resp *resp);
+       struct qat_alg_req alg_req;
+} __aligned(64);
+
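+/*
+ * Fill in the transport descriptor embedded in the asym request and hand
+ * it to the common send path.
+ */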
+static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
+                                    struct qat_crypto_instance *inst,
+                                    struct crypto_async_request *base)
+{
+       struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+       alg_req->fw_req = (u32 *)&qat_req->req;
+       alg_req->tx_ring = inst->pke_tx;
+       alg_req->base = base;
+       alg_req->backlog = &inst->backlog;
+
+       return qat_alg_send_message(alg_req);
+}
+
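+/*
+ * Firmware completion callback for DH: translate the response status,
+ * unmap all DMA buffers, copy the result back to the caller's scatterlist
+ * if a bounce buffer was used, and complete the request.
+ */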
+static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
+{
+       struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+       struct kpp_request *areq = req->areq.dh;
+       struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
+       int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+                               resp->pke_resp_hdr.comn_resp_flags);
+
+       err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+       if (areq->src) {
+               dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
+                                DMA_TO_DEVICE);
+               kfree_sensitive(req->src_align);
+       }
+
+       areq->dst_len = req->ctx.dh->p_size;
+       if (req->dst_align) {
+               scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+                                        areq->dst_len, 1);
+               kfree_sensitive(req->dst_align);
+       }
+
+       dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+                        DMA_FROM_DEVICE);
+
+       dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+                        DMA_TO_DEVICE);
+       dma_unmap_single(dev, req->phy_out,
+                        sizeof(struct qat_dh_output_params),
+                        DMA_TO_DEVICE);
+
+       kpp_request_complete(areq, err);
+}
+
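+/*
+ * Firmware PKE function IDs for DH, keyed by modulus bit length; the G2
+ * variants are used when the generator is 2. The values are presumably
+ * fixed by the QAT firmware interface.
+ */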
+#define PKE_DH_1536 0x390c1a49
+#define PKE_DH_G2_1536 0x2e0b1a3e
+#define PKE_DH_2048 0x4d0c1a60
+#define PKE_DH_G2_2048 0x3e0b1a55
+#define PKE_DH_3072 0x510c1a77
+#define PKE_DH_G2_3072 0x3a0b1a6c
+#define PKE_DH_4096 0x690c1a8e
+#define PKE_DH_G2_4096 0x4a0b1a83
+
+static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
+{
+       unsigned int bitslen = len << 3;
+
+       switch (bitslen) {
+       case 1536:
+               return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
+       case 2048:
+               return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
+       case 3072:
+               return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
+       case 4096:
+               return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
+       default:
+               return 0;
+       }
+}
+
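+/*
+ * Backs both .generate_public_key and .compute_shared_secret: with no src
+ * the base is the generator g (or the g2 fast path), with src it is the
+ * peer's public key.
+ */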
+static int qat_dh_compute_value(struct kpp_request *req)
+{
+       struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       struct qat_asym_request *qat_req =
+                       PTR_ALIGN(kpp_request_ctx(req), 64);
+       struct icp_qat_fw_pke_request *msg = &qat_req->req;
+       gfp_t flags = qat_algs_alloc_flags(&req->base);
+       int n_input_params = 0;
+       u8 *vaddr;
+       int ret;
+
+       if (unlikely(!ctx->xa))
+               return -EINVAL;
+
+       if (req->dst_len < ctx->p_size) {
+               req->dst_len = ctx->p_size;
+               return -EOVERFLOW;
+       }
+
+       if (req->src_len > ctx->p_size)
+               return -EINVAL;
+
+       memset(msg, '\0', sizeof(*msg));
+       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+       msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
+                                                   !req->src && ctx->g2);
+       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+               return -EINVAL;
+
+       qat_req->cb = qat_dh_cb;
+       qat_req->ctx.dh = ctx;
+       qat_req->areq.dh = req;
+       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+       msg->pke_hdr.comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+       /*
+        * If no source is provided, use g as the base.
+        */
+       if (req->src) {
+               qat_req->in.dh.in.xa = ctx->dma_xa;
+               qat_req->in.dh.in.p = ctx->dma_p;
+               n_input_params = 3;
+       } else {
+               if (ctx->g2) {
+                       qat_req->in.dh.in_g2.xa = ctx->dma_xa;
+                       qat_req->in.dh.in_g2.p = ctx->dma_p;
+                       n_input_params = 2;
+               } else {
+                       qat_req->in.dh.in.b = ctx->dma_g;
+                       qat_req->in.dh.in.xa = ctx->dma_xa;
+                       qat_req->in.dh.in.p = ctx->dma_p;
+                       n_input_params = 3;
+               }
+       }
+
+       ret = -ENOMEM;
+       if (req->src) {
+               /*
+                * src can be any size in the valid range, but the HW expects
+                * it to match the size of the modulus p, so if it differs we
+                * must allocate a new buffer and copy the src data into it.
+                * Otherwise we just map the user-provided buffer, which also
+                * needs to be contiguous.
+                */
+               if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
+                       qat_req->src_align = NULL;
+                       vaddr = sg_virt(req->src);
+               } else {
+                       int shift = ctx->p_size - req->src_len;
+
+                       qat_req->src_align = kzalloc(ctx->p_size, flags);
+                       if (unlikely(!qat_req->src_align))
+                               return ret;
+
+                       scatterwalk_map_and_copy(qat_req->src_align + shift,
+                                                req->src, 0, req->src_len, 0);
+
+                       vaddr = qat_req->src_align;
+               }
+
+               qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
+                                                    DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
+                       goto unmap_src;
+       }
+       /*
+        * dst can be any size in the valid range, but the HW expects it to
+        * match the size of the modulus p, so if it differs we must allocate
+        * a new buffer; the result is copied back to dst on completion.
+        * Otherwise we just map the user-provided buffer, which also needs
+        * to be contiguous.
+        */
+       if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
+               qat_req->dst_align = NULL;
+               vaddr = sg_virt(req->dst);
+       } else {
+               qat_req->dst_align = kzalloc(ctx->p_size, flags);
+               if (unlikely(!qat_req->dst_align))
+                       goto unmap_src;
+
+               vaddr = qat_req->dst_align;
+       }
+       qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
+                                          DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+               goto unmap_dst;
+
+       qat_req->in.dh.in_tab[n_input_params] = 0;
+       qat_req->out.dh.out_tab[1] = 0;
+       /* in.in.b and in.in_g2.xa overlap in the union, so mapping either is the same */
+       qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
+                                        sizeof(struct qat_dh_input_params),
+                                        DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+               goto unmap_dst;
+
+       qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
+                                         sizeof(struct qat_dh_output_params),
+                                         DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+               goto unmap_in_params;
+
+       msg->pke_mid.src_data_addr = qat_req->phy_in;
+       msg->pke_mid.dest_data_addr = qat_req->phy_out;
+       msg->pke_mid.opaque = (u64)(__force long)qat_req;
+       msg->input_param_count = n_input_params;
+       msg->output_param_count = 1;
+
+       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+       if (ret == -ENOSPC)
+               goto unmap_all;
+
+       return ret;
+
+unmap_all:
+       if (!dma_mapping_error(dev, qat_req->phy_out))
+               dma_unmap_single(dev, qat_req->phy_out,
+                                sizeof(struct qat_dh_output_params),
+                                DMA_TO_DEVICE);
+unmap_in_params:
+       if (!dma_mapping_error(dev, qat_req->phy_in))
+               dma_unmap_single(dev, qat_req->phy_in,
+                                sizeof(struct qat_dh_input_params),
+                                DMA_TO_DEVICE);
+unmap_dst:
+       if (!dma_mapping_error(dev, qat_req->out.dh.r))
+               dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+                                DMA_FROM_DEVICE);
+       kfree_sensitive(qat_req->dst_align);
+unmap_src:
+       if (req->src) {
+               if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+                       dma_unmap_single(dev, qat_req->in.dh.in.b,
+                                        ctx->p_size,
+                                        DMA_TO_DEVICE);
+               kfree_sensitive(qat_req->src_align);
+       }
+       return ret;
+}
+
+static int qat_dh_check_params_length(unsigned int p_len)
+{
+       switch (p_len) {
+       case 1536:
+       case 2048:
+       case 3072:
+       case 4096:
+               return 0;
+       }
+       return -EINVAL;
+}
+
+static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
+{
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+
+       if (qat_dh_check_params_length(params->p_size << 3))
+               return -EINVAL;
+
+       ctx->p_size = params->p_size;
+       ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+       if (!ctx->p)
+               return -ENOMEM;
+       memcpy(ctx->p, params->p, ctx->p_size);
+
+       /* If g equals 2, take the g2 fast path and don't copy it */
+       if (params->g_size == 1 && *(char *)params->g == 0x02) {
+               ctx->g2 = true;
+               return 0;
+       }
+
+       ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+       if (!ctx->g)
+               return -ENOMEM;
+       memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
+              params->g_size);
+
+       return 0;
+}
+
+static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
+{
+       if (ctx->g) {
+               memset(ctx->g, 0, ctx->p_size);
+               dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
+               ctx->g = NULL;
+       }
+       if (ctx->xa) {
+               memset(ctx->xa, 0, ctx->p_size);
+               dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
+               ctx->xa = NULL;
+       }
+       if (ctx->p) {
+               memset(ctx->p, 0, ctx->p_size);
+               dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+               ctx->p = NULL;
+       }
+       ctx->p_size = 0;
+       ctx->g2 = false;
+}
+
+static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
+                            unsigned int len)
+{
+       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+       struct dh params;
+       int ret;
+
+       if (crypto_dh_decode_key(buf, len, &params) < 0)
+               return -EINVAL;
+
+       /* Free old secret if any */
+       qat_dh_clear_ctx(dev, ctx);
+
+       ret = qat_dh_set_params(ctx, &params);
+       if (ret < 0)
+               goto err_clear_ctx;
+
+       ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+                                    GFP_KERNEL);
+       if (!ctx->xa) {
+               ret = -ENOMEM;
+               goto err_clear_ctx;
+       }
+       memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
+              params.key_size);
+
+       return 0;
+
+err_clear_ctx:
+       qat_dh_clear_ctx(dev, ctx);
+       return ret;
+}
+
+static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
+{
+       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+       return ctx->p_size;
+}
+
+static int qat_dh_init_tfm(struct crypto_kpp *tfm)
+{
+       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst =
+                       qat_crypto_get_instance_node(numa_node_id());
+
+       if (!inst)
+               return -EINVAL;
+
+       kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+
+       ctx->p_size = 0;
+       ctx->g2 = false;
+       ctx->inst = inst;
+       return 0;
+}
+
+static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+       qat_dh_clear_ctx(dev, ctx);
+       qat_crypto_put_instance(ctx->inst);
+}
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+       struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+       struct akcipher_request *areq = req->areq.rsa;
+       struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
+       int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+                               resp->pke_resp_hdr.comn_resp_flags);
+
+       err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+       kfree_sensitive(req->src_align);
+
+       dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+                        DMA_TO_DEVICE);
+
+       areq->dst_len = req->ctx.rsa->key_sz;
+       if (req->dst_align) {
+               scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+                                        areq->dst_len, 1);
+
+               kfree_sensitive(req->dst_align);
+       }
+
+       dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+                        DMA_FROM_DEVICE);
+
+       dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+                        DMA_TO_DEVICE);
+       dma_unmap_single(dev, req->phy_out,
+                        sizeof(struct qat_rsa_output_params),
+                        DMA_TO_DEVICE);
+
+       akcipher_request_complete(areq, err);
+}
+
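+/*
+ * Common response handler for the asym ring: run the per-request callback,
+ * then try to flush any backlogged requests for the instance.
+ */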
+void qat_alg_asym_callback(void *_resp)
+{
+       struct icp_qat_fw_pke_resp *resp = _resp;
+       struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
+       struct qat_instance_backlog *backlog = areq->alg_req.backlog;
+
+       areq->cb(resp);
+
+       qat_alg_send_backlog(backlog);
+}
+
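+/*
+ * Firmware PKE function IDs for RSA encrypt (EP), decrypt (DP1) and CRT
+ * decrypt (DP2), keyed by key size in bits.
+ */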
+#define PKE_RSA_EP_512 0x1c161b21
+#define PKE_RSA_EP_1024 0x35111bf7
+#define PKE_RSA_EP_1536 0x4d111cdc
+#define PKE_RSA_EP_2048 0x6e111dba
+#define PKE_RSA_EP_3072 0x7d111ea3
+#define PKE_RSA_EP_4096 0xa5101f7e
+
+static unsigned long qat_rsa_enc_fn_id(unsigned int len)
+{
+       unsigned int bitslen = len << 3;
+
+       switch (bitslen) {
+       case 512:
+               return PKE_RSA_EP_512;
+       case 1024:
+               return PKE_RSA_EP_1024;
+       case 1536:
+               return PKE_RSA_EP_1536;
+       case 2048:
+               return PKE_RSA_EP_2048;
+       case 3072:
+               return PKE_RSA_EP_3072;
+       case 4096:
+               return PKE_RSA_EP_4096;
+       default:
+               return 0;
+       }
+}
+
+#define PKE_RSA_DP1_512 0x1c161b3c
+#define PKE_RSA_DP1_1024 0x35111c12
+#define PKE_RSA_DP1_1536 0x4d111cf7
+#define PKE_RSA_DP1_2048 0x6e111dda
+#define PKE_RSA_DP1_3072 0x7d111ebe
+#define PKE_RSA_DP1_4096 0xa5101f98
+
+static unsigned long qat_rsa_dec_fn_id(unsigned int len)
+{
+       unsigned int bitslen = len << 3;
+
+       switch (bitslen) {
+       case 512:
+               return PKE_RSA_DP1_512;
+       case 1024:
+               return PKE_RSA_DP1_1024;
+       case 1536:
+               return PKE_RSA_DP1_1536;
+       case 2048:
+               return PKE_RSA_DP1_2048;
+       case 3072:
+               return PKE_RSA_DP1_3072;
+       case 4096:
+               return PKE_RSA_DP1_4096;
+       default:
+               return 0;
+       }
+}
+
+#define PKE_RSA_DP2_512 0x1c131b57
+#define PKE_RSA_DP2_1024 0x26131c2d
+#define PKE_RSA_DP2_1536 0x45111d12
+#define PKE_RSA_DP2_2048 0x59121dfa
+#define PKE_RSA_DP2_3072 0x81121ed9
+#define PKE_RSA_DP2_4096 0xb1111fb2
+
+static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
+{
+       unsigned int bitslen = len << 3;
+
+       switch (bitslen) {
+       case 512:
+               return PKE_RSA_DP2_512;
+       case 1024:
+               return PKE_RSA_DP2_1024;
+       case 1536:
+               return PKE_RSA_DP2_1536;
+       case 2048:
+               return PKE_RSA_DP2_2048;
+       case 3072:
+               return PKE_RSA_DP2_3072;
+       case 4096:
+               return PKE_RSA_DP2_4096;
+       default:
+               return 0;
+       }
+}
+
+static int qat_rsa_enc(struct akcipher_request *req)
+{
+       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       struct qat_asym_request *qat_req =
+                       PTR_ALIGN(akcipher_request_ctx(req), 64);
+       struct icp_qat_fw_pke_request *msg = &qat_req->req;
+       gfp_t flags = qat_algs_alloc_flags(&req->base);
+       u8 *vaddr;
+       int ret;
+
+       if (unlikely(!ctx->n || !ctx->e))
+               return -EINVAL;
+
+       if (req->dst_len < ctx->key_sz) {
+               req->dst_len = ctx->key_sz;
+               return -EOVERFLOW;
+       }
+
+       if (req->src_len > ctx->key_sz)
+               return -EINVAL;
+
+       memset(msg, '\0', sizeof(*msg));
+       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
+       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+               return -EINVAL;
+
+       qat_req->cb = qat_rsa_cb;
+       qat_req->ctx.rsa = ctx;
+       qat_req->areq.rsa = req;
+       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+       msg->pke_hdr.comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+       qat_req->in.rsa.enc.e = ctx->dma_e;
+       qat_req->in.rsa.enc.n = ctx->dma_n;
+       ret = -ENOMEM;
+
+       /*
+        * src can be any size in the valid range, but the HW expects it to
+        * match the size of the modulus n, so if it differs we must allocate
+        * a new buffer and copy the src data into it.
+        * Otherwise we just map the user-provided buffer, which also needs
+        * to be contiguous.
+        */
+       if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+               qat_req->src_align = NULL;
+               vaddr = sg_virt(req->src);
+       } else {
+               int shift = ctx->key_sz - req->src_len;
+
+               qat_req->src_align = kzalloc(ctx->key_sz, flags);
+               if (unlikely(!qat_req->src_align))
+                       return ret;
+
+               scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+                                        0, req->src_len, 0);
+               vaddr = qat_req->src_align;
+       }
+
+       qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
+                                              DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+               goto unmap_src;
+
+       if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+               qat_req->dst_align = NULL;
+               vaddr = sg_virt(req->dst);
+       } else {
+               qat_req->dst_align = kzalloc(ctx->key_sz, flags);
+               if (unlikely(!qat_req->dst_align))
+                       goto unmap_src;
+               vaddr = qat_req->dst_align;
+       }
+
+       qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
+                                               DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+               goto unmap_dst;
+
+       qat_req->in.rsa.in_tab[3] = 0;
+       qat_req->out.rsa.out_tab[1] = 0;
+       qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+                                        sizeof(struct qat_rsa_input_params),
+                                        DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+               goto unmap_dst;
+
+       qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+                                         sizeof(struct qat_rsa_output_params),
+                                         DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+               goto unmap_in_params;
+
+       msg->pke_mid.src_data_addr = qat_req->phy_in;
+       msg->pke_mid.dest_data_addr = qat_req->phy_out;
+       msg->pke_mid.opaque = (u64)(__force long)qat_req;
+       msg->input_param_count = 3;
+       msg->output_param_count = 1;
+
+       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+       if (ret == -ENOSPC)
+               goto unmap_all;
+
+       return ret;
+
+unmap_all:
+       if (!dma_mapping_error(dev, qat_req->phy_out))
+               dma_unmap_single(dev, qat_req->phy_out,
+                                sizeof(struct qat_rsa_output_params),
+                                DMA_TO_DEVICE);
+unmap_in_params:
+       if (!dma_mapping_error(dev, qat_req->phy_in))
+               dma_unmap_single(dev, qat_req->phy_in,
+                                sizeof(struct qat_rsa_input_params),
+                                DMA_TO_DEVICE);
+unmap_dst:
+       if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+               dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+                                ctx->key_sz, DMA_FROM_DEVICE);
+       kfree_sensitive(qat_req->dst_align);
+unmap_src:
+       if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+               dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
+                                DMA_TO_DEVICE);
+       kfree_sensitive(qat_req->src_align);
+       return ret;
+}
+
+static int qat_rsa_dec(struct akcipher_request *req)
+{
+       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       struct qat_asym_request *qat_req =
+                       PTR_ALIGN(akcipher_request_ctx(req), 64);
+       struct icp_qat_fw_pke_request *msg = &qat_req->req;
+       gfp_t flags = qat_algs_alloc_flags(&req->base);
+       u8 *vaddr;
+       int ret;
+
+       if (unlikely(!ctx->n || !ctx->d))
+               return -EINVAL;
+
+       if (req->dst_len < ctx->key_sz) {
+               req->dst_len = ctx->key_sz;
+               return -EOVERFLOW;
+       }
+
+       if (req->src_len > ctx->key_sz)
+               return -EINVAL;
+
+       memset(msg, '\0', sizeof(*msg));
+       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
+               qat_rsa_dec_fn_id_crt(ctx->key_sz) :
+               qat_rsa_dec_fn_id(ctx->key_sz);
+       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+               return -EINVAL;
+
+       qat_req->cb = qat_rsa_cb;
+       qat_req->ctx.rsa = ctx;
+       qat_req->areq.rsa = req;
+       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+       msg->pke_hdr.comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+       if (ctx->crt_mode) {
+               qat_req->in.rsa.dec_crt.p = ctx->dma_p;
+               qat_req->in.rsa.dec_crt.q = ctx->dma_q;
+               qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
+               qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
+               qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
+       } else {
+               qat_req->in.rsa.dec.d = ctx->dma_d;
+               qat_req->in.rsa.dec.n = ctx->dma_n;
+       }
+       ret = -ENOMEM;
+
+       /*
+        * src can be any size in the valid range, but the HW expects it to
+        * match the size of the modulus n, so if it differs we must allocate
+        * a new buffer and copy the src data into it.
+        * Otherwise we just map the user-provided buffer, which also needs
+        * to be contiguous.
+        */
+       if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+               qat_req->src_align = NULL;
+               vaddr = sg_virt(req->src);
+       } else {
+               int shift = ctx->key_sz - req->src_len;
+
+               qat_req->src_align = kzalloc(ctx->key_sz, flags);
+               if (unlikely(!qat_req->src_align))
+                       return ret;
+
+               scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+                                        0, req->src_len, 0);
+               vaddr = qat_req->src_align;
+       }
+
+       qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
+                                              DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+               goto unmap_src;
+
+       if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+               qat_req->dst_align = NULL;
+               vaddr = sg_virt(req->dst);
+       } else {
+               qat_req->dst_align = kzalloc(ctx->key_sz, flags);
+               if (unlikely(!qat_req->dst_align))
+                       goto unmap_src;
+               vaddr = qat_req->dst_align;
+       }
+       qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
+                                               DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+               goto unmap_dst;
+
+       if (ctx->crt_mode)
+               qat_req->in.rsa.in_tab[6] = 0;
+       else
+               qat_req->in.rsa.in_tab[3] = 0;
+       qat_req->out.rsa.out_tab[1] = 0;
+       qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+                                        sizeof(struct qat_rsa_input_params),
+                                        DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+               goto unmap_dst;
+
+       qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+                                         sizeof(struct qat_rsa_output_params),
+                                         DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+               goto unmap_in_params;
+
+       msg->pke_mid.src_data_addr = qat_req->phy_in;
+       msg->pke_mid.dest_data_addr = qat_req->phy_out;
+       msg->pke_mid.opaque = (u64)(__force long)qat_req;
+       if (ctx->crt_mode)
+               msg->input_param_count = 6;
+       else
+               msg->input_param_count = 3;
+
+       msg->output_param_count = 1;
+
+       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+       if (ret == -ENOSPC)
+               goto unmap_all;
+
+       return ret;
+
+unmap_all:
+       if (!dma_mapping_error(dev, qat_req->phy_out))
+               dma_unmap_single(dev, qat_req->phy_out,
+                                sizeof(struct qat_rsa_output_params),
+                                DMA_TO_DEVICE);
+unmap_in_params:
+       if (!dma_mapping_error(dev, qat_req->phy_in))
+               dma_unmap_single(dev, qat_req->phy_in,
+                                sizeof(struct qat_rsa_input_params),
+                                DMA_TO_DEVICE);
+unmap_dst:
+       if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+               dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+                                ctx->key_sz, DMA_FROM_DEVICE);
+       kfree_sensitive(qat_req->dst_align);
+unmap_src:
+       if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+               dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
+                                DMA_TO_DEVICE);
+       kfree_sensitive(qat_req->src_align);
+       return ret;
+}
+
+static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
+                        size_t vlen)
+{
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       const char *ptr = value;
+       int ret;
+
+       while (!*ptr && vlen) {
+               ptr++;
+               vlen--;
+       }
+
+       ctx->key_sz = vlen;
+       ret = -EINVAL;
+       /* invalid key size provided */
+       if (!qat_rsa_enc_fn_id(ctx->key_sz))
+               goto err;
+
+       ret = -ENOMEM;
+       ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+       if (!ctx->n)
+               goto err;
+
+       memcpy(ctx->n, ptr, ctx->key_sz);
+       return 0;
+err:
+       ctx->key_sz = 0;
+       ctx->n = NULL;
+       return ret;
+}
+
+static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
+                        size_t vlen)
+{
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       const char *ptr = value;
+
+       while (!*ptr && vlen) {
+               ptr++;
+               vlen--;
+       }
+
+       if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+               ctx->e = NULL;
+               return -EINVAL;
+       }
+
+       ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+       if (!ctx->e)
+               return -ENOMEM;
+
+       memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
+       return 0;
+}
+
+static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
+                        size_t vlen)
+{
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       const char *ptr = value;
+       int ret;
+
+       while (!*ptr && vlen) {
+               ptr++;
+               vlen--;
+       }
+
+       ret = -EINVAL;
+       if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+               goto err;
+
+       ret = -ENOMEM;
+       ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+       if (!ctx->d)
+               goto err;
+
+       memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
+       return 0;
+err:
+       ctx->d = NULL;
+       return ret;
+}
+
+static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
+{
+       while (!**ptr && *len) {
+               (*ptr)++;
+               (*len)--;
+       }
+}
+
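+/*
+ * Best-effort CRT setup: copy p, q, dp, dq and qinv into DMA-coherent
+ * buffers. On any failure the buffers allocated so far are zeroed and
+ * freed and crt_mode is left false, so decryption falls back to the
+ * non-CRT path instead of erroring out.
+ */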
+static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
+{
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       const char *ptr;
+       unsigned int len;
+       unsigned int half_key_sz = ctx->key_sz / 2;
+
+       /* p */
+       ptr = rsa_key->p;
+       len = rsa_key->p_sz;
+       qat_rsa_drop_leading_zeros(&ptr, &len);
+       if (!len)
+               goto err;
+       ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+       if (!ctx->p)
+               goto err;
+       memcpy(ctx->p + (half_key_sz - len), ptr, len);
+
+       /* q */
+       ptr = rsa_key->q;
+       len = rsa_key->q_sz;
+       qat_rsa_drop_leading_zeros(&ptr, &len);
+       if (!len)
+               goto free_p;
+       ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+       if (!ctx->q)
+               goto free_p;
+       memcpy(ctx->q + (half_key_sz - len), ptr, len);
+
+       /* dp */
+       ptr = rsa_key->dp;
+       len = rsa_key->dp_sz;
+       qat_rsa_drop_leading_zeros(&ptr, &len);
+       if (!len)
+               goto free_q;
+       ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+                                    GFP_KERNEL);
+       if (!ctx->dp)
+               goto free_q;
+       memcpy(ctx->dp + (half_key_sz - len), ptr, len);
+
+       /* dq */
+       ptr = rsa_key->dq;
+       len = rsa_key->dq_sz;
+       qat_rsa_drop_leading_zeros(&ptr, &len);
+       if (!len)
+               goto free_dp;
+       ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+                                    GFP_KERNEL);
+       if (!ctx->dq)
+               goto free_dp;
+       memcpy(ctx->dq + (half_key_sz - len), ptr, len);
+
+       /* qinv */
+       ptr = rsa_key->qinv;
+       len = rsa_key->qinv_sz;
+       qat_rsa_drop_leading_zeros(&ptr, &len);
+       if (!len)
+               goto free_dq;
+       ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+                                      GFP_KERNEL);
+       if (!ctx->qinv)
+               goto free_dq;
+       memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
+
+       ctx->crt_mode = true;
+       return;
+
+free_dq:
+       memset(ctx->dq, '\0', half_key_sz);
+       dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+       ctx->dq = NULL;
+free_dp:
+       memset(ctx->dp, '\0', half_key_sz);
+       dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+       ctx->dp = NULL;
+free_q:
+       memset(ctx->q, '\0', half_key_sz);
+       dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+       ctx->q = NULL;
+free_p:
+       memset(ctx->p, '\0', half_key_sz);
+       dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+       ctx->p = NULL;
+err:
+       ctx->crt_mode = false;
+}
+
+static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
+{
+       unsigned int half_key_sz = ctx->key_sz / 2;
+
+       /* Free the old key if any */
+       if (ctx->n)
+               dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+       if (ctx->e)
+               dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+       if (ctx->d) {
+               memset(ctx->d, '\0', ctx->key_sz);
+               dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+       }
+       if (ctx->p) {
+               memset(ctx->p, '\0', half_key_sz);
+               dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+       }
+       if (ctx->q) {
+               memset(ctx->q, '\0', half_key_sz);
+               dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+       }
+       if (ctx->dp) {
+               memset(ctx->dp, '\0', half_key_sz);
+               dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+       }
+       if (ctx->dq) {
+               memset(ctx->dq, '\0', half_key_sz);
+               dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+       }
+       if (ctx->qinv) {
+               memset(ctx->qinv, '\0', half_key_sz);
+               dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
+       }
+
+       ctx->n = NULL;
+       ctx->e = NULL;
+       ctx->d = NULL;
+       ctx->p = NULL;
+       ctx->q = NULL;
+       ctx->dp = NULL;
+       ctx->dq = NULL;
+       ctx->qinv = NULL;
+       ctx->crt_mode = false;
+       ctx->key_sz = 0;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+                         unsigned int keylen, bool private)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+       struct rsa_key rsa_key;
+       int ret;
+
+       qat_rsa_clear_ctx(dev, ctx);
+
+       if (private)
+               ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+       else
+               ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+       if (ret < 0)
+               goto free;
+
+       ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
+       if (ret < 0)
+               goto free;
+       ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
+       if (ret < 0)
+               goto free;
+       if (private) {
+               ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
+               if (ret < 0)
+                       goto free;
+               qat_rsa_setkey_crt(ctx, &rsa_key);
+       }
+
+       if (!ctx->n || !ctx->e) {
+               /* invalid key provided */
+               ret = -EINVAL;
+               goto free;
+       }
+       if (private && !ctx->d) {
+               /* invalid private key provided */
+               ret = -EINVAL;
+               goto free;
+       }
+
+       return 0;
+free:
+       qat_rsa_clear_ctx(dev, ctx);
+       return ret;
+}
+
+static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+                            unsigned int keylen)
+{
+       return qat_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+                             unsigned int keylen)
+{
+       return qat_rsa_setkey(tfm, key, keylen, true);
+}
+
+static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+       return ctx->key_sz;
+}
+
+static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst =
+                       qat_crypto_get_instance_node(numa_node_id());
+
+       if (!inst)
+               return -EINVAL;
+
+       akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+
+       ctx->key_sz = 0;
+       ctx->inst = inst;
+       return 0;
+}
+
+static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+       qat_rsa_clear_ctx(dev, ctx);
+       qat_crypto_put_instance(ctx->inst);
+}
+
+static struct akcipher_alg rsa = {
+       .encrypt = qat_rsa_enc,
+       .decrypt = qat_rsa_dec,
+       .set_pub_key = qat_rsa_setpubkey,
+       .set_priv_key = qat_rsa_setprivkey,
+       .max_size = qat_rsa_max_size,
+       .init = qat_rsa_init_tfm,
+       .exit = qat_rsa_exit_tfm,
+       .base = {
+               .cra_name = "rsa",
+               .cra_driver_name = "qat-rsa",
+               .cra_priority = 1000,
+               .cra_module = THIS_MODULE,
+               .cra_ctxsize = sizeof(struct qat_rsa_ctx),
+       },
+};
+
+static struct kpp_alg dh = {
+       .set_secret = qat_dh_set_secret,
+       .generate_public_key = qat_dh_compute_value,
+       .compute_shared_secret = qat_dh_compute_value,
+       .max_size = qat_dh_max_size,
+       .init = qat_dh_init_tfm,
+       .exit = qat_dh_exit_tfm,
+       .base = {
+               .cra_name = "dh",
+               .cra_driver_name = "qat-dh",
+               .cra_priority = 1000,
+               .cra_module = THIS_MODULE,
+               .cra_ctxsize = sizeof(struct qat_dh_ctx),
+       },
+};
+
+int qat_asym_algs_register(void)
+{
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (++active_devs == 1) {
+               rsa.base.cra_flags = 0;
+               ret = crypto_register_akcipher(&rsa);
+               if (ret)
+                       goto unlock;
+               ret = crypto_register_kpp(&dh);
+       }
+unlock:
+       mutex_unlock(&algs_lock);
+       return ret;
+}
+
+void qat_asym_algs_unregister(void)
+{
+       mutex_lock(&algs_lock);
+       if (--active_devs == 0) {
+               crypto_unregister_akcipher(&rsa);
+               crypto_unregister_kpp(&dh);
+       }
+       mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
new file mode 100644 (file)
index 0000000..76baed0
--- /dev/null
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2014 - 2022 Intel Corporation */
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "qat_bl.h"
+#include "qat_crypto.h"
+
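+/*
+ * Undo qat_bl_sgl_to_bufl(): unmap every buffer and the firmware SGL
+ * descriptors, freeing the descriptor lists only if they were
+ * heap-allocated rather than taken from the preallocated fixed lists.
+ */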
+void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
+                     struct qat_request_buffs *buf)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       struct qat_alg_buf_list *bl = buf->bl;
+       struct qat_alg_buf_list *blout = buf->blout;
+       dma_addr_t blp = buf->blp;
+       dma_addr_t blpout = buf->bloutp;
+       size_t sz = buf->sz;
+       size_t sz_out = buf->sz_out;
+       int bl_dma_dir;
+       int i;
+
+       bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+       for (i = 0; i < bl->num_bufs; i++)
+               dma_unmap_single(dev, bl->buffers[i].addr,
+                                bl->buffers[i].len, bl_dma_dir);
+
+       dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
+       if (!buf->sgl_src_valid)
+               kfree(bl);
+
+       if (blp != blpout) {
+               for (i = 0; i < blout->num_mapped_bufs; i++) {
+                       dma_unmap_single(dev, blout->buffers[i].addr,
+                                        blout->buffers[i].len,
+                                        DMA_FROM_DEVICE);
+               }
+               dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
+
+               if (!buf->sgl_dst_valid)
+                       kfree(blout);
+       }
+}
+
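+/*
+ * Build the flat firmware buffer lists from the src/dst scatterlists.
+ * sskip/dskip bytes are skipped at the start of src/dst, and an optional
+ * extra destination buffer is appended to the output list. Lists with up
+ * to QAT_MAX_BUFF_DESC entries use the preallocated storage in struct
+ * qat_request_buffs instead of allocating.
+ */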
+static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+                               struct scatterlist *sgl,
+                               struct scatterlist *sglout,
+                               struct qat_request_buffs *buf,
+                               dma_addr_t extra_dst_buff,
+                               size_t sz_extra_dst_buff,
+                               unsigned int sskip,
+                               unsigned int dskip,
+                               gfp_t flags)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       int i, sg_nctr = 0;
+       int n = sg_nents(sgl);
+       struct qat_alg_buf_list *bufl;
+       struct qat_alg_buf_list *buflout = NULL;
+       dma_addr_t blp = DMA_MAPPING_ERROR;
+       dma_addr_t bloutp = DMA_MAPPING_ERROR;
+       struct scatterlist *sg;
+       size_t sz_out, sz = struct_size(bufl, buffers, n);
+       int node = dev_to_node(&GET_DEV(accel_dev));
+       unsigned int left;
+       int bufl_dma_dir;
+
+       if (unlikely(!n))
+               return -EINVAL;
+
+       buf->sgl_src_valid = false;
+       buf->sgl_dst_valid = false;
+
+       if (n > QAT_MAX_BUFF_DESC) {
+               bufl = kzalloc_node(sz, flags, node);
+               if (unlikely(!bufl))
+                       return -ENOMEM;
+       } else {
+               bufl = &buf->sgl_src.sgl_hdr;
+               memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+               buf->sgl_src_valid = true;
+       }
+
+       bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+       for (i = 0; i < n; i++)
+               bufl->buffers[i].addr = DMA_MAPPING_ERROR;
+
+       left = sskip;
+
+       for_each_sg(sgl, sg, n, i) {
+               int y = sg_nctr;
+
+               if (!sg->length)
+                       continue;
+
+               if (left >= sg->length) {
+                       left -= sg->length;
+                       continue;
+               }
+               bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+                                                      sg->length - left,
+                                                      bufl_dma_dir);
+               bufl->buffers[y].len = sg->length;
+               if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
+                       goto err_in;
+               sg_nctr++;
+               if (left) {
+                       bufl->buffers[y].len -= left;
+                       left = 0;
+               }
+       }
+       bufl->num_bufs = sg_nctr;
+       blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, blp)))
+               goto err_in;
+       buf->bl = bufl;
+       buf->blp = blp;
+       buf->sz = sz;
+       /* Handle out-of-place operation (dst differs from src) */
+       if (sgl != sglout) {
+               struct qat_alg_buf *buffers;
+               int extra_buff = extra_dst_buff ? 1 : 0;
+               int n_sglout = sg_nents(sglout);
+
+               n = n_sglout + extra_buff;
+               sz_out = struct_size(buflout, buffers, n);
+               left = dskip;
+
+               sg_nctr = 0;
+
+               if (n > QAT_MAX_BUFF_DESC) {
+                       buflout = kzalloc_node(sz_out, flags, node);
+                       if (unlikely(!buflout))
+                               goto err_in;
+               } else {
+                       buflout = &buf->sgl_dst.sgl_hdr;
+                       memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+                       buf->sgl_dst_valid = true;
+               }
+
+               buffers = buflout->buffers;
+               for (i = 0; i < n; i++)
+                       buffers[i].addr = DMA_MAPPING_ERROR;
+
+               for_each_sg(sglout, sg, n_sglout, i) {
+                       int y = sg_nctr;
+
+                       if (!sg->length)
+                               continue;
+
+                       if (left >= sg->length) {
+                               left -= sg->length;
+                               continue;
+                       }
+                       buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+                                                        sg->length - left,
+                                                        DMA_FROM_DEVICE);
+                       if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
+                               goto err_out;
+                       buffers[y].len = sg->length;
+                       sg_nctr++;
+                       if (left) {
+                               buffers[y].len -= left;
+                               left = 0;
+                       }
+               }
+               if (extra_buff) {
+                       buffers[sg_nctr].addr = extra_dst_buff;
+                       buffers[sg_nctr].len = sz_extra_dst_buff;
+               }
+
+               buflout->num_bufs = sg_nctr;
+               buflout->num_bufs += extra_buff;
+               buflout->num_mapped_bufs = sg_nctr;
+               bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, bloutp)))
+                       goto err_out;
+               buf->blout = buflout;
+               buf->bloutp = bloutp;
+               buf->sz_out = sz_out;
+       } else {
+               /* Otherwise set the src and dst to the same address */
+               buf->bloutp = buf->blp;
+               buf->sz_out = 0;
+       }
+       return 0;
+
+err_out:
+       if (!dma_mapping_error(dev, bloutp))
+               dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
+       n = sg_nents(sglout);
+       for (i = 0; i < n; i++) {
+               if (buflout->buffers[i].addr == extra_dst_buff)
+                       break;
+               if (!dma_mapping_error(dev, buflout->buffers[i].addr))
+                       dma_unmap_single(dev, buflout->buffers[i].addr,
+                                        buflout->buffers[i].len,
+                                        DMA_FROM_DEVICE);
+       }
+
+       if (!buf->sgl_dst_valid)
+               kfree(buflout);
+
+err_in:
+       if (!dma_mapping_error(dev, blp))
+               dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
+       n = sg_nents(sgl);
+       for (i = 0; i < n; i++)
+               if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+                       dma_unmap_single(dev, bufl->buffers[i].addr,
+                                        bufl->buffers[i].len,
+                                        bufl_dma_dir);
+
+       if (!buf->sgl_src_valid)
+               kfree(bufl);
+
+       dev_err(dev, "Failed to map buf for dma\n");
+       return -ENOMEM;
+}
+
+int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+                      struct scatterlist *sgl,
+                      struct scatterlist *sglout,
+                      struct qat_request_buffs *buf,
+                      struct qat_sgl_to_bufl_params *params,
+                      gfp_t flags)
+{
+       dma_addr_t extra_dst_buff = 0;
+       size_t sz_extra_dst_buff = 0;
+       unsigned int sskip = 0;
+       unsigned int dskip = 0;
+
+       if (params) {
+               extra_dst_buff = params->extra_dst_buff;
+               sz_extra_dst_buff = params->sz_extra_dst_buff;
+               sskip = params->sskip;
+               dskip = params->dskip;
+       }
+
+       return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
+                                   extra_dst_buff, sz_extra_dst_buff,
+                                   sskip, dskip, flags);
+}
+
+static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
+                            struct qat_alg_buf_list *bl)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       int n = bl->num_bufs;
+       int i;
+
+       for (i = 0; i < n; i++)
+               if (!dma_mapping_error(dev, bl->buffers[i].addr))
+                       dma_unmap_single(dev, bl->buffers[i].addr,
+                                        bl->buffers[i].len, DMA_FROM_DEVICE);
+}
+
+static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
+                         struct scatterlist *sgl,
+                         struct qat_alg_buf_list **bl)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       struct qat_alg_buf_list *bufl;
+       int node = dev_to_node(dev);
+       struct scatterlist *sg;
+       int n, i, sg_nctr;
+       size_t sz;
+
+       n = sg_nents(sgl);
+       sz = struct_size(bufl, buffers, n);
+       bufl = kzalloc_node(sz, GFP_KERNEL, node);
+       if (unlikely(!bufl))
+               return -ENOMEM;
+
+       for (i = 0; i < n; i++)
+               bufl->buffers[i].addr = DMA_MAPPING_ERROR;
+
+       sg_nctr = 0;
+       for_each_sg(sgl, sg, n, i) {
+               int y = sg_nctr;
+
+               if (!sg->length)
+                       continue;
+
+               bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+                                                      sg->length,
+                                                      DMA_FROM_DEVICE);
+               bufl->buffers[y].len = sg->length;
+               if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
+                       goto err_map;
+               sg_nctr++;
+       }
+       bufl->num_bufs = sg_nctr;
+       bufl->num_mapped_bufs = sg_nctr;
+
+       *bl = bufl;
+
+       return 0;
+
+err_map:
+       for (i = 0; i < n; i++)
+               if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+                       dma_unmap_single(dev, bufl->buffers[i].addr,
+                                        bufl->buffers[i].len,
+                                        DMA_FROM_DEVICE);
+       kfree(bufl);
+       *bl = NULL;
+
+       return -ENOMEM;
+}
+
+static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
+                                 struct scatterlist *sgl,
+                                 struct qat_alg_buf_list *bl,
+                                 bool free_bl)
+{
+       if (bl) {
+               qat_bl_sgl_unmap(accel_dev, bl);
+
+               if (free_bl)
+                       kfree(bl);
+       }
+       if (sgl)
+               sgl_free(sgl);
+}
+
+static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
+                               struct scatterlist **sgl,
+                               struct qat_alg_buf_list **bl,
+                               unsigned int dlen,
+                               gfp_t gfp)
+{
+       struct scatterlist *dst;
+       int ret;
+
+       dst = sgl_alloc(dlen, gfp, NULL);
+       if (!dst) {
+               dev_err(&GET_DEV(accel_dev), "sg_alloc failed\n");
+               return -ENOMEM;
+       }
+
+       ret = qat_bl_sgl_map(accel_dev, dst, bl);
+       if (ret)
+               goto err;
+
+       *sgl = dst;
+
+       return 0;
+
+err:
+       sgl_free(dst);
+       *sgl = NULL;
+       return ret;
+}
+
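+/*
+ * Replace the destination of an already-mapped request with a freshly
+ * allocated scatterlist of dlen bytes, presumably so the request can be
+ * resubmitted with a larger output buffer. The old descriptor and
+ * scatterlist are unmapped and freed on success.
+ */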
+int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
+                              struct scatterlist **sg,
+                              unsigned int dlen,
+                              struct qat_request_buffs *qat_bufs,
+                              gfp_t gfp)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       dma_addr_t new_blp = DMA_MAPPING_ERROR;
+       struct qat_alg_buf_list *new_bl;
+       struct scatterlist *new_sg;
+       size_t new_bl_size;
+       int ret;
+
+       ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
+       if (ret)
+               return ret;
+
+       new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
+
+       /* Map new firmware SGL descriptor */
+       new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, new_blp)))
+               goto err;
+
+       /* Unmap old firmware SGL descriptor */
+       dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);
+
+       /* Free and unmap old scatterlist */
+       qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
+                             !qat_bufs->sgl_dst_valid);
+
+       qat_bufs->sgl_dst_valid = false;
+       qat_bufs->blout = new_bl;
+       qat_bufs->bloutp = new_blp;
+       qat_bufs->sz_out = new_bl_size;
+
+       *sg = new_sg;
+
+       return 0;
+err:
+       qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);
+
+       if (!dma_mapping_error(dev, new_blp))
+               dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);
+
+       return -ENOMEM;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h
new file mode 100644 (file)
index 0000000..d87e4f3
--- /dev/null
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2014 - 2022 Intel Corporation */
+#ifndef QAT_BL_H
+#define QAT_BL_H
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#define QAT_MAX_BUFF_DESC      4
+
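+/*
+ * Flat buffer descriptor layout consumed by the firmware; kept __packed
+ * so the in-memory layout matches what the device expects.
+ */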
+struct qat_alg_buf {
+       u32 len;
+       u32 resrvd;
+       u64 addr;
+} __packed;
+
+struct qat_alg_buf_list {
+       u64 resrvd;
+       u32 num_bufs;
+       u32 num_mapped_bufs;
+       struct qat_alg_buf buffers[];
+} __packed;
+
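+/*
+ * Inline, 64-byte-aligned variant of struct qat_alg_buf_list with room for
+ * QAT_MAX_BUFF_DESC entries. Two of these are embedded in struct
+ * qat_request_buffs (sgl_src/sgl_dst) so that, in the common case, short
+ * scatterlists can be described without a per-request allocation.
+ */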
+struct qat_alg_fixed_buf_list {
+       struct qat_alg_buf_list sgl_hdr;
+       struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+} __packed __aligned(64);
+
+struct qat_request_buffs {
+       struct qat_alg_buf_list *bl;
+       dma_addr_t blp;
+       struct qat_alg_buf_list *blout;
+       dma_addr_t bloutp;
+       size_t sz;
+       size_t sz_out;
+       bool sgl_src_valid;
+       bool sgl_dst_valid;
+       struct qat_alg_fixed_buf_list sgl_src;
+       struct qat_alg_fixed_buf_list sgl_dst;
+};
+
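+/*
+ * Optional parameters for qat_bl_sgl_to_bufl(): a pre-mapped extra buffer
+ * appended to the destination list and the number of bytes to skip at the
+ * start of the source (sskip) and destination (dskip) scatterlists.
+ */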
+struct qat_sgl_to_bufl_params {
+       dma_addr_t extra_dst_buff;
+       size_t sz_extra_dst_buff;
+       unsigned int sskip;
+       unsigned int dskip;
+};
+
+void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
+                     struct qat_request_buffs *buf);
+int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+                      struct scatterlist *sgl,
+                      struct scatterlist *sglout,
+                      struct qat_request_buffs *buf,
+                      struct qat_sgl_to_bufl_params *params,
+                      gfp_t flags);
+
+static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
+{
+       return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
+int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
+                              struct scatterlist **newd,
+                              unsigned int dlen,
+                              struct qat_request_buffs *qat_bufs,
+                              gfp_t gfp);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
new file mode 100644 (file)
index 0000000..b533984
--- /dev/null
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/crypto.h>
+#include <crypto/acompress.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "qat_bl.h"
+#include "qat_comp_req.h"
+#include "qat_compression.h"
+#include "qat_algs_send.h"
+
+#define QAT_RFC_1950_HDR_SIZE 2
+#define QAT_RFC_1950_FOOTER_SIZE 4
+#define QAT_RFC_1950_CM_DEFLATE 8
+#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
+#define QAT_RFC_1950_CM_MASK 0x0f
+#define QAT_RFC_1950_CM_OFFSET 4
+#define QAT_RFC_1950_DICT_MASK 0x20
+#define QAT_RFC_1950_COMP_HDR 0x785e
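+
+/*
+ * Per RFC 1950, QAT_RFC_1950_COMP_HDR 0x785e encodes CMF = 0x78 and
+ * FLG = 0x5e: CM = 0x78 & 0x0f = 8 (deflate), CINFO = 0x78 >> 4 = 7
+ * (32K window), FDICT = 0 and 0x785e % 31 == 0, so the FCHECK checksum
+ * is valid.
+ */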
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+enum direction {
+       DECOMPRESSION = 0,
+       COMPRESSION = 1,
+};
+
+struct qat_compression_req;
+
+struct qat_compression_ctx {
+       u8 comp_ctx[QAT_COMP_CTX_SIZE];
+       struct qat_compression_instance *inst;
+       int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
+};
+
+struct qat_dst {
+       bool is_null;
+       bool resubmitted;
+};
+
+struct qat_compression_req {
+       u8 req[QAT_COMP_REQ_SIZE];
+       struct qat_compression_ctx *qat_compression_ctx;
+       struct acomp_req *acompress_req;
+       struct qat_request_buffs buf;
+       enum direction dir;
+       int actual_dlen;
+       struct qat_alg_req alg_req;
+       struct work_struct resubmit;
+       struct qat_dst dst;
+};
+
+static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
+                                  struct qat_compression_instance *inst,
+                                  struct crypto_async_request *base)
+{
+       struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+       alg_req->fw_req = (u32 *)&qat_req->req;
+       alg_req->tx_ring = inst->dc_tx;
+       alg_req->base = base;
+       alg_req->backlog = &inst->backlog;
+
+       return qat_alg_send_message(alg_req);
+}
+
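+/*
+ * Worker used to retry a decompression request that was submitted without a
+ * destination buffer and overflowed: allocate and map a new destination
+ * scatterlist of CRYPTO_ACOMP_DST_MAX bytes, patch the firmware descriptor
+ * to point at it and resend the request.
+ */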
+static void qat_comp_resubmit(struct work_struct *work)
+{
+       struct qat_compression_req *qat_req =
+               container_of(work, struct qat_compression_req, resubmit);
+       struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
+       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+       struct qat_request_buffs *qat_bufs = &qat_req->buf;
+       struct qat_compression_instance *inst = ctx->inst;
+       struct acomp_req *areq = qat_req->acompress_req;
+       struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
+       unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
+       u8 *req = qat_req->req;
+       dma_addr_t dfbuf;
+       int ret;
+
+       areq->dlen = dlen;
+
+       dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
+               crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
+               qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);
+
+       ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
+                                        qat_algs_alloc_flags(&areq->base));
+       if (ret)
+               goto err;
+
+       qat_req->dst.resubmitted = true;
+
+       dfbuf = qat_req->buf.bloutp;
+       qat_comp_override_dst(req, dfbuf, dlen);
+
+       ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
+       if (ret != -ENOSPC)
+               return;
+
+err:
+       qat_bl_free_bufl(accel_dev, qat_bufs);
+       acomp_request_complete(areq, ret);
+}
+
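+/*
+ * Validate the two-byte zlib stream header: reject windows larger than 32K
+ * (CINFO > 7), compression methods other than deflate (CM != 8) and streams
+ * that require a preset dictionary (FDICT set).
+ */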
+static int parse_zlib_header(u16 zlib_h)
+{
+       int ret = -EINVAL;
+       __be16 header;
+       u8 *header_p;
+       u8 cmf, flg;
+
+       header = cpu_to_be16(zlib_h);
+       header_p = (u8 *)&header;
+
+       flg = header_p[0];
+       cmf = header_p[1];
+
+       if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K)
+               return ret;
+
+       if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE)
+               return ret;
+
+       if (flg & QAT_RFC_1950_DICT_MASK)
+               return ret;
+
+       return 0;
+}
+
+static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req,
+                                    void *resp)
+{
+       struct acomp_req *areq = qat_req->acompress_req;
+       enum direction dir = qat_req->dir;
+       __be32 qat_produced_adler;
+
+       qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp));
+
+       if (dir == COMPRESSION) {
+               __be16 zlib_header;
+
+               zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR);
+               scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1);
+               areq->dlen += QAT_RFC_1950_HDR_SIZE;
+
+               scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen,
+                                        QAT_RFC_1950_FOOTER_SIZE, 1);
+               areq->dlen += QAT_RFC_1950_FOOTER_SIZE;
+       } else {
+               __be32 decomp_adler;
+               int footer_offset;
+               int consumed;
+
+               consumed = qat_comp_get_consumed_ctr(resp);
+               footer_offset = consumed + QAT_RFC_1950_HDR_SIZE;
+               if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen)
+                       return -EBADMSG;
+
+               scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset,
+                                        QAT_RFC_1950_FOOTER_SIZE, 0);
+
+               if (qat_produced_adler != decomp_adler)
+                       return -EBADMSG;
+       }
+       return 0;
+}
+
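+/*
+ * Common completion path: translate the firmware status into an errno,
+ * resubmit NULL-destination decompression requests that overflowed and, for
+ * compression, require the CnV (compress-and-verify) flag before trusting
+ * the produced length.
+ */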
+static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
+                                     void *resp)
+{
+       struct acomp_req *areq = qat_req->acompress_req;
+       struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
+       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+       struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
+       struct qat_compression_instance *inst = ctx->inst;
+       int consumed, produced;
+       s8 cmp_err, xlt_err;
+       int res = -EBADMSG;
+       int status;
+       u8 cnv;
+
+       status = qat_comp_get_cmp_status(resp);
+       status |= qat_comp_get_xlt_status(resp);
+       cmp_err = qat_comp_get_cmp_err(resp);
+       xlt_err = qat_comp_get_xlt_err(resp);
+
+       consumed = qat_comp_get_consumed_ctr(resp);
+       produced = qat_comp_get_produced_ctr(resp);
+
+       dev_dbg(&GET_DEV(accel_dev),
+               "[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d\n",
+               crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
+               qat_req->dir == COMPRESSION ? "comp  " : "decomp",
+               status ? "ERR" : "OK ",
+               areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);
+
+       areq->dlen = 0;
+
+       if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
+               if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
+                       if (qat_req->dst.resubmitted) {
+                               dev_dbg(&GET_DEV(accel_dev),
+                                       "Output does not fit destination buffer\n");
+                               res = -EOVERFLOW;
+                               goto end;
+                       }
+
+                       INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
+                       adf_misc_wq_queue_work(&qat_req->resubmit);
+                       return;
+               }
+       }
+
+       if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+               goto end;
+
+       if (qat_req->dir == COMPRESSION) {
+               cnv = qat_comp_get_cmp_cnv_flag(resp);
+               if (unlikely(!cnv)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Verified compression not supported\n");
+                       goto end;
+               }
+
+               if (unlikely(produced > qat_req->actual_dlen)) {
+                       memset(inst->dc_data->ovf_buff, 0,
+                              inst->dc_data->ovf_buff_sz);
+                       dev_dbg(&GET_DEV(accel_dev),
+                               "Actual buffer overflow: produced=%d, dlen=%d\n",
+                               produced, qat_req->actual_dlen);
+                       goto end;
+               }
+       }
+
+       res = 0;
+       areq->dlen = produced;
+
+       if (ctx->qat_comp_callback)
+               res = ctx->qat_comp_callback(qat_req, resp);
+
+end:
+       qat_bl_free_bufl(accel_dev, &qat_req->buf);
+       acomp_request_complete(areq, res);
+}
+
+void qat_comp_alg_callback(void *resp)
+{
+       struct qat_compression_req *qat_req =
+                       (void *)(__force long)qat_comp_get_opaque(resp);
+       struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
+
+       qat_comp_generic_callback(qat_req, resp);
+
+       qat_alg_send_backlog(backlog);
+}
+
+static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_compression_instance *inst;
+       int node;
+
+       if (tfm->node == NUMA_NO_NODE)
+               node = numa_node_id();
+       else
+               node = tfm->node;
+
+       memset(ctx, 0, sizeof(*ctx));
+       inst = qat_compression_get_instance_node(node);
+       if (!inst)
+               return -EINVAL;
+       ctx->inst = inst;
+
+       ctx->inst->build_deflate_ctx(ctx->comp_ctx);
+
+       return 0;
+}
+
+static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
+{
+       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       qat_compression_put_instance(ctx->inst);
+       memset(ctx, 0, sizeof(*ctx));
+}
+
+static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+       int ret;
+
+       ret = qat_comp_alg_init_tfm(acomp_tfm);
+       ctx->qat_comp_callback = &qat_comp_rfc1950_callback;
+
+       return ret;
+}
+
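+/*
+ * Common submission path: translate the acomp request into firmware buffer
+ * lists, allocating a destination scatterlist when the caller did not
+ * provide one, build the firmware request and post it to the DC ring.
+ */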
+static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
+                                           unsigned int shdr, unsigned int sftr,
+                                           unsigned int dhdr, unsigned int dftr)
+{
+       struct qat_compression_req *qat_req = acomp_request_ctx(areq);
+       struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
+       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_compression_instance *inst = ctx->inst;
+       gfp_t f = qat_algs_alloc_flags(&areq->base);
+       struct qat_sgl_to_bufl_params params = {0};
+       int slen = areq->slen - shdr - sftr;
+       int dlen = areq->dlen - dhdr - dftr;
+       dma_addr_t sfbuf, dfbuf;
+       u8 *req = qat_req->req;
+       size_t ovf_buff_sz;
+       int ret;
+
+       params.sskip = shdr;
+       params.dskip = dhdr;
+
+       if (!areq->src || !slen)
+               return -EINVAL;
+
+       if (areq->dst && !dlen)
+               return -EINVAL;
+
+       qat_req->dst.is_null = false;
+
+       /* Handle acomp requests that require the allocation of a destination
+        * buffer. The destination buffer is sized at twice the source buffer
+        * (rounded up to the size of a page) to fit either the decompressed
+        * output or the expanded data in the compression case.
+        */
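+       /* With 4K pages, for example, slen = 3000 gives
+        * dlen = round_up(2 * 3000, 4096) = 8192.
+        */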
+       if (!areq->dst) {
+               qat_req->dst.is_null = true;
+
+               dlen = round_up(2 * slen, PAGE_SIZE);
+               areq->dst = sgl_alloc(dlen, f, NULL);
+               if (!areq->dst)
+                       return -ENOMEM;
+
+               dlen -= dhdr + dftr;
+               areq->dlen = dlen;
+               qat_req->dst.resubmitted = false;
+       }
+
+       if (dir == COMPRESSION) {
+               params.extra_dst_buff = inst->dc_data->ovf_buff_p;
+               ovf_buff_sz = inst->dc_data->ovf_buff_sz;
+               params.sz_extra_dst_buff = ovf_buff_sz;
+       }
+
+       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+                                &qat_req->buf, &params, f);
+       if (unlikely(ret))
+               return ret;
+
+       sfbuf = qat_req->buf.blp;
+       dfbuf = qat_req->buf.bloutp;
+       qat_req->qat_compression_ctx = ctx;
+       qat_req->acompress_req = areq;
+       qat_req->dir = dir;
+
+       if (dir == COMPRESSION) {
+               qat_req->actual_dlen = dlen;
+               dlen += ovf_buff_sz;
+               qat_comp_create_compression_req(ctx->comp_ctx, req,
+                                               (u64)(__force long)sfbuf, slen,
+                                               (u64)(__force long)dfbuf, dlen,
+                                               (u64)(__force long)qat_req);
+       } else {
+               qat_comp_create_decompression_req(ctx->comp_ctx, req,
+                                                 (u64)(__force long)sfbuf, slen,
+                                                 (u64)(__force long)dfbuf, dlen,
+                                                 (u64)(__force long)qat_req);
+       }
+
+       ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
+       if (ret == -ENOSPC)
+               qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+
+       return ret;
+}
+
+static int qat_comp_alg_compress(struct acomp_req *req)
+{
+       return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
+}
+
+static int qat_comp_alg_decompress(struct acomp_req *req)
+{
+       return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
+}
+
+static int qat_comp_alg_rfc1950_compress(struct acomp_req *req)
+{
+       if (!req->dst && req->dlen != 0)
+               return -EINVAL;
+
+       if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
+               return -EINVAL;
+
+       return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0,
+                                               QAT_RFC_1950_HDR_SIZE,
+                                               QAT_RFC_1950_FOOTER_SIZE);
+}
+
+static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req)
+{
+       struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
+       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+       u16 zlib_header;
+       int ret;
+
+       if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
+               return -EBADMSG;
+
+       scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0);
+
+       ret = parse_zlib_header(zlib_header);
+       if (ret) {
+               dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n");
+               return ret;
+       }
+
+       return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE,
+                                               QAT_RFC_1950_FOOTER_SIZE, 0, 0);
+}
+
+static struct acomp_alg qat_acomp[] = { {
+       .base = {
+               .cra_name = "deflate",
+               .cra_driver_name = "qat_deflate",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+               .cra_ctxsize = sizeof(struct qat_compression_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_comp_alg_init_tfm,
+       .exit = qat_comp_alg_exit_tfm,
+       .compress = qat_comp_alg_compress,
+       .decompress = qat_comp_alg_decompress,
+       .dst_free = sgl_free,
+       .reqsize = sizeof(struct qat_compression_req),
+}, {
+       .base = {
+               .cra_name = "zlib-deflate",
+               .cra_driver_name = "qat_zlib_deflate",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC,
+               .cra_ctxsize = sizeof(struct qat_compression_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_comp_alg_rfc1950_init_tfm,
+       .exit = qat_comp_alg_exit_tfm,
+       .compress = qat_comp_alg_rfc1950_compress,
+       .decompress = qat_comp_alg_rfc1950_decompress,
+       .dst_free = sgl_free,
+       .reqsize = sizeof(struct qat_compression_req),
+} };
+
+int qat_comp_algs_register(void)
+{
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (++active_devs == 1)
+               ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+       mutex_unlock(&algs_lock);
+       return ret;
+}
+
+void qat_comp_algs_unregister(void)
+{
+       mutex_lock(&algs_lock);
+       if (--active_devs == 0)
+               crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+       mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
new file mode 100644 (file)
index 0000000..404e32c
--- /dev/null
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _QAT_COMP_REQ_H_
+#define _QAT_COMP_REQ_H_
+
+#include "icp_qat_fw_comp.h"
+
+#define QAT_COMP_REQ_SIZE (sizeof(struct icp_qat_fw_comp_req))
+#define QAT_COMP_CTX_SIZE (QAT_COMP_REQ_SIZE * 2)
+
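+/*
+ * Build a firmware request by copying the pre-built template from the
+ * compression context and filling in the source/destination DMA addresses,
+ * the lengths and the opaque cookie that identifies the request on
+ * completion.
+ */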
+static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen,
+                                      u64 dst, u32 dlen, u64 opaque)
+{
+       struct icp_qat_fw_comp_req *fw_tmpl = ctx;
+       struct icp_qat_fw_comp_req *fw_req = req;
+       struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
+
+       memcpy(fw_req, fw_tmpl, sizeof(*fw_req));
+       fw_req->comn_mid.src_data_addr = src;
+       fw_req->comn_mid.src_length = slen;
+       fw_req->comn_mid.dest_data_addr = dst;
+       fw_req->comn_mid.dst_length = dlen;
+       fw_req->comn_mid.opaque_data = opaque;
+       req_pars->comp_len = slen;
+       req_pars->out_buffer_sz = dlen;
+}
+
+static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen)
+{
+       struct icp_qat_fw_comp_req *fw_req = req;
+       struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
+
+       fw_req->comn_mid.dest_data_addr = dst;
+       fw_req->comn_mid.dst_length = dlen;
+       req_pars->out_buffer_sz = dlen;
+}
+
+static inline void qat_comp_create_compression_req(void *ctx, void *req,
+                                                  u64 src, u32 slen,
+                                                  u64 dst, u32 dlen,
+                                                  u64 opaque)
+{
+       qat_comp_create_req(ctx, req, src, slen, dst, dlen, opaque);
+}
+
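+/*
+ * The context holds two request templates back to back (QAT_COMP_CTX_SIZE is
+ * twice QAT_COMP_REQ_SIZE): the first is used for compression, the second
+ * for decompression, hence the fw_tmpl++ below.
+ */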
+static inline void qat_comp_create_decompression_req(void *ctx, void *req,
+                                                    u64 src, u32 slen,
+                                                    u64 dst, u32 dlen,
+                                                    u64 opaque)
+{
+       struct icp_qat_fw_comp_req *fw_tmpl = ctx;
+
+       fw_tmpl++;
+       qat_comp_create_req(fw_tmpl, req, src, slen, dst, dlen, opaque);
+}
+
+static inline u32 qat_comp_get_consumed_ctr(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->comp_resp_pars.input_byte_counter;
+}
+
+static inline u32 qat_comp_get_produced_ctr(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->comp_resp_pars.output_byte_counter;
+}
+
+static inline u32 qat_comp_get_produced_adler32(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->comp_resp_pars.crc.legacy.curr_adler_32;
+}
+
+static inline u64 qat_comp_get_opaque(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->opaque_data;
+}
+
+static inline s8 qat_comp_get_cmp_err(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->comn_resp.comn_error.cmp_err_code;
+}
+
+static inline s8 qat_comp_get_xlt_err(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->comn_resp.comn_error.xlat_err_code;
+}
+
+static inline s8 qat_comp_get_cmp_status(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+       u8 stat_field = qat_resp->comn_resp.comn_status;
+
+       return ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(stat_field);
+}
+
+static inline s8 qat_comp_get_xlt_status(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+       u8 stat_field = qat_resp->comn_resp.comn_status;
+
+       return ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(stat_field);
+}
+
+static inline u8 qat_comp_get_cmp_cnv_flag(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+       u8 flags = qat_resp->comn_resp.hdr_flags;
+
+       return ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(flags);
+}
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
new file mode 100644 (file)
index 0000000..3f1f352
--- /dev/null
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_compression.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_compression;
+
+void qat_compression_put_instance(struct qat_compression_instance *inst)
+{
+       atomic_dec(&inst->refctr);
+       adf_dev_put(inst->accel_dev);
+}
+
+static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
+{
+       struct qat_compression_instance *inst;
+       struct list_head *list_ptr, *tmp;
+       int i;
+
+       list_for_each_safe(list_ptr, tmp, &accel_dev->compression_list) {
+               inst = list_entry(list_ptr,
+                                 struct qat_compression_instance, list);
+
+               for (i = 0; i < atomic_read(&inst->refctr); i++)
+                       qat_compression_put_instance(inst);
+
+               if (inst->dc_tx)
+                       adf_remove_ring(inst->dc_tx);
+
+               if (inst->dc_rx)
+                       adf_remove_ring(inst->dc_rx);
+
+               list_del(list_ptr);
+               kfree(inst);
+       }
+       return 0;
+}
+
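+/*
+ * Pick the least-referenced started device on the requested NUMA node,
+ * falling back to any started device with compression instances, then take
+ * the least-used instance on it and grab a device and an instance reference.
+ */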
+struct qat_compression_instance *qat_compression_get_instance_node(int node)
+{
+       struct qat_compression_instance *inst = NULL;
+       struct adf_accel_dev *accel_dev = NULL;
+       unsigned long best = ~0;
+       struct list_head *itr;
+
+       list_for_each(itr, adf_devmgr_get_head()) {
+               struct adf_accel_dev *tmp_dev;
+               unsigned long ctr;
+               int tmp_dev_node;
+
+               tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+               tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));
+
+               if ((node == tmp_dev_node || tmp_dev_node < 0) &&
+                   adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
+                       ctr = atomic_read(&tmp_dev->ref_count);
+                       if (best > ctr) {
+                               accel_dev = tmp_dev;
+                               best = ctr;
+                       }
+               }
+       }
+
+       if (!accel_dev) {
+               pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
+               /* Get any started device */
+               list_for_each(itr, adf_devmgr_get_head()) {
+                       struct adf_accel_dev *tmp_dev;
+
+                       tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+                       if (adf_dev_started(tmp_dev) &&
+                           !list_empty(&tmp_dev->compression_list)) {
+                               accel_dev = tmp_dev;
+                               break;
+                       }
+               }
+       }
+
+       if (!accel_dev)
+               return NULL;
+
+       best = ~0;
+       list_for_each(itr, &accel_dev->compression_list) {
+               struct qat_compression_instance *tmp_inst;
+               unsigned long ctr;
+
+               tmp_inst = list_entry(itr, struct qat_compression_instance, list);
+               ctr = atomic_read(&tmp_inst->refctr);
+               if (best > ctr) {
+                       inst = tmp_inst;
+                       best = ctr;
+               }
+       }
+       if (inst) {
+               if (adf_dev_get(accel_dev)) {
+                       dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
+                       return NULL;
+               }
+               atomic_inc(&inst->refctr);
+       }
+       return inst;
+}
+
+static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
+{
+       struct qat_compression_instance *inst;
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       unsigned long num_inst, num_msg_dc;
+       unsigned long bank;
+       int msg_size;
+       int ret;
+       int i;
+
+       INIT_LIST_HEAD(&accel_dev->compression_list);
+       strscpy(key, ADF_NUM_DC, sizeof(key));
+       ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+       if (ret)
+               return ret;
+
+       ret = kstrtoul(val, 10, &num_inst);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < num_inst; i++) {
+               inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+                                   dev_to_node(&GET_DEV(accel_dev)));
+               if (!inst) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               list_add_tail(&inst->list, &accel_dev->compression_list);
+               inst->id = i;
+               atomic_set(&inst->refctr, 0);
+               inst->accel_dev = accel_dev;
+               inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;
+
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &bank);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &num_msg_dc);
+               if (ret)
+                       goto err;
+
+               msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+               ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
+                                     msg_size, key, NULL, 0, &inst->dc_tx);
+               if (ret)
+                       goto err;
+
+               msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+               ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
+                                     msg_size, key, qat_comp_alg_callback, 0,
+                                     &inst->dc_rx);
+               if (ret)
+                       goto err;
+
+               inst->dc_data = accel_dev->dc_data;
+               INIT_LIST_HEAD(&inst->backlog.list);
+               spin_lock_init(&inst->backlog.lock);
+       }
+       return 0;
+err:
+       qat_compression_free_instances(accel_dev);
+       return ret;
+}
+
+static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       dma_addr_t obuff_p = DMA_MAPPING_ERROR;
+       size_t ovf_buff_sz = QAT_COMP_MAX_SKID;
+       struct adf_dc_data *dc_data = NULL;
+       u8 *obuff = NULL;
+
+       dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
+       if (!dc_data)
+               goto err;
+
+       obuff = kzalloc_node(ovf_buff_sz, GFP_KERNEL, dev_to_node(dev));
+       if (!obuff)
+               goto err;
+
+       obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, obuff_p)))
+               goto err;
+
+       dc_data->ovf_buff = obuff;
+       dc_data->ovf_buff_p = obuff_p;
+       dc_data->ovf_buff_sz = ovf_buff_sz;
+
+       accel_dev->dc_data = dc_data;
+
+       return 0;
+
+err:
+       accel_dev->dc_data = NULL;
+       kfree(obuff);
+       devm_kfree(dev, dc_data);
+       return -ENOMEM;
+}
+
+static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_dc_data *dc_data = accel_dev->dc_data;
+       struct device *dev = &GET_DEV(accel_dev);
+
+       if (!dc_data)
+               return;
+
+       dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
+                        DMA_FROM_DEVICE);
+       memset(dc_data->ovf_buff, 0, dc_data->ovf_buff_sz);
+       kfree(dc_data->ovf_buff);
+       devm_kfree(dev, dc_data);
+       accel_dev->dc_data = NULL;
+}
+
+static int qat_compression_init(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       ret = qat_compression_alloc_dc_data(accel_dev);
+       if (ret)
+               return ret;
+
+       ret = qat_compression_create_instances(accel_dev);
+       if (ret)
+               qat_free_dc_data(accel_dev);
+
+       return ret;
+}
+
+static int qat_compression_shutdown(struct adf_accel_dev *accel_dev)
+{
+       qat_free_dc_data(accel_dev);
+       return qat_compression_free_instances(accel_dev);
+}
+
+static int qat_compression_event_handler(struct adf_accel_dev *accel_dev,
+                                        enum adf_event event)
+{
+       int ret;
+
+       switch (event) {
+       case ADF_EVENT_INIT:
+               ret = qat_compression_init(accel_dev);
+               break;
+       case ADF_EVENT_SHUTDOWN:
+               ret = qat_compression_shutdown(accel_dev);
+               break;
+       case ADF_EVENT_RESTARTING:
+       case ADF_EVENT_RESTARTED:
+       case ADF_EVENT_START:
+       case ADF_EVENT_STOP:
+       default:
+               ret = 0;
+       }
+       return ret;
+}
+
+int qat_compression_register(void)
+{
+       memset(&qat_compression, 0, sizeof(qat_compression));
+       qat_compression.event_hld = qat_compression_event_handler;
+       qat_compression.name = "qat_compression";
+       return adf_service_register(&qat_compression);
+}
+
+int qat_compression_unregister(void)
+{
+       return adf_service_unregister(&qat_compression);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.h b/drivers/crypto/intel/qat/qat_common/qat_compression.h
new file mode 100644 (file)
index 0000000..aebac23
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _QAT_COMPRESSION_H_
+#define _QAT_COMPRESSION_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
+
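+/*
+ * Size of the overflow ("skid") buffer that is mapped once per device and
+ * appended to the destination of every compression request so the engine
+ * can spill expanded output produced by incompressible input.
+ */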
+#define QAT_COMP_MAX_SKID 4096
+
+struct qat_compression_instance {
+       struct adf_etr_ring_data *dc_tx;
+       struct adf_etr_ring_data *dc_rx;
+       struct adf_accel_dev *accel_dev;
+       struct list_head list;
+       unsigned long state;
+       int id;
+       atomic_t refctr;
+       struct qat_instance_backlog backlog;
+       struct adf_dc_data *dc_data;
+       void (*build_deflate_ctx)(void *ctx);
+};
+
+static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       u32 mask = ~hw_device->accel_capabilities_mask;
+
+       if (mask & ADF_ACCEL_CAPABILITIES_COMPRESSION)
+               return false;
+
+       return true;
+}
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
new file mode 100644 (file)
index 0000000..40c8e74
--- /dev/null
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_gen2_hw_data.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_crypto;
+
+void qat_crypto_put_instance(struct qat_crypto_instance *inst)
+{
+       atomic_dec(&inst->refctr);
+       adf_dev_put(inst->accel_dev);
+}
+
+static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
+{
+       struct qat_crypto_instance *inst, *tmp;
+       int i;
+
+       list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
+               for (i = 0; i < atomic_read(&inst->refctr); i++)
+                       qat_crypto_put_instance(inst);
+
+               if (inst->sym_tx)
+                       adf_remove_ring(inst->sym_tx);
+
+               if (inst->sym_rx)
+                       adf_remove_ring(inst->sym_rx);
+
+               if (inst->pke_tx)
+                       adf_remove_ring(inst->pke_tx);
+
+               if (inst->pke_rx)
+                       adf_remove_ring(inst->pke_rx);
+
+               list_del(&inst->list);
+               kfree(inst);
+       }
+       return 0;
+}
+
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
+{
+       struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
+       struct qat_crypto_instance *inst = NULL, *tmp_inst;
+       unsigned long best = ~0;
+
+       list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
+               unsigned long ctr;
+
+               if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
+                    dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
+                   adf_dev_started(tmp_dev) &&
+                   !list_empty(&tmp_dev->crypto_list)) {
+                       ctr = atomic_read(&tmp_dev->ref_count);
+                       if (best > ctr) {
+                               accel_dev = tmp_dev;
+                               best = ctr;
+                       }
+               }
+       }
+
+       if (!accel_dev) {
+               pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
+               /* Get any started device */
+               list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
+                       if (adf_dev_started(tmp_dev) &&
+                           !list_empty(&tmp_dev->crypto_list)) {
+                               accel_dev = tmp_dev;
+                               break;
+                       }
+               }
+       }
+
+       if (!accel_dev)
+               return NULL;
+
+       best = ~0;
+       list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
+               unsigned long ctr;
+
+               ctr = atomic_read(&tmp_inst->refctr);
+               if (best > ctr) {
+                       inst = tmp_inst;
+                       best = ctr;
+               }
+       }
+       if (inst) {
+               if (adf_dev_get(accel_dev)) {
+                       dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
+                       return NULL;
+               }
+               atomic_inc(&inst->refctr);
+       }
+       return inst;
+}
+
+/**
+ * qat_crypto_vf_dev_config() - create the device configuration
+ *     required to create crypto instances.
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates the device configuration required to create
+ * asym and sym crypto instances.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev)
+{
+       u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map;
+
+       if (ring_to_svc_map != ADF_GEN2_DEFAULT_RING_TO_SRV_MAP) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Unsupported ring/service mapping present on PF\n");
+               return -EFAULT;
+       }
+
+       return GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+}
+
+static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+{
+       unsigned long num_inst, num_msg_sym, num_msg_asym;
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       unsigned long sym_bank, asym_bank;
+       struct qat_crypto_instance *inst;
+       int msg_size;
+       int ret;
+       int i;
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       ret = adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val);
+       if (ret)
+               return ret;
+
+       ret = kstrtoul(val, 0, &num_inst);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < num_inst; i++) {
+               inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+                                   dev_to_node(&GET_DEV(accel_dev)));
+               if (!inst) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               list_add_tail(&inst->list, &accel_dev->crypto_list);
+               inst->id = i;
+               atomic_set(&inst->refctr, 0);
+               inst->accel_dev = accel_dev;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &sym_bank);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &asym_bank);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &num_msg_sym);
+               if (ret)
+                       goto err;
+
+               num_msg_sym = num_msg_sym >> 1;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &num_msg_asym);
+               if (ret)
+                       goto err;
+               num_msg_asym = num_msg_asym >> 1;
+
+               msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+               ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
+                                     msg_size, key, NULL, 0, &inst->sym_tx);
+               if (ret)
+                       goto err;
+
+               msg_size = msg_size >> 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+               ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
+                                     msg_size, key, NULL, 0, &inst->pke_tx);
+               if (ret)
+                       goto err;
+
+               msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+               ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
+                                     msg_size, key, qat_alg_callback, 0,
+                                     &inst->sym_rx);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+               ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
+                                     msg_size, key, qat_alg_asym_callback, 0,
+                                     &inst->pke_rx);
+               if (ret)
+                       goto err;
+
+               INIT_LIST_HEAD(&inst->backlog.list);
+               spin_lock_init(&inst->backlog.lock);
+       }
+       return 0;
+err:
+       qat_crypto_free_instances(accel_dev);
+       return ret;
+}
+
+static int qat_crypto_init(struct adf_accel_dev *accel_dev)
+{
+       if (qat_crypto_create_instances(accel_dev))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
+{
+       return qat_crypto_free_instances(accel_dev);
+}
+
+static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
+                                   enum adf_event event)
+{
+       int ret;
+
+       switch (event) {
+       case ADF_EVENT_INIT:
+               ret = qat_crypto_init(accel_dev);
+               break;
+       case ADF_EVENT_SHUTDOWN:
+               ret = qat_crypto_shutdown(accel_dev);
+               break;
+       case ADF_EVENT_RESTARTING:
+       case ADF_EVENT_RESTARTED:
+       case ADF_EVENT_START:
+       case ADF_EVENT_STOP:
+       default:
+               ret = 0;
+       }
+       return ret;
+}
+
+int qat_crypto_register(void)
+{
+       memset(&qat_crypto, 0, sizeof(qat_crypto));
+       qat_crypto.event_hld = qat_crypto_event_handler;
+       qat_crypto.name = "qat_crypto";
+       return adf_service_register(&qat_crypto);
+}
+
+int qat_crypto_unregister(void)
+{
+       return adf_service_unregister(&qat_crypto);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.h b/drivers/crypto/intel/qat/qat_common/qat_crypto.h
new file mode 100644 (file)
index 0000000..6a0e961
--- /dev/null
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _QAT_CRYPTO_INSTANCE_H_
+#define _QAT_CRYPTO_INSTANCE_H_
+
+#include <crypto/aes.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_la.h"
+#include "qat_algs_send.h"
+#include "qat_bl.h"
+
+struct qat_crypto_instance {
+       struct adf_etr_ring_data *sym_tx;
+       struct adf_etr_ring_data *sym_rx;
+       struct adf_etr_ring_data *pke_tx;
+       struct adf_etr_ring_data *pke_rx;
+       struct adf_accel_dev *accel_dev;
+       struct list_head list;
+       unsigned long state;
+       int id;
+       atomic_t refctr;
+       struct qat_instance_backlog backlog;
+};
+
+struct qat_crypto_request;
+
+struct qat_crypto_request {
+       struct icp_qat_fw_la_bulk_req req;
+       union {
+               struct qat_alg_aead_ctx *aead_ctx;
+               struct qat_alg_skcipher_ctx *skcipher_ctx;
+       };
+       union {
+               struct aead_request *aead_req;
+               struct skcipher_request *skcipher_req;
+       };
+       struct qat_request_buffs buf;
+       void (*cb)(struct icp_qat_fw_la_resp *resp,
+                  struct qat_crypto_request *req);
+       union {
+               struct {
+                       __be64 iv_hi;
+                       __be64 iv_lo;
+               };
+               u8 iv[AES_BLOCK_SIZE];
+       };
+       bool encryption;
+       struct qat_alg_req alg_req;
+};
+
+static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       u32 mask = ~hw_device->accel_capabilities_mask;
+
+       if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC)
+               return false;
+       if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC)
+               return false;
+       if (mask & ADF_ACCEL_CAPABILITIES_AUTHENTICATION)
+               return false;
+
+       return true;
+}
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
new file mode 100644 (file)
index 0000000..cbb946a
--- /dev/null
@@ -0,0 +1,1594 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci_ids.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_uclo.h"
+
+#define BAD_REGADDR               0xffff
+#define MAX_RETRY_TIMES           10000
+#define INIT_CTX_ARB_VALUE        0x0
+#define INIT_CTX_ENABLE_VALUE     0x0
+#define INIT_PC_VALUE             0x0
+#define INIT_WAKEUP_EVENTS_VALUE  0x1
+#define INIT_SIG_EVENTS_VALUE     0x1
+#define INIT_CCENABLE_VALUE       0x2000
+#define RST_CSR_QAT_LSB           20
+#define RST_CSR_AE_LSB            0
+#define MC_TIMESTAMP_ENABLE       (0x1 << 7)
+
+#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+       (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
+       (~(1 << CE_REG_PAR_ERR_BITPOS)))
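+/*
+ * Patch a 16-bit immediate constant into an AE microword: the upper byte
+ * lands in instruction bits 20-27 and the lower byte in bits 10-17 for an
+ * A-bank destination or bits 0-7 for a B-bank destination.
+ */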
+#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
+       (inst = ((inst & 0xFFFF00C03FFull) | \
+               ((((const_val) << 12) & 0x0FF00000ull) | \
+               (((const_val) << 10) & 0x0003FC00ull))))
+#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
+       (inst = ((inst & 0xFFFF00FFF00ull) | \
+               ((((const_val) << 12) & 0x0FF00000ull) | \
+               (((const_val) <<  0) & 0x000000FFull))))
+
+#define AE(handle, ae) ((handle)->hal_handle->aes[ae])
+
+static const u64 inst_4b[] = {
+       0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
+       0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0A021000000ull
+};
+
+static const u64 inst[] = {
+       0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
+       0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
+       0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
+       0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
+       0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
+       0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
+       0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
+       0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
+       0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
+       0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
+       0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
+       0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
+       0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
+       0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
+       0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
+       0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
+       0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
+       0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
+       0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
+       0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
+};
+
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+                         unsigned char ae, unsigned int ctx_mask)
+{
+       AE(handle, ae).live_ctx_mask = ctx_mask;
+}
+
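+/*
+ * AE local CSR accesses can fail transiently; LOCAL_CSR_STATUS reports
+ * whether the last access took effect, so reads and writes are retried up
+ * to CSR_RETRY_TIMES before giving up.
+ */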
+#define CSR_RETRY_TIMES 500
+static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned int csr)
+{
+       unsigned int iterations = CSR_RETRY_TIMES;
+       int value;
+
+       do {
+               value = GET_AE_CSR(handle, ae, csr);
+               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+                       return value;
+       } while (iterations--);
+
+       pr_err("QAT: Read CSR timeout\n");
+       return 0;
+}
+
+static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned int csr,
+                            unsigned int value)
+{
+       unsigned int iterations = CSR_RETRY_TIMES;
+
+       do {
+               SET_AE_CSR(handle, ae, csr, value);
+               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+                       return 0;
+       } while (iterations--);
+
+       pr_err("QAT: Write CSR timeout\n");
+       return -EFAULT;
+}
+
+static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+                                    unsigned char ae, unsigned char ctx,
+                                    unsigned int *events)
+{
+       unsigned int cur_ctx;
+
+       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+       *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int cycles,
+                              int chk_inactive)
+{
+       unsigned int base_cnt = 0, cur_cnt = 0;
+       unsigned int csr = (1 << ACS_ABO_BITPOS);
+       int times = MAX_RETRY_TIMES;
+       int elapsed_cycles = 0;
+
+       base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+       base_cnt &= 0xffff;
+       while ((int)cycles > elapsed_cycles && times--) {
+               if (chk_inactive)
+                       csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+
+               cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+               cur_cnt &= 0xffff;
+               elapsed_cycles = cur_cnt - base_cnt;
+
+               if (elapsed_cycles < 0)
+                       elapsed_cycles += 0x10000;
+
+               /* ensure at least 8 cycles have elapsed in wait_cycles */
+               if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
+                       return 0;
+       }
+       if (times < 0) {
+               pr_err("QAT: qat_hal_wait_cycles timed out\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+#define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
+#define SET_BIT(wrd, bit) ((wrd) | 1 << (bit))
+
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+                           unsigned char ae, unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       if (mode != 4 && mode != 8) {
+               pr_err("QAT: bad ctx mode=%d\n", mode);
+               return -EINVAL;
+       }
+
+       /* Set the acceleration engine context mode to either four or eight */
+       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       csr &= IGNORE_W1C_MASK;
+       new_csr = (mode == 4) ?
+               SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
+               CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+       return 0;
+}
+
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       csr &= IGNORE_W1C_MASK;
+
+       new_csr = (mode) ?
+               SET_BIT(csr, CE_NN_MODE_BITPOS) :
+               CLR_BIT(csr, CE_NN_MODE_BITPOS);
+
+       if (new_csr != csr)
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+
+       return 0;
+}
+
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
+                          unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       csr &= IGNORE_W1C_MASK;
+       switch (lm_type) {
+       case ICP_LMEM0:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
+               break;
+       case ICP_LMEM1:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
+               break;
+       case ICP_LMEM2:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS);
+               break;
+       case ICP_LMEM3:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS);
+               break;
+       default:
+               pr_err("QAT: invalid lmType = 0x%x\n", lm_type);
+               return -EINVAL;
+       }
+
+       if (new_csr != csr)
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+       return 0;
+}
+
+void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       csr &= IGNORE_W1C_MASK;
+       new_csr = (mode) ?
+                 SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) :
+                 CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS);
+       if (new_csr != csr)
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+}
+
+static unsigned short qat_hal_get_reg_addr(unsigned int type,
+                                          unsigned short reg_num)
+{
+       unsigned short reg_addr;
+
+       switch (type) {
+       case ICP_GPA_ABS:
+       case ICP_GPB_ABS:
+               reg_addr = 0x80 | (reg_num & 0x7f);
+               break;
+       case ICP_GPA_REL:
+       case ICP_GPB_REL:
+               reg_addr = reg_num & 0x1f;
+               break;
+       case ICP_SR_RD_REL:
+       case ICP_SR_WR_REL:
+       case ICP_SR_REL:
+               reg_addr = 0x180 | (reg_num & 0x1f);
+               break;
+       case ICP_SR_ABS:
+               reg_addr = 0x140 | ((reg_num & 0x3) << 1);
+               break;
+       case ICP_DR_RD_REL:
+       case ICP_DR_WR_REL:
+       case ICP_DR_REL:
+               reg_addr = 0x1c0 | (reg_num & 0x1f);
+               break;
+       case ICP_DR_ABS:
+               reg_addr = 0x100 | ((reg_num & 0x3) << 1);
+               break;
+       case ICP_NEIGH_REL:
+               reg_addr = 0x280 | (reg_num & 0x1f);
+               break;
+       case ICP_LMEM0:
+               reg_addr = 0x200;
+               break;
+       case ICP_LMEM1:
+               reg_addr = 0x220;
+               break;
+       case ICP_LMEM2:
+               reg_addr = 0x2c0;
+               break;
+       case ICP_LMEM3:
+               reg_addr = 0x2e0;
+               break;
+       case ICP_NO_DEST:
+               reg_addr = 0x300 | (reg_num & 0xff);
+               break;
+       default:
+               reg_addr = BAD_REGADDR;
+               break;
+       }
+       return reg_addr;
+}
+
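+/* Put the AEs and accelerators into reset by setting the reset CSR bits */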
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int reset_mask = handle->chip_info->icp_rst_mask;
+       unsigned int reset_csr = handle->chip_info->icp_rst_csr;
+       unsigned int csr_val;
+
+       csr_val = GET_CAP_CSR(handle, reset_csr);
+       csr_val |= reset_mask;
+       SET_CAP_CSR(handle, reset_csr, csr_val);
+}
+
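+/*
+ * Write an indirect (per-context) AE CSR for every context selected in
+ * ctx_mask, then restore the original CSR_CTX_POINTER.
+ */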
+static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned int ctx_mask,
+                               unsigned int ae_csr, unsigned int csr_val)
+{
+       unsigned int ctx, cur_ctx;
+
+       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
+       }
+
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned char ctx,
+                               unsigned int ae_csr)
+{
+       unsigned int cur_ctx, csr_val;
+
+       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+       csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+
+       return csr_val;
+}
+
+static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
+                                 unsigned char ae, unsigned int ctx_mask,
+                                 unsigned int events)
+{
+       unsigned int ctx, cur_ctx;
+
+       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
+       }
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+                                    unsigned char ae, unsigned int ctx_mask,
+                                    unsigned int events)
+{
+       unsigned int ctx, cur_ctx;
+
+       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
+                                 events);
+       }
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
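+/*
+ * An AE is considered alive if its PROFILE_COUNT CSR keeps advancing;
+ * poll each enabled AE until the count changes or the retry budget is
+ * exhausted.
+ */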
+static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned int base_cnt, cur_cnt;
+       unsigned char ae;
+       int times = MAX_RETRY_TIMES;
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+               base_cnt &= 0xffff;
+
+               do {
+                       cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+                       cur_cnt &= 0xffff;
+               } while (times-- && (cur_cnt == base_cnt));
+
+               if (times < 0) {
+                       pr_err("QAT: AE%d is inactive!!\n", ae);
+                       return -EFAULT;
+               }
+       }
+
+       return 0;
+}
+
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+                           unsigned int ae)
+{
+       unsigned int enable = 0, active = 0;
+
+       enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+       if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
+           (active & (1 << ACS_ABO_BITPOS)))
+               return 1;
+       else
+               return 0;
+}
+
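+/*
+ * Stop the timestamp timers, zero the timestamp CSRs on every enabled
+ * AE, then restart the timers.
+ */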
+static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned int misc_ctl_csr, misc_ctl;
+       unsigned char ae;
+
+       misc_ctl_csr = handle->chip_info->misc_ctl_csr;
+       /* stop the timestamp timers */
+       misc_ctl = GET_CAP_CSR(handle, misc_ctl_csr);
+       if (misc_ctl & MC_TIMESTAMP_ENABLE)
+               SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl &
+                           (~MC_TIMESTAMP_ENABLE));
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
+               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
+       }
+       /* start timestamp timers */
+       SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE);
+}
+
+#define ESRAM_AUTO_TINIT       BIT(2)
+#define ESRAM_AUTO_TINIT_DONE  BIT(3)
+#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
+#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
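+/* Trigger the eSRAM auto-init on DH895xCC and wait for it to complete */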
+static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
+{
+       void __iomem *csr_addr =
+                       (void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
+                       ESRAM_AUTO_INIT_CSR_OFFSET);
+       unsigned int csr_val;
+       int times = 30;
+
+       if (handle->pci_dev->device != PCI_DEVICE_ID_INTEL_QAT_DH895XCC)
+               return 0;
+
+       csr_val = ADF_CSR_RD(csr_addr, 0);
+       if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
+               return 0;
+
+       csr_val = ADF_CSR_RD(csr_addr, 0);
+       csr_val |= ESRAM_AUTO_TINIT;
+       ADF_CSR_WR(csr_addr, 0, csr_val);
+
+       do {
+               qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
+               csr_val = ADF_CSR_RD(csr_addr, 0);
+       } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
+       if (times < 0) {
+               pr_err("QAT: Fail to init eSram!\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+#define SHRAM_INIT_CYCLES 2060
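+/*
+ * Bring the AEs and accelerators out of reset: clear the reset bits,
+ * enable the clocks, check that the AEs are executing, then program
+ * sane power-up defaults into every context.
+ */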
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int clk_csr = handle->chip_info->glb_clk_enable_csr;
+       unsigned int reset_mask = handle->chip_info->icp_rst_mask;
+       unsigned int reset_csr = handle->chip_info->icp_rst_csr;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned char ae = 0;
+       unsigned int times = 100;
+       unsigned int csr_val;
+
+       /* write to the reset csr */
+       csr_val = GET_CAP_CSR(handle, reset_csr);
+       csr_val &= ~reset_mask;
+       do {
+               SET_CAP_CSR(handle, reset_csr, csr_val);
+               if (!(times--))
+                       goto out_err;
+               csr_val = GET_CAP_CSR(handle, reset_csr);
+               csr_val &= reset_mask;
+       } while (csr_val);
+       /* enable clock */
+       csr_val = GET_CAP_CSR(handle, clk_csr);
+       csr_val |= reset_mask;
+       SET_CAP_CSR(handle, clk_csr, csr_val);
+       if (qat_hal_check_ae_alive(handle))
+               goto out_err;
+
+       /* Set undefined power-up/reset states to reasonable default values */
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+                                 INIT_CTX_ENABLE_VALUE);
+               qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
+                                   CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+               qat_hal_put_wakeup_event(handle, ae,
+                                        ICP_QAT_UCLO_AE_ALL_CTX,
+                                        INIT_WAKEUP_EVENTS_VALUE);
+               qat_hal_put_sig_event(handle, ae,
+                                     ICP_QAT_UCLO_AE_ALL_CTX,
+                                     INIT_SIG_EVENTS_VALUE);
+       }
+       if (qat_hal_init_esram(handle))
+               goto out_err;
+       if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
+               goto out_err;
+       qat_hal_reset_timestamp(handle);
+
+       return 0;
+out_err:
+       pr_err("QAT: failed to get device out of reset\n");
+       return -EFAULT;
+}
+
+static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned int ctx_mask)
+{
+       unsigned int ctx;
+
+       ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       ctx &= IGNORE_W1C_MASK &
+               (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
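+/* Fold a 64-bit word to its parity: 1 if an odd number of bits are set */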
+static u64 qat_hal_parity_64bit(u64 word)
+{
+       word ^= word >> 1;
+       word ^= word >> 2;
+       word ^= word >> 4;
+       word ^= word >> 8;
+       word ^= word >> 16;
+       word ^= word >> 32;
+       return word & 1;
+}
+
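+/*
+ * Recompute the seven ECC check bits (bits 44-50) of a microword; each
+ * is the parity of the payload bits selected by its mask.
+ */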
+static u64 qat_hal_set_uword_ecc(u64 uword)
+{
+       u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
+               bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
+               bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
+               bit6_mask = 0xdaf69a46910ULL;
+
+       /* clear the ecc bits */
+       uword &= ~(0x7fULL << 0x2C);
+       uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
+       uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
+       uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
+       uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
+       uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
+       uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
+       uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
+       return uword;
+}
+
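+/*
+ * Write words_num microwords into the AE's ustore starting at uaddr,
+ * computing the ECC bits for each word on the way in.
+ */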
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+                      unsigned char ae, unsigned int uaddr,
+                      unsigned int words_num, u64 *uword)
+{
+       unsigned int ustore_addr;
+       unsigned int i;
+
+       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+       uaddr |= UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       for (i = 0; i < words_num; i++) {
+               unsigned int uwrd_lo, uwrd_hi;
+               u64 tmp;
+
+               tmp = qat_hal_set_uword_ecc(uword[i]);
+               uwrd_lo = (unsigned int)(tmp & 0xffffffff);
+               uwrd_hi = (unsigned int)(tmp >> 0x20);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       }
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int ctx_mask)
+{
+       unsigned int ctx;
+
+       ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       ctx &= IGNORE_W1C_MASK;
+       ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
+       ctx |= (ctx_mask << CE_ENABLE_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned char ae;
+       unsigned short reg;
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
+                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
+                                            reg, 0);
+                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
+                                            reg, 0);
+               }
+       }
+}
+
+static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned char ae;
+       unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
+       int times = MAX_RETRY_TIMES;
+       unsigned int csr_val = 0;
+       unsigned int savctx = 0;
+       int ret = 0;
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+               csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
+               qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
+               csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+               csr_val &= IGNORE_W1C_MASK;
+               if (handle->chip_info->nn)
+                       csr_val |= CE_NN_MODE;
+
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
+               qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
+                                 (u64 *)inst);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
+               qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask,
+                                   CTX_SIG_EVENTS_INDIRECT, 0);
+               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+               qat_hal_enable_ctx(handle, ae, ctx_mask);
+       }
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               /* wait for AE to finish */
+               do {
+                       ret = qat_hal_wait_cycles(handle, ae, 20, 1);
+               } while (ret && times--);
+
+               if (times < 0) {
+                       pr_err("QAT: clear GPR of AE %d failed", ae);
+                       return -EINVAL;
+               }
+               qat_hal_disable_ctx(handle, ae, ctx_mask);
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 savctx & ACS_ACNO);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+                                 INIT_CTX_ENABLE_VALUE);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+               qat_hal_put_wakeup_event(handle, ae, ctx_mask,
+                                        INIT_WAKEUP_EVENTS_VALUE);
+               qat_hal_put_sig_event(handle, ae, ctx_mask,
+                                     INIT_SIG_EVENTS_VALUE);
+       }
+       return 0;
+}
+
+static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
+                            struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       unsigned int max_en_ae_id = 0;
+       struct adf_bar *sram_bar;
+       unsigned int csr_val = 0;
+       unsigned long ae_mask;
+       unsigned char ae = 0;
+       int ret = 0;
+
+       handle->pci_dev = pci_info->pci_dev;
+       switch (handle->pci_dev->device) {
+       case ADF_4XXX_PCI_DEVICE_ID:
+       case ADF_401XX_PCI_DEVICE_ID:
+       case ADF_402XX_PCI_DEVICE_ID:
+               handle->chip_info->mmp_sram_size = 0;
+               handle->chip_info->nn = false;
+               handle->chip_info->lm2lm3 = true;
+               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
+               handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
+               handle->chip_info->icp_rst_mask = 0x100015;
+               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0;
+               handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX;
+               handle->chip_info->wakeup_event_val = 0x80000000;
+               handle->chip_info->fw_auth = true;
+               handle->chip_info->css_3k = true;
+               handle->chip_info->tgroup_share_ustore = true;
+               handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
+               handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
+               handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI_4XXX;
+               handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO_4XXX;
+               handle->chip_info->fcu_loaded_ae_csr = FCU_AE_LOADED_4XXX;
+               handle->chip_info->fcu_loaded_ae_pos = 0;
+
+               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET_4XXX;
+               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET_4XXX;
+               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET_4XXX;
+               handle->hal_cap_ae_local_csr_addr_v =
+                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+                       + LOCAL_TO_XFER_REG_OFFSET);
+               break;
+       case PCI_DEVICE_ID_INTEL_QAT_C62X:
+       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+               handle->chip_info->mmp_sram_size = 0;
+               handle->chip_info->nn = true;
+               handle->chip_info->lm2lm3 = false;
+               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
+               handle->chip_info->icp_rst_csr = ICP_RESET;
+               handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
+                                                 (hw_data->accel_mask << RST_CSR_QAT_LSB);
+               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
+               handle->chip_info->misc_ctl_csr = MISC_CONTROL;
+               handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
+               handle->chip_info->fw_auth = true;
+               handle->chip_info->css_3k = false;
+               handle->chip_info->tgroup_share_ustore = false;
+               handle->chip_info->fcu_ctl_csr = FCU_CONTROL;
+               handle->chip_info->fcu_sts_csr = FCU_STATUS;
+               handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI;
+               handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO;
+               handle->chip_info->fcu_loaded_ae_csr = FCU_STATUS;
+               handle->chip_info->fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
+               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
+               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
+               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
+               handle->hal_cap_ae_local_csr_addr_v =
+                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+                       + LOCAL_TO_XFER_REG_OFFSET);
+               break;
+       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+               handle->chip_info->mmp_sram_size = 0x40000;
+               handle->chip_info->nn = true;
+               handle->chip_info->lm2lm3 = false;
+               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
+               handle->chip_info->icp_rst_csr = ICP_RESET;
+               handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
+                                                 (hw_data->accel_mask << RST_CSR_QAT_LSB);
+               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
+               handle->chip_info->misc_ctl_csr = MISC_CONTROL;
+               handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
+               handle->chip_info->fw_auth = false;
+               handle->chip_info->css_3k = false;
+               handle->chip_info->tgroup_share_ustore = false;
+               handle->chip_info->fcu_ctl_csr = 0;
+               handle->chip_info->fcu_sts_csr = 0;
+               handle->chip_info->fcu_dram_addr_hi = 0;
+               handle->chip_info->fcu_dram_addr_lo = 0;
+               handle->chip_info->fcu_loaded_ae_csr = 0;
+               handle->chip_info->fcu_loaded_ae_pos = 0;
+               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
+               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
+               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
+               handle->hal_cap_ae_local_csr_addr_v =
+                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+                       + LOCAL_TO_XFER_REG_OFFSET);
+               break;
+       default:
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       if (handle->chip_info->mmp_sram_size > 0) {
+               sram_bar =
+                       &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
+               handle->hal_sram_addr_v = sram_bar->virt_addr;
+       }
+       handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
+       handle->hal_handle->ae_mask = hw_data->ae_mask;
+       handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask;
+       handle->hal_handle->slice_mask = hw_data->accel_mask;
+       handle->cfg_ae_mask = ALL_AE_MASK;
+       /* create AE objects */
+       handle->hal_handle->upc_mask = 0x1ffff;
+       handle->hal_handle->max_ustore = 0x4000;
+
+       ae_mask = handle->hal_handle->ae_mask;
+       for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE) {
+               handle->hal_handle->aes[ae].free_addr = 0;
+               handle->hal_handle->aes[ae].free_size =
+                   handle->hal_handle->max_ustore;
+               handle->hal_handle->aes[ae].ustore_size =
+                   handle->hal_handle->max_ustore;
+               handle->hal_handle->aes[ae].live_ctx_mask =
+                                               ICP_QAT_UCLO_AE_ALL_CTX;
+               max_en_ae_id = ae;
+       }
+       handle->hal_handle->ae_max_num = max_en_ae_id + 1;
+
+       /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
+               csr_val |= 0x1;
+               qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
+       }
+out_err:
+       return ret;
+}
+
+int qat_hal_init(struct adf_accel_dev *accel_dev)
+{
+       struct icp_qat_fw_loader_handle *handle;
+       int ret = 0;
+
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       if (!handle)
+               return -ENOMEM;
+
+       handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
+       if (!handle->hal_handle) {
+               ret = -ENOMEM;
+               goto out_hal_handle;
+       }
+
+       handle->chip_info = kzalloc(sizeof(*handle->chip_info), GFP_KERNEL);
+       if (!handle->chip_info) {
+               ret = -ENOMEM;
+               goto out_chip_info;
+       }
+
+       ret = qat_hal_chip_init(handle, accel_dev);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev), "qat_hal_chip_init error\n");
+               goto out_err;
+       }
+
+       /* take all AEs out of reset */
+       ret = qat_hal_clr_reset(handle);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
+               goto out_err;
+       }
+
+       qat_hal_clear_xfer(handle);
+       if (!handle->chip_info->fw_auth) {
+               ret = qat_hal_clear_gpr(handle);
+               if (ret)
+                       goto out_err;
+       }
+
+       accel_dev->fw_loader->fw_loader = handle;
+       return 0;
+
+out_err:
+       kfree(handle->chip_info);
+out_chip_info:
+       kfree(handle->hal_handle);
+out_hal_handle:
+       kfree(handle);
+       return ret;
+}
+
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
+{
+       if (!handle)
+               return;
+       kfree(handle->chip_info);
+       kfree(handle->hal_handle);
+       kfree(handle);
+}
+
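+/*
+ * Start execution: with firmware authentication, issue the FCU start
+ * command and poll for completion; otherwise wake each AE and enable
+ * all of its contexts.  Returns the number of AEs started, 0 on error.
+ */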
+int qat_hal_start(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       u32 wakeup_val = handle->chip_info->wakeup_event_val;
+       u32 fcu_ctl_csr, fcu_sts_csr;
+       unsigned int fcu_sts;
+       unsigned char ae;
+       u32 ae_ctr = 0;
+       int retry = 0;
+
+       if (handle->chip_info->fw_auth) {
+               fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+               fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+               ae_ctr = hweight32(ae_mask);
+               SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START);
+               do {
+                       msleep(FW_AUTH_WAIT_PERIOD);
+                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+                       if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
+                               return ae_ctr;
+               } while (retry++ < FW_AUTH_MAX_RETRY);
+               pr_err("QAT: start error (FCU_STS = 0x%x)\n", fcu_sts);
+               return 0;
+       } else {
+               for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+                       qat_hal_put_wakeup_event(handle, ae, 0, wakeup_val);
+                       qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX);
+                       ae_ctr++;
+               }
+               return ae_ctr;
+       }
+}
+
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                 unsigned int ctx_mask)
+{
+       if (!handle->chip_info->fw_auth)
+               qat_hal_disable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned int ctx_mask, unsigned int upc)
+{
+       qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                           handle->hal_handle->upc_mask & upc);
+}
+
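+/*
+ * Read words_num raw microwords back from ustore, temporarily clearing
+ * the shared-control-store bit in AE_MISC_CONTROL while reading.
+ */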
+static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int uaddr,
+                              unsigned int words_num, u64 *uword)
+{
+       unsigned int i, uwrd_lo, uwrd_hi;
+       unsigned int ustore_addr, misc_control;
+
+       misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
+                         misc_control & 0xfffffffb);
+       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+       uaddr |= UA_ECS;
+       for (i = 0; i < words_num; i++) {
+               qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+               uaddr++;
+               uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
+               uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
+               uword[i] = uwrd_hi;
+               uword[i] = (uword[i] << 0x20) | uwrd_lo;
+       }
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
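+/*
+ * Write data words into ustore memory, packing each 32-bit value into
+ * the microword immediate format with a parity bit per 16-bit half.
+ */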
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned int uaddr,
+                    unsigned int words_num, unsigned int *data)
+{
+       unsigned int i, ustore_addr;
+
+       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+       uaddr |= UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       for (i = 0; i < words_num; i++) {
+               unsigned int uwrd_lo, uwrd_hi, tmp;
+
+               uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
+                         ((data[i] & 0xff00) << 2) |
+                         (0x3 << 8) | (data[i] & 0xff);
+               uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
+               uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
+               tmp = ((data[i] >> 0x10) & 0xffff);
+               uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       }
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+#define MAX_EXEC_INST 100
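+/*
+ * Run a short microcode sequence on one context: save the context state
+ * (and the overwritten ustore words when they fit the save buffer),
+ * load and execute the instructions, wait up to max_cycle cycles, then
+ * restore everything that was saved.
+ */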
+static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  u64 *micro_inst, unsigned int inst_num,
+                                  int code_off, unsigned int max_cycle,
+                                  unsigned int *endpc)
+{
+       unsigned int ind_lm_addr_byte0 = 0, ind_lm_addr_byte1 = 0;
+       unsigned int ind_lm_addr_byte2 = 0, ind_lm_addr_byte3 = 0;
+       unsigned int ind_t_index = 0, ind_t_index_byte = 0;
+       unsigned int ind_lm_addr0 = 0, ind_lm_addr1 = 0;
+       unsigned int ind_lm_addr2 = 0, ind_lm_addr3 = 0;
+       u64 savuwords[MAX_EXEC_INST];
+       unsigned int ind_cnt_sig;
+       unsigned int ind_sig, act_sig;
+       unsigned int csr_val = 0, newcsr_val;
+       unsigned int savctx;
+       unsigned int savcc, wakeup_events, savpc;
+       unsigned int ctxarb_ctl, ctx_enables;
+
+       if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
+               pr_err("QAT: invalid instruction num %d\n", inst_num);
+               return -EINVAL;
+       }
+       /* save current context */
+       ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
+       ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
+       ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                               INDIRECT_LM_ADDR_0_BYTE_INDEX);
+       ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                               INDIRECT_LM_ADDR_1_BYTE_INDEX);
+       if (handle->chip_info->lm2lm3) {
+               ind_lm_addr2 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                  LM_ADDR_2_INDIRECT);
+               ind_lm_addr3 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                  LM_ADDR_3_INDIRECT);
+               ind_lm_addr_byte2 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                       INDIRECT_LM_ADDR_2_BYTE_INDEX);
+               ind_lm_addr_byte3 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                       INDIRECT_LM_ADDR_3_BYTE_INDEX);
+               ind_t_index = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                 INDIRECT_T_INDEX);
+               ind_t_index_byte = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                      INDIRECT_T_INDEX_BYTE_INDEX);
+       }
+       if (inst_num <= MAX_EXEC_INST)
+               qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
+       qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
+       savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
+       savpc &= handle->hal_handle->upc_mask;
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       ctx_enables &= IGNORE_W1C_MASK;
+       savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
+       savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+       ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+       ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                         FUTURE_COUNT_SIGNAL_INDIRECT);
+       ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                     CTX_SIG_EVENTS_INDIRECT);
+       act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
+       /* execute the microcode */
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
+       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
+       if (code_off)
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
+       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
+       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+       qat_hal_enable_ctx(handle, ae, (1 << ctx));
+       /* wait for the microcode to finish */
+       if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
+               return -EFAULT;
+       if (endpc) {
+               unsigned int ctx_status;
+
+               ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                CTX_STS_INDIRECT);
+               *endpc = ctx_status & handle->hal_handle->upc_mask;
+       }
+       /* restore the saved context */
+       qat_hal_disable_ctx(handle, ae, (1 << ctx));
+       if (inst_num <= MAX_EXEC_INST)
+               qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
+       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
+                           handle->hal_handle->upc_mask & savpc);
+       csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+       newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
+       qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
+       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           LM_ADDR_0_INDIRECT, ind_lm_addr0);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           LM_ADDR_1_INDIRECT, ind_lm_addr1);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
+       if (handle->chip_info->lm2lm3) {
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_2_INDIRECT,
+                                   ind_lm_addr2);
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_3_INDIRECT,
+                                   ind_lm_addr3);
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+                                   INDIRECT_LM_ADDR_2_BYTE_INDEX,
+                                   ind_lm_addr_byte2);
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+                                   INDIRECT_LM_ADDR_3_BYTE_INDEX,
+                                   ind_lm_addr_byte3);
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+                                   INDIRECT_T_INDEX, ind_t_index);
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+                                   INDIRECT_T_INDEX_BYTE_INDEX,
+                                   ind_t_index_byte);
+       }
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           CTX_SIG_EVENTS_INDIRECT, ind_sig);
+       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+       return 0;
+}
+
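+/*
+ * Read a relative register by executing a single ALU instruction that
+ * routes the register through the ALU, then sampling the ALU_OUT CSR.
+ */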
+static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             enum icp_qat_uof_regtype reg_type,
+                             unsigned short reg_num, unsigned int *data)
+{
+       unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
+       unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
+       unsigned short reg_addr;
+       int status = 0;
+       u64 insts, savuword;
+
+       reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (reg_addr == BAD_REGADDR) {
+               pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
+               return -EINVAL;
+       }
+       switch (reg_type) {
+       case ICP_GPA_REL:
+               insts = 0xA070000000ull | (reg_addr & 0x3ff);
+               break;
+       default:
+               insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
+               break;
+       }
+       savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+       ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       ctx_enables &= IGNORE_W1C_MASK;
+       if (ctx != (savctx & ACS_ACNO))
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 ctx & ACS_ACNO);
+       qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+       uaddr = UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       insts = qat_hal_set_uword_ecc(insts);
+       uwrd_lo = (unsigned int)(insts & 0xffffffff);
+       uwrd_hi = (unsigned int)(insts >> 0x20);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       /* delay for at least 8 cycles */
+       qat_hal_wait_cycles(handle, ae, 0x8, 0);
+       /*
+        * read the ALU output; the instruction will have been executed
+        * before the ECS bit is cleared when the ustore address is
+        * restored below
+        */
+       *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+       qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
+       if (ctx != (savctx & ACS_ACNO))
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 savctx & ACS_ACNO);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+       return status;
+}
+
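+/*
+ * Write a 32-bit value into a relative register by executing a short
+ * immediate-load microcode sequence, 16 bits at a time.
+ */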
+static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             enum icp_qat_uof_regtype reg_type,
+                             unsigned short reg_num, unsigned int data)
+{
+       unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
+       u64 insts[] = {
+               0x0F440000000ull,
+               0x0F040000000ull,
+               0x0F0000C0300ull,
+               0x0E000010000ull
+       };
+       const int num_inst = ARRAY_SIZE(insts), code_off = 1;
+       const int imm_w1 = 0, imm_w0 = 1;
+
+       dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (dest_addr == BAD_REGADDR) {
+               pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
+               return -EINVAL;
+       }
+
+       data16lo = 0xffff & data;
+       data16hi = 0xffff & (data >> 0x10);
+       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+                                         (0xff & data16hi));
+       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+                                          (0xff & data16lo));
+       switch (reg_type) {
+       case ICP_GPA_REL:
+               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+                   ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+                   ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+               break;
+       default:
+               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+                   ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+
+               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+                   ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+               break;
+       }
+
+       return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
+                                      code_off, num_inst * 0x5, NULL);
+}
+
+int qat_hal_get_ins_num(void)
+{
+       return ARRAY_SIZE(inst_4b);
+}
+
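+/*
+ * Append the inst_4b template to micro_inst and patch its immediates so
+ * it stores the first value at the given local-memory address; returns
+ * the number of microwords added.
+ */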
+static int qat_hal_concat_micro_code(u64 *micro_inst,
+                                    unsigned int inst_num, unsigned int size,
+                                    unsigned int addr, unsigned int *value)
+{
+       int i;
+       unsigned int cur_value;
+       const u64 *inst_arr;
+       int fixup_offset;
+       int usize = 0;
+       int orig_num;
+
+       orig_num = inst_num;
+       cur_value = value[0];
+       inst_arr = inst_4b;
+       usize = ARRAY_SIZE(inst_4b);
+       fixup_offset = inst_num;
+       for (i = 0; i < usize; i++)
+               micro_inst[inst_num++] = inst_arr[i];
+       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
+       fixup_offset++;
+       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
+       fixup_offset++;
+       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
+       fixup_offset++;
+       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
+
+       return inst_num - orig_num;
+}
+
+static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned char ae, unsigned char ctx,
+                                     int *pfirst_exec, u64 *micro_inst,
+                                     unsigned int inst_num)
+{
+       int stat = 0;
+       unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
+       unsigned int gprb0 = 0, gprb1 = 0;
+
+       if (*pfirst_exec) {
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
+               *pfirst_exec = 0;
+       }
+       stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
+                                      inst_num * 0x5, NULL);
+       if (stat != 0)
+               return -EFAULT;
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
+
+       return 0;
+}
+
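+/*
+ * Flatten a chain of local-memory batch-init records into a single
+ * microcode buffer and execute it on the AE to initialize local memory.
+ */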
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                       unsigned char ae,
+                       struct icp_qat_uof_batch_init *lm_init_header)
+{
+       struct icp_qat_uof_batch_init *plm_init;
+       u64 *micro_inst_arry;
+       int micro_inst_num;
+       int alloc_inst_size;
+       int first_exec = 1;
+       int stat = 0;
+
+       plm_init = lm_init_header->next;
+       alloc_inst_size = lm_init_header->size;
+       if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
+               alloc_inst_size = handle->hal_handle->max_ustore;
+       micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64),
+                                       GFP_KERNEL);
+       if (!micro_inst_arry)
+               return -ENOMEM;
+       micro_inst_num = 0;
+       while (plm_init) {
+               unsigned int addr, *value, size;
+
+               ae = plm_init->ae;
+               addr = plm_init->addr;
+               value = plm_init->value;
+               size = plm_init->size;
+               micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
+                                                           micro_inst_num,
+                                                           size, addr, value);
+               plm_init = plm_init->next;
+       }
+       /* execute the microcode */
+       if (micro_inst_arry && micro_inst_num > 0) {
+               micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
+               stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
+                                                 micro_inst_arry,
+                                                 micro_inst_num);
+       }
+       kfree(micro_inst_arry);
+       return stat;
+}
+
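+/*
+ * Write an init value into a context's read-transfer register through
+ * the AE transfer-register window; 4-ctx mode doubles the per-context
+ * register space.
+ */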
+static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  enum icp_qat_uof_regtype reg_type,
+                                  unsigned short reg_num, unsigned int val)
+{
+       int status = 0;
+       unsigned int reg_addr;
+       unsigned int ctx_enables;
+       unsigned short mask;
+       unsigned short dr_offset = 0x10;
+
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       if (CE_INUSE_CONTEXTS & ctx_enables) {
+               if (ctx & 0x1) {
+                       pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
+                       return -EINVAL;
+               }
+               mask = 0x1f;
+               dr_offset = 0x20;
+       } else {
+               mask = 0x0f;
+       }
+       if (reg_num & ~mask)
+               return -EINVAL;
+       reg_addr = reg_num + (ctx << 0x5);
+       switch (reg_type) {
+       case ICP_SR_RD_REL:
+       case ICP_SR_REL:
+               SET_AE_XFER(handle, ae, reg_addr, val);
+               break;
+       case ICP_DR_RD_REL:
+       case ICP_DR_REL:
+               SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
+               break;
+       default:
+               status = -EINVAL;
+               break;
+       }
+       return status;
+}
+
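+/*
+ * Write a write-transfer register by staging the value in a GPR with a
+ * short microcode sequence and moving it out to the transfer register.
+ */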
+static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  enum icp_qat_uof_regtype reg_type,
+                                  unsigned short reg_num, unsigned int data)
+{
+       unsigned int gprval, ctx_enables;
+       unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
+           data16low;
+       unsigned short reg_mask;
+       int status = 0;
+       u64 micro_inst[] = {
+               0x0F440000000ull,
+               0x0F040000000ull,
+               0x0A000000000ull,
+               0x0F0000C0300ull,
+               0x0E000010000ull
+       };
+       const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
+       const unsigned short gprnum = 0, dly = num_inst * 0x5;
+
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       if (CE_INUSE_CONTEXTS & ctx_enables) {
+               if (ctx & 0x1) {
+                       pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
+                       return -EINVAL;
+               }
+               reg_mask = (unsigned short)~0x1f;
+       } else {
+               reg_mask = (unsigned short)~0xf;
+       }
+       if (reg_num & reg_mask)
+               return -EINVAL;
+       xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (xfr_addr == BAD_REGADDR) {
+               pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
+               return -EINVAL;
+       }
+       status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+       if (status) {
+               pr_err("QAT: failed to read register");
+               return status;
+       }
+       gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
+       data16low = 0xffff & data;
+       data16hi = 0xffff & (data >> 0x10);
+       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+                                         (unsigned short)(0xff & data16hi));
+       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+                                          (unsigned short)(0xff & data16low));
+       micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
+           ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+       micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
+           ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+       micro_inst[0x2] = micro_inst[0x2] |
+           ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
+       status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
+                                        code_off, dly, NULL);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
+       return status;
+}
+
+static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             unsigned short nn, unsigned int val)
+{
+       unsigned int ctx_enables;
+       int stat = 0;
+
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       ctx_enables &= IGNORE_W1C_MASK;
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
+
+       stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       return stat;
+}
+
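+/*
+ * Split an absolute register number into a relative register plus its
+ * owning context: even contexts only in 4-ctx mode, all eight in 8-ctx
+ * mode.
+ */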
+static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
+                                     *handle, unsigned char ae,
+                                     unsigned short absreg_num,
+                                     unsigned short *relreg,
+                                     unsigned char *ctx)
+{
+       unsigned int ctx_enables;
+
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       if (ctx_enables & CE_INUSE_CONTEXTS) {
+               /* 4-ctx mode */
+               *relreg = absreg_num & 0x1F;
+               *ctx = (absreg_num >> 0x4) & 0x6;
+       } else {
+               /* 8-ctx mode */
+               *relreg = absreg_num & 0x0F;
+               *ctx = (absreg_num >> 0x4) & 0x7;
+       }
+       return 0;
+}
+
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned long ctx_mask,
+                    enum icp_qat_uof_regtype reg_type,
+                    unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 1;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, &ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
+               if (stat) {
+                       pr_err("QAT: write gpr fail\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned long ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 3;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, &ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
+                                              regdata);
+               if (stat) {
+                       pr_err("QAT: write wr xfer fail\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned long ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 3;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, &ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
+                                              regdata);
+               if (stat) {
+                       pr_err("QAT: write rd xfer fail\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned long ctx_mask,
+                   unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned char ctx;
+
+       if (!handle->chip_info->nn) {
+               dev_err(&handle->pci_dev->dev, "QAT: No next neigh in 0x%x\n",
+                       handle->pci_dev->device);
+               return -EINVAL;
+       }
+
+       if (ctx_mask == 0)
+               return -EINVAL;
+
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!test_bit(ctx, &ctx_mask))
+                       continue;
+               stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
+               if (stat) {
+                       pr_err("QAT: write neigh error\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
new file mode 100644 (file)
index 0000000..3ba8ca2
--- /dev/null
@@ -0,0 +1,2133 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci_ids.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_fw_loader_handle.h"
+
+#define UWORD_CPYBUF_SIZE 1024
+#define INVLD_UWORD 0xffffffffffull
+#define PID_MINOR_REV 0xf
+#define PID_MAJOR_REV (0xf << 4)
+
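+/*
+ * Attach a UOF image to the next free slice of an AE, allocating the
+ * region and page bookkeeping the loader tracks per slice.
+ */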
+static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
+                                unsigned int ae, unsigned int image_num)
+{
+       struct icp_qat_uclo_aedata *ae_data;
+       struct icp_qat_uclo_encapme *encap_image;
+       struct icp_qat_uclo_page *page = NULL;
+       struct icp_qat_uclo_aeslice *ae_slice = NULL;
+
+       ae_data = &obj_handle->ae_data[ae];
+       encap_image = &obj_handle->ae_uimage[image_num];
+       ae_slice = &ae_data->ae_slices[ae_data->slice_num];
+       ae_slice->encap_image = encap_image;
+
+       if (encap_image->img_ptr) {
+               ae_slice->ctx_mask_assigned =
+                                       encap_image->img_ptr->ctx_assigned;
+               ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
+       } else {
+               ae_slice->ctx_mask_assigned = 0;
+       }
+       ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
+       if (!ae_slice->region)
+               return -ENOMEM;
+       ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
+       if (!ae_slice->page)
+               goto out_err;
+       page = ae_slice->page;
+       page->encap_page = encap_image->page;
+       ae_slice->page->region = ae_slice->region;
+       ae_data->slice_num++;
+       return 0;
+out_err:
+       kfree(ae_slice->region);
+       ae_slice->region = NULL;
+       return -ENOMEM;
+}
+
+static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
+{
+       unsigned int i;
+
+       if (!ae_data) {
+               pr_err("QAT: bad argument, ae_data is NULL\n ");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < ae_data->slice_num; i++) {
+               kfree(ae_data->ae_slices[i].region);
+               ae_data->ae_slices[i].region = NULL;
+               kfree(ae_data->ae_slices[i].page);
+               ae_data->ae_slices[i].page = NULL;
+       }
+       return 0;
+}
+
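+/* Return the string at @str_offset in the UOF string table, or NULL. */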
+static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
+                                unsigned int str_offset)
+{
+       if (!str_table->table_len || str_offset > str_table->table_len)
+               return NULL;
+       return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
+}
+
+static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
+{
+       int maj = hdr->maj_ver & 0xff;
+       int min = hdr->min_ver & 0xff;
+
+       if (hdr->file_id != ICP_QAT_UOF_FID) {
+               pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+               return -EINVAL;
+       }
+       if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
+               pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
+                      maj, min);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
+{
+       int maj = suof_hdr->maj_ver & 0xff;
+       int min = suof_hdr->min_ver & 0xff;
+
+       if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
+               pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
+               return -EINVAL;
+       }
+       if (suof_hdr->fw_type != 0) {
+               pr_err("QAT: unsupported firmware type\n");
+               return -EINVAL;
+       }
+       if (suof_hdr->num_chunks <= 0x1) {
+               pr_err("QAT: SUOF chunk amount is incorrect\n");
+               return -EINVAL;
+       }
+       if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
+               pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
+                      maj, min);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned int addr, unsigned int *val,
+                                     unsigned int num_in_bytes)
+{
+       unsigned int outval;
+       unsigned char *ptr = (unsigned char *)val;
+
+       while (num_in_bytes) {
+               memcpy(&outval, ptr, 4);
+               SRAM_WRITE(handle, addr, outval);
+               num_in_bytes -= 4;
+               ptr += 4;
+               addr += 4;
+       }
+}
+
+static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned char ae, unsigned int addr,
+                                     unsigned int *val,
+                                     unsigned int num_in_bytes)
+{
+       unsigned int outval;
+       unsigned char *ptr = (unsigned char *)val;
+
+       addr >>= 0x2; /* convert to uword address */
+
+       while (num_in_bytes) {
+               memcpy(&outval, ptr, 4);
+               qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
+               num_in_bytes -= 4;
+               ptr += 4;
+       }
+}
+
+static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae,
+                                  struct icp_qat_uof_batch_init
+                                  *umem_init_header)
+{
+       struct icp_qat_uof_batch_init *umem_init;
+
+       if (!umem_init_header)
+               return;
+       umem_init = umem_init_header->next;
+       while (umem_init) {
+               unsigned int addr, *value, size;
+
+               ae = umem_init->ae;
+               addr = umem_init->addr;
+               value = umem_init->value;
+               size = umem_init->size;
+               qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
+               umem_init = umem_init->next;
+       }
+}
+
+static void
+qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
+                                struct icp_qat_uof_batch_init **base)
+{
+       struct icp_qat_uof_batch_init *umem_init;
+
+       umem_init = *base;
+       while (umem_init) {
+               struct icp_qat_uof_batch_init *pre;
+
+               pre = umem_init;
+               umem_init = umem_init->next;
+               kfree(pre);
+       }
+       *base = NULL;
+}
+
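+/* Parse the leading decimal digits of @str into @num. */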
+static int qat_uclo_parse_num(char *str, unsigned int *num)
+{
+       char buf[16] = {0};
+       unsigned long ae = 0;
+       int i;
+
+       strncpy(buf, str, 15);
+       for (i = 0; i < 16; i++) {
+               if (!isdigit(buf[i])) {
+                       buf[i] = '\0';
+                       break;
+               }
+       }
+       if ((kstrtoul(buf, 10, &ae)))
+               return -EFAULT;
+
+       *num = (unsigned int)ae;
+       return 0;
+}
+
+static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
+                                    struct icp_qat_uof_initmem *init_mem,
+                                    unsigned int size_range, unsigned int *ae)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       char *str;
+
+       if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
+               pr_err("QAT: initmem is out of range");
+               return -EINVAL;
+       }
+       if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
+               pr_err("QAT: Memory scope for init_mem error\n");
+               return -EINVAL;
+       }
+       str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
+       if (!str) {
+               pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+               return -EINVAL;
+       }
+       if (qat_uclo_parse_num(str, ae)) {
+               pr_err("QAT: Parse num for AE number failed\n");
+               return -EINVAL;
+       }
+       if (*ae >= ICP_QAT_UCLO_MAX_AE) {
+               pr_err("QAT: ae %d out of range\n", *ae);
+               return -EINVAL;
+       }
+       return 0;
+}
+
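+/*
+ * Append one batch-init entry per value attribute of @init_mem to the
+ * per-AE list at @init_tab_base, allocating the list head on first use.
+ */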
+static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
+                                          *handle, struct icp_qat_uof_initmem
+                                          *init_mem, unsigned int ae,
+                                          struct icp_qat_uof_batch_init
+                                          **init_tab_base)
+{
+       struct icp_qat_uof_batch_init *init_header, *tail;
+       struct icp_qat_uof_batch_init *mem_init, *tail_old;
+       struct icp_qat_uof_memvar_attr *mem_val_attr;
+       unsigned int i, flag = 0;
+
+       mem_val_attr =
+               (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
+               sizeof(struct icp_qat_uof_initmem));
+
+       init_header = *init_tab_base;
+       if (!init_header) {
+               init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
+               if (!init_header)
+                       return -ENOMEM;
+               init_header->size = 1;
+               *init_tab_base = init_header;
+               flag = 1;
+       }
+       tail_old = init_header;
+       while (tail_old->next)
+               tail_old = tail_old->next;
+       tail = tail_old;
+       for (i = 0; i < init_mem->val_attr_num; i++) {
+               mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
+               if (!mem_init)
+                       goto out_err;
+               mem_init->ae = ae;
+               mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
+               mem_init->value = &mem_val_attr->value;
+               mem_init->size = 4;
+               mem_init->next = NULL;
+               tail->next = mem_init;
+               tail = mem_init;
+               init_header->size += qat_hal_get_ins_num();
+               mem_val_attr++;
+       }
+       return 0;
+out_err:
+       /* Do not free the list head unless we allocated it. */
+       tail_old = tail_old->next;
+       if (flag) {
+               kfree(*init_tab_base);
+               *init_tab_base = NULL;
+       }
+
+       while (tail_old) {
+               mem_init = tail_old->next;
+               kfree(tail_old);
+               tail_old = mem_init;
+       }
+       return -ENOMEM;
+}
+
+static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
+                                 struct icp_qat_uof_initmem *init_mem)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae;
+
+       if (qat_uclo_fetch_initmem_ae(handle, init_mem,
+                                     handle->chip_info->lm_size, &ae))
+               return -EINVAL;
+       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+                                           &obj_handle->lm_init_tab[ae]))
+               return -EINVAL;
+       return 0;
+}
+
+static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
+                                 struct icp_qat_uof_initmem *init_mem)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae, ustore_size, uaddr, i;
+       struct icp_qat_uclo_aedata *aed;
+
+       ustore_size = obj_handle->ustore_phy_size;
+       if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
+               return -EINVAL;
+       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+                                           &obj_handle->umem_init_tab[ae]))
+               return -EINVAL;
+       /* set the highest ustore address referenced */
+       uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
+       aed = &obj_handle->ae_data[ae];
+       for (i = 0; i < aed->slice_num; i++) {
+               if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
+                       aed->ae_slices[i].encap_image->uwords_num = uaddr;
+       }
+       return 0;
+}
+
+static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
+                                  struct icp_qat_uof_initmem *init_mem)
+{
+       switch (init_mem->region) {
+       case ICP_QAT_UOF_LMEM_REGION:
+               if (qat_uclo_init_lmem_seg(handle, init_mem))
+                       return -EINVAL;
+               break;
+       case ICP_QAT_UOF_UMEM_REGION:
+               if (qat_uclo_init_umem_seg(handle, init_mem))
+                       return -EINVAL;
+               break;
+       default:
+               pr_err("QAT: initmem region error. region type=0x%x\n",
+                      init_mem->region);
+               return -EINVAL;
+       }
+       return 0;
+}
+
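+/*
+ * Fill the parts of each assigned AE's ustore that the image does not
+ * cover with the image's fill pattern.
+ */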
+static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
+                               struct icp_qat_uclo_encapme *image)
+{
+       unsigned int i;
+       struct icp_qat_uclo_encap_page *page;
+       struct icp_qat_uof_image *uof_image;
+       unsigned char ae;
+       unsigned int ustore_size;
+       unsigned int patt_pos;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+       u64 *fill_data;
+
+       uof_image = image->img_ptr;
+       fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
+                           GFP_KERNEL);
+       if (!fill_data)
+               return -ENOMEM;
+       for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
+               memcpy(&fill_data[i], &uof_image->fill_pattern,
+                      sizeof(u64));
+       page = image->page;
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               unsigned long ae_assigned = uof_image->ae_assigned;
+
+               if (!test_bit(ae, &ae_assigned))
+                       continue;
+
+               if (!test_bit(ae, &cfg_ae_mask))
+                       continue;
+
+               ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
+               patt_pos = page->beg_addr_p + page->micro_words_num;
+
+               qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
+                                 page->beg_addr_p, &fill_data[0]);
+               qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
+                                 ustore_size - patt_pos + 1,
+                                 &fill_data[page->beg_addr_p]);
+       }
+       kfree(fill_data);
+       return 0;
+}
+
+static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
+{
+       int i, ae;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+
+       for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
+               if (initmem->num_in_bytes) {
+                       if (qat_uclo_init_ae_memory(handle, initmem))
+                               return -EINVAL;
+               }
+               initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
+                       (uintptr_t)initmem +
+                       sizeof(struct icp_qat_uof_initmem)) +
+                       (sizeof(struct icp_qat_uof_memvar_attr) *
+                       initmem->val_attr_num));
+       }
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               if (qat_hal_batch_wr_lm(handle, ae,
+                                       obj_handle->lm_init_tab[ae])) {
+                       pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
+                       return -EINVAL;
+               }
+               qat_uclo_cleanup_batch_init_list(handle,
+                                                &obj_handle->lm_init_tab[ae]);
+               qat_uclo_batch_wr_umem(handle, ae,
+                                      obj_handle->umem_init_tab[ae]);
+               qat_uclo_cleanup_batch_init_list(handle,
+                                                &obj_handle->
+                                                umem_init_tab[ae]);
+       }
+       return 0;
+}
+
+static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
+                                char *chunk_id, void *cur)
+{
+       int i;
+       struct icp_qat_uof_chunkhdr *chunk_hdr =
+           (struct icp_qat_uof_chunkhdr *)
+           ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
+
+       for (i = 0; i < obj_hdr->num_chunks; i++) {
+               if ((cur < (void *)&chunk_hdr[i]) &&
+                   !strncmp(chunk_hdr[i].chunk_id, chunk_id,
+                            ICP_QAT_UOF_OBJID_LEN)) {
+                       return &chunk_hdr[i];
+               }
+       }
+       return NULL;
+}
+
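+/* One step of a CRC-16 (polynomial 0x1021) over the chunk bytes. */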
+static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
+{
+       int i;
+       unsigned int topbit = 1 << 0xF;
+       unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
+
+       reg ^= inbyte << 0x8;
+       for (i = 0; i < 0x8; i++) {
+               if (reg & topbit)
+                       reg = (reg << 1) ^ 0x1021;
+               else
+                       reg <<= 1;
+       }
+       return reg & 0xFFFF;
+}
+
+static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
+{
+       unsigned int chksum = 0;
+
+       if (ptr)
+               while (num--)
+                       chksum = qat_uclo_calc_checksum(chksum, *ptr++);
+       return chksum;
+}
+
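+/*
+ * Find the file chunk named @chunk_id, verify it against the stored
+ * checksum and wrap it in a freshly allocated object header.
+ */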
+static struct icp_qat_uclo_objhdr *
+qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
+                  char *chunk_id)
+{
+       struct icp_qat_uof_filechunkhdr *file_chunk;
+       struct icp_qat_uclo_objhdr *obj_hdr;
+       char *chunk;
+       int i;
+
+       file_chunk = (struct icp_qat_uof_filechunkhdr *)
+               (buf + sizeof(struct icp_qat_uof_filehdr));
+       for (i = 0; i < file_hdr->num_chunks; i++) {
+               if (!strncmp(file_chunk->chunk_id, chunk_id,
+                            ICP_QAT_UOF_OBJID_LEN)) {
+                       chunk = buf + file_chunk->offset;
+                       if (file_chunk->checksum != qat_uclo_calc_str_checksum(
+                               chunk, file_chunk->size))
+                               break;
+                       obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
+                       if (!obj_hdr)
+                               break;
+                       obj_hdr->file_buff = chunk;
+                       obj_hdr->checksum = file_chunk->checksum;
+                       obj_hdr->size = file_chunk->size;
+                       return obj_hdr;
+               }
+               file_chunk++;
+       }
+       return NULL;
+}
+
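+/*
+ * Reject UOF images that use features this loader does not support:
+ * imported variables, neighbour registers, multiple pages, shared
+ * ustore and reloadable contexts.
+ */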
+static int
+qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
+                           struct icp_qat_uof_image *image)
+{
+       struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
+       struct icp_qat_uof_objtable *neigh_reg_tab;
+       struct icp_qat_uof_code_page *code_page;
+
+       code_page = (struct icp_qat_uof_code_page *)
+                       ((char *)image + sizeof(struct icp_qat_uof_image));
+       uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+                    code_page->uc_var_tab_offset);
+       imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+                     code_page->imp_var_tab_offset);
+       imp_expr_tab = (struct icp_qat_uof_objtable *)
+                      (encap_uof_obj->beg_uof +
+                      code_page->imp_expr_tab_offset);
+       if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
+           imp_expr_tab->entry_num) {
+               pr_err("QAT: UOF can't contain imported variable to be parsed\n");
+               return -EINVAL;
+       }
+       neigh_reg_tab = (struct icp_qat_uof_objtable *)
+                       (encap_uof_obj->beg_uof +
+                       code_page->neigh_reg_tab_offset);
+       if (neigh_reg_tab->entry_num) {
+               pr_err("QAT: UOF can't contain neighbor register table\n");
+               return -EINVAL;
+       }
+       if (image->numpages > 1) {
+               pr_err("QAT: UOF can't contain multiple pages\n");
+               return -EINVAL;
+       }
+       if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
+               pr_err("QAT: UOF can't use shared control store feature\n");
+               return -EFAULT;
+       }
+       if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
+               pr_err("QAT: UOF can't use reloadable feature\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
+                                    *encap_uof_obj,
+                                    struct icp_qat_uof_image *img,
+                                    struct icp_qat_uclo_encap_page *page)
+{
+       struct icp_qat_uof_code_page *code_page;
+       struct icp_qat_uof_code_area *code_area;
+       struct icp_qat_uof_objtable *uword_block_tab;
+       struct icp_qat_uof_uword_block *uwblock;
+       int i;
+
+       code_page = (struct icp_qat_uof_code_page *)
+                       ((char *)img + sizeof(struct icp_qat_uof_image));
+       page->def_page = code_page->def_page;
+       page->page_region = code_page->page_region;
+       page->beg_addr_v = code_page->beg_addr_v;
+       page->beg_addr_p = code_page->beg_addr_p;
+       code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
+                                               code_page->code_area_offset);
+       page->micro_words_num = code_area->micro_words_num;
+       uword_block_tab = (struct icp_qat_uof_objtable *)
+                         (encap_uof_obj->beg_uof +
+                         code_area->uword_block_tab);
+       page->uwblock_num = uword_block_tab->entry_num;
+       uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
+                       sizeof(struct icp_qat_uof_objtable));
+       page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
+       for (i = 0; i < uword_block_tab->entry_num; i++)
+               page->uwblock[i].micro_words =
+               (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
+}
+
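+/*
+ * Walk the IMAG chunks of the UOF, map each image's register, symbol
+ * and breakpoint tables, and return the number of images mapped.
+ */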
+static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
+                              struct icp_qat_uclo_encapme *ae_uimage,
+                              int max_image)
+{
+       int i, j;
+       struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
+       struct icp_qat_uof_image *image;
+       struct icp_qat_uof_objtable *ae_regtab;
+       struct icp_qat_uof_objtable *init_reg_sym_tab;
+       struct icp_qat_uof_objtable *sbreak_tab;
+       struct icp_qat_uof_encap_obj *encap_uof_obj =
+                                       &obj_handle->encap_uof_obj;
+
+       for (j = 0; j < max_image; j++) {
+               chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+                                               ICP_QAT_UOF_IMAG, chunk_hdr);
+               if (!chunk_hdr)
+                       break;
+               image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
+                                                    chunk_hdr->offset);
+               ae_regtab = (struct icp_qat_uof_objtable *)
+                          (image->reg_tab_offset +
+                          obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
+               ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
+                       (((char *)ae_regtab) +
+                       sizeof(struct icp_qat_uof_objtable));
+               init_reg_sym_tab = (struct icp_qat_uof_objtable *)
+                                  (image->init_reg_sym_tab +
+                                  obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
+               ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
+                       (((char *)init_reg_sym_tab) +
+                       sizeof(struct icp_qat_uof_objtable));
+               sbreak_tab = (struct icp_qat_uof_objtable *)
+                       (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
+               ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
+                                     (((char *)sbreak_tab) +
+                                     sizeof(struct icp_qat_uof_objtable));
+               ae_uimage[j].img_ptr = image;
+               if (qat_uclo_check_image_compat(encap_uof_obj, image))
+                       goto out_err;
+               ae_uimage[j].page =
+                       kzalloc(sizeof(struct icp_qat_uclo_encap_page),
+                               GFP_KERNEL);
+               if (!ae_uimage[j].page)
+                       goto out_err;
+               qat_uclo_map_image_page(encap_uof_obj, image,
+                                       ae_uimage[j].page);
+       }
+       return j;
+out_err:
+       for (i = 0; i < j; i++)
+               kfree(ae_uimage[i].page);
+       return 0;
+}
+
+static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
+{
+       int i, ae;
+       int mflag = 0;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+
+       for_each_set_bit(ae, &ae_mask, max_ae) {
+               if (!test_bit(ae, &cfg_ae_mask))
+                       continue;
+
+               for (i = 0; i < obj_handle->uimage_num; i++) {
+                       unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;
+
+                       if (!test_bit(ae, &ae_assigned))
+                               continue;
+                       mflag = 1;
+                       if (qat_uclo_init_ae_data(obj_handle, ae, i))
+                               return -EINVAL;
+               }
+       }
+       if (!mflag) {
+               pr_err("QAT: uimage uses AE not set\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static struct icp_qat_uof_strtable *
+qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
+                      char *tab_name, struct icp_qat_uof_strtable *str_table)
+{
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+       chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
+                                       obj_hdr->file_buff, tab_name, NULL);
+       if (chunk_hdr) {
+               int hdr_size;
+
+               memcpy(&str_table->table_len, obj_hdr->file_buff +
+                      chunk_hdr->offset, sizeof(str_table->table_len));
+               hdr_size = (char *)&str_table->strings - (char *)str_table;
+               str_table->strings = (uintptr_t)obj_hdr->file_buff +
+                                       chunk_hdr->offset + hdr_size;
+               return str_table;
+       }
+       return NULL;
+}
+
+static void
+qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
+                          struct icp_qat_uclo_init_mem_table *init_mem_tab)
+{
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+       chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+                                       ICP_QAT_UOF_IMEM, NULL);
+       if (chunk_hdr) {
+               memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
+                       chunk_hdr->offset, sizeof(unsigned int));
+               init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
+               (encap_uof_obj->beg_uof + chunk_hdr->offset +
+               sizeof(unsigned int));
+       }
+}
+
+static unsigned int
+qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
+{
+       switch (handle->pci_dev->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+               return ICP_QAT_AC_895XCC_DEV_TYPE;
+       case PCI_DEVICE_ID_INTEL_QAT_C62X:
+               return ICP_QAT_AC_C62X_DEV_TYPE;
+       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+               return ICP_QAT_AC_C3XXX_DEV_TYPE;
+       case ADF_4XXX_PCI_DEVICE_ID:
+       case ADF_401XX_PCI_DEVICE_ID:
+       case ADF_402XX_PCI_DEVICE_ID:
+               return ICP_QAT_AC_4XXX_A_DEV_TYPE;
+       default:
+               pr_err("QAT: unsupported device 0x%x\n",
+                      handle->pci_dev->device);
+               return 0;
+       }
+}
+
+static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
+{
+       unsigned int maj_ver, prod_type = obj_handle->prod_type;
+
+       if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
+               pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
+                      obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
+                      prod_type);
+               return -EINVAL;
+       }
+       maj_ver = obj_handle->prod_rev & 0xff;
+       if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
+           obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
+               pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+               return -EINVAL;
+       }
+       return 0;
+}
+
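+/*
+ * Dispatch a register init to the matching HAL helper. Absolute
+ * register types clear @ctx_mask and fall through to the relative
+ * handler, which converts the absolute address itself.
+ */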
+static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned char ctx_mask,
+                            enum icp_qat_uof_regtype reg_type,
+                            unsigned short reg_addr, unsigned int value)
+{
+       switch (reg_type) {
+       case ICP_GPA_ABS:
+       case ICP_GPB_ABS:
+               ctx_mask = 0;
+               fallthrough;
+       case ICP_GPA_REL:
+       case ICP_GPB_REL:
+               return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
+                                       reg_addr, value);
+       case ICP_SR_ABS:
+       case ICP_DR_ABS:
+       case ICP_SR_RD_ABS:
+       case ICP_DR_RD_ABS:
+               ctx_mask = 0;
+               fallthrough;
+       case ICP_SR_REL:
+       case ICP_DR_REL:
+       case ICP_SR_RD_REL:
+       case ICP_DR_RD_REL:
+               return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
+                                           reg_addr, value);
+       case ICP_SR_WR_ABS:
+       case ICP_DR_WR_ABS:
+               ctx_mask = 0;
+               fallthrough;
+       case ICP_SR_WR_REL:
+       case ICP_DR_WR_REL:
+               return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
+                                           reg_addr, value);
+       case ICP_NEIGH_REL:
+               return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
+       default:
+               pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
+               return -EFAULT;
+       }
+       return 0;
+}
+
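+/*
+ * Apply an image's init_regsym table; the context mask is 0xff in
+ * eight-context mode and 0x55 (even contexts) in four-context mode.
+ */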
+static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
+                                unsigned int ae,
+                                struct icp_qat_uclo_encapme *encap_ae)
+{
+       unsigned int i;
+       unsigned char ctx_mask;
+       struct icp_qat_uof_init_regsym *init_regsym;
+
+       if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
+           ICP_QAT_UCLO_MAX_CTX)
+               ctx_mask = 0xff;
+       else
+               ctx_mask = 0x55;
+
+       for (i = 0; i < encap_ae->init_regsym_num; i++) {
+               unsigned int exp_res;
+
+               init_regsym = &encap_ae->init_regsym[i];
+               exp_res = init_regsym->value;
+               switch (init_regsym->init_type) {
+               case ICP_QAT_UOF_INIT_REG:
+                       qat_uclo_init_reg(handle, ae, ctx_mask,
+                                         (enum icp_qat_uof_regtype)
+                                         init_regsym->reg_type,
+                                         (unsigned short)init_regsym->reg_addr,
+                                         exp_res);
+                       break;
+               case ICP_QAT_UOF_INIT_REG_CTX:
+                       /* check if ctx is appropriate for the ctxMode */
+                       if (!((1 << init_regsym->ctx) & ctx_mask)) {
+                               pr_err("QAT: invalid ctx num = 0x%x\n",
+                                      init_regsym->ctx);
+                               return -EINVAL;
+                       }
+                       qat_uclo_init_reg(handle, ae,
+                                         (unsigned char)
+                                         (1 << init_regsym->ctx),
+                                         (enum icp_qat_uof_regtype)
+                                         init_regsym->reg_type,
+                                         (unsigned short)init_regsym->reg_addr,
+                                         exp_res);
+                       break;
+               case ICP_QAT_UOF_INIT_EXPR:
+                       pr_err("QAT: INIT_EXPR feature not supported\n");
+                       return -EINVAL;
+               case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
+                       pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+                       return -EINVAL;
+               default:
+                       break;
+               }
+       }
+       return 0;
+}
+
+static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       struct icp_qat_uclo_aedata *aed;
+       unsigned int s, ae;
+
+       if (obj_handle->global_inited)
+               return 0;
+       if (obj_handle->init_mem_tab.entry_num) {
+               if (qat_uclo_init_memory(handle)) {
+                       pr_err("QAT: initialize memory failed\n");
+                       return -EINVAL;
+               }
+       }
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               aed = &obj_handle->ae_data[ae];
+               for (s = 0; s < aed->slice_num; s++) {
+                       if (!aed->ae_slices[s].encap_image)
+                               continue;
+                       if (qat_uclo_init_reg_sym(handle, ae, aed->ae_slices[s].encap_image))
+                               return -EINVAL;
+               }
+       }
+       obj_handle->global_inited = 1;
+       return 0;
+}
+
+static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
+                            struct icp_qat_uclo_objhandle *obj_handle,
+                            unsigned char ae,
+                            struct icp_qat_uof_image *uof_image)
+{
+       unsigned char mode;
+       int ret;
+
+       mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
+       ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
+       if (ret) {
+               pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+               return ret;
+       }
+       if (handle->chip_info->nn) {
+               mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
+               ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
+               if (ret) {
+                       pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+                       return ret;
+               }
+       }
+       mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
+       ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
+       if (ret) {
+               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+               return ret;
+       }
+       mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
+       ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
+       if (ret) {
+               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+               return ret;
+       }
+       if (handle->chip_info->lm2lm3) {
+               mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
+               ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
+               if (ret) {
+                       pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
+                       return ret;
+               }
+               mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
+               ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
+               if (ret) {
+                       pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
+                       return ret;
+               }
+               mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
+               qat_hal_set_ae_tindex_mode(handle, ae, mode);
+       }
+       return 0;
+}
+
+static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uof_image *uof_image;
+       struct icp_qat_uclo_aedata *ae_data;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+       unsigned char ae, s;
+       int error;
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               if (!test_bit(ae, &cfg_ae_mask))
+                       continue;
+
+               ae_data = &obj_handle->ae_data[ae];
+               for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
+                                     ICP_QAT_UCLO_MAX_CTX); s++) {
+                       if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+                               continue;
+                       uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
+                       error = qat_hal_set_modes(handle, obj_handle, ae,
+                                                 uof_image);
+                       if (error)
+                               return error;
+               }
+       }
+       return 0;
+}
+
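+/* Record each image's uword count: start address plus microword count. */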
+static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       struct icp_qat_uclo_encapme *image;
+       int a;
+
+       for (a = 0; a < obj_handle->uimage_num; a++) {
+               image = &obj_handle->ae_uimage[a];
+               image->uwords_num = image->page->beg_addr_p +
+                                       image->page->micro_words_num;
+       }
+}
+
+static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae;
+
+       obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
+       obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
+                                            obj_handle->obj_hdr->file_buff;
+       obj_handle->uword_in_bytes = 6;
+       obj_handle->prod_type = qat_uclo_get_dev_type(handle);
+       obj_handle->prod_rev = PID_MAJOR_REV |
+                       (PID_MINOR_REV & handle->hal_handle->revision_id);
+       if (qat_uclo_check_uof_compat(obj_handle)) {
+               pr_err("QAT: UOF incompatible\n");
+               return -EINVAL;
+       }
+       obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
+                                       GFP_KERNEL);
+       if (!obj_handle->uword_buf)
+               return -ENOMEM;
+       obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
+       if (!obj_handle->obj_hdr->file_buff ||
+           !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
+                                   &obj_handle->str_table)) {
+               pr_err("QAT: UOF doesn't have effective images\n");
+               goto out_err;
+       }
+       obj_handle->uimage_num =
+               qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
+                                   ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
+       if (!obj_handle->uimage_num)
+               goto out_err;
+       if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
+               pr_err("QAT: Bad object\n");
+               goto out_check_uof_aemask_err;
+       }
+       qat_uclo_init_uword_num(handle);
+       qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
+                                  &obj_handle->init_mem_tab);
+       if (qat_uclo_set_ae_mode(handle))
+               goto out_check_uof_aemask_err;
+       return 0;
+out_check_uof_aemask_err:
+       for (ae = 0; ae < obj_handle->uimage_num; ae++)
+               kfree(obj_handle->ae_uimage[ae].page);
+out_err:
+       kfree(obj_handle->uword_buf);
+       return -EFAULT;
+}
+
+static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
+                                     struct icp_qat_suof_filehdr *suof_ptr,
+                                     int suof_size)
+{
+       unsigned int check_sum = 0;
+       unsigned int min_ver_offset = 0;
+       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+
+       suof_handle->file_id = ICP_QAT_SUOF_FID;
+       suof_handle->suof_buf = (char *)suof_ptr;
+       suof_handle->suof_size = suof_size;
+       min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
+                                             min_ver);
+       check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
+                                              min_ver_offset);
+       if (check_sum != suof_ptr->check_sum) {
+               pr_err("QAT: incorrect SUOF checksum\n");
+               return -EINVAL;
+       }
+       suof_handle->check_sum = suof_ptr->check_sum;
+       suof_handle->min_ver = suof_ptr->min_ver;
+       suof_handle->maj_ver = suof_ptr->maj_ver;
+       suof_handle->fw_type = suof_ptr->fw_type;
+       return 0;
+}
+
+static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
+                             struct icp_qat_suof_img_hdr *suof_img_hdr,
+                             struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
+{
+       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+       struct icp_qat_simg_ae_mode *ae_mode;
+       struct icp_qat_suof_objhdr *suof_objhdr;
+
+       suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
+                                  suof_chunk_hdr->offset +
+                                  sizeof(*suof_objhdr));
+       suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
+                                 (suof_handle->suof_buf +
+                                  suof_chunk_hdr->offset))->img_length;
+
+       suof_img_hdr->css_header = suof_img_hdr->simg_buf;
+       suof_img_hdr->css_key = (suof_img_hdr->css_header +
+                                sizeof(struct icp_qat_css_hdr));
+       suof_img_hdr->css_signature = suof_img_hdr->css_key +
+                                     ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+                                     ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
+       suof_img_hdr->css_simg = suof_img_hdr->css_signature +
+                                ICP_QAT_CSS_SIGNATURE_LEN(handle);
+
+       ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
+       suof_img_hdr->ae_mask = ae_mode->ae_mask;
+       suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
+       suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
+       suof_img_hdr->fw_type = ae_mode->fw_type;
+}
+
+static void
+qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
+                         struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
+{
+       char **sym_str = (char **)&suof_handle->sym_str;
+       unsigned int *sym_size = &suof_handle->sym_size;
+       struct icp_qat_suof_strtable *str_table_obj;
+
+       *sym_size = *(unsigned int *)(uintptr_t)
+                  (suof_chunk_hdr->offset + suof_handle->suof_buf);
+       *sym_str = (char *)(uintptr_t)
+                  (suof_handle->suof_buf + suof_chunk_hdr->offset +
+                  sizeof(str_table_obj->tab_length));
+}
+
+static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
+                                     struct icp_qat_suof_img_hdr *img_hdr)
+{
+       struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
+       unsigned int prod_rev, maj_ver, prod_type;
+
+       prod_type = qat_uclo_get_dev_type(handle);
+       img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
+       prod_rev = PID_MAJOR_REV |
+                        (PID_MINOR_REV & handle->hal_handle->revision_id);
+       if (img_ae_mode->dev_type != prod_type) {
+               pr_err("QAT: incompatible product type %x\n",
+                      img_ae_mode->dev_type);
+               return -EINVAL;
+       }
+       maj_ver = prod_rev & 0xff;
+       if (maj_ver > img_ae_mode->devmax_ver ||
+           maj_ver < img_ae_mode->devmin_ver) {
+               pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+
+       kfree(sobj_handle->img_table.simg_hdr);
+       sobj_handle->img_table.simg_hdr = NULL;
+       kfree(handle->sobj_handle);
+       handle->sobj_handle = NULL;
+}
+
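+/* Swap image @img_id to the end of the table so it is loaded last. */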
+static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
+                             unsigned int img_id, unsigned int num_simgs)
+{
+       struct icp_qat_suof_img_hdr img_header;
+
+       if (img_id != num_simgs - 1) {
+               memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
+                      sizeof(*suof_img_hdr));
+               memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
+                      sizeof(*suof_img_hdr));
+               memcpy(&suof_img_hdr[img_id], &img_header,
+                      sizeof(*suof_img_hdr));
+       }
+}
+
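+/*
+ * Parse an SUOF: validate the header, map the symbol table and every
+ * image chunk, and, on parts without a shared ustore target group,
+ * move the image that serves AE0 to the end so it is loaded last.
+ */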
+static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
+                            struct icp_qat_suof_filehdr *suof_ptr,
+                            int suof_size)
+{
+       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+       struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
+       struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
+       int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
+       unsigned int i = 0;
+       struct icp_qat_suof_img_hdr img_header;
+
+       if (!suof_ptr || suof_size == 0) {
+               pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
+               return -EINVAL;
+       }
+       if (qat_uclo_check_suof_format(suof_ptr))
+               return -EINVAL;
+       ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
+       if (ret)
+               return ret;
+       suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
+                        ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
+
+       qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
+       suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
+
+       if (suof_handle->img_table.num_simgs != 0) {
+               suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
+                                      sizeof(img_header),
+                                      GFP_KERNEL);
+               if (!suof_img_hdr)
+                       return -ENOMEM;
+               suof_handle->img_table.simg_hdr = suof_img_hdr;
+
+               for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
+                       qat_uclo_map_simg(handle, &suof_img_hdr[i],
+                                         &suof_chunk_hdr[1 + i]);
+                       ret = qat_uclo_check_simg_compat(handle,
+                                                        &suof_img_hdr[i]);
+                       if (ret)
+                               return ret;
+                       suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
+                       if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
+                               ae0_img = i;
+               }
+
+               if (!handle->chip_info->tgroup_share_ustore) {
+                       qat_uclo_tail_img(suof_img_hdr, ae0_img,
+                                         suof_handle->img_table.num_simgs);
+               }
+       }
+       return 0;
+}
+
+#define ADD_ADDR(high, low)  ((((u64)high) << 32) + low)
+#define BITS_IN_DWORD 32
+
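+/*
+ * Point the FCU at the CSS header of a mapped image and poll the
+ * status CSR until verification succeeds, fails or the retry limit
+ * is reached.
+ */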
+static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
+                           struct icp_qat_fw_auth_desc *desc)
+{
+       u32 fcu_sts, retry = 0;
+       u32 fcu_ctl_csr, fcu_sts_csr;
+       u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
+       u64 bus_addr;
+
+       bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
+                          - sizeof(struct icp_qat_auth_chunk);
+
+       fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+       fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+       fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
+       fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
+
+       SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
+       SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
+       SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
+
+       do {
+               msleep(FW_AUTH_WAIT_PERIOD);
+               fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+               if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
+                       goto auth_fail;
+               if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
+                       if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
+                               return 0;
+       } while (retry++ < FW_AUTH_MAX_RETRY);
+auth_fail:
+       pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
+              fcu_sts & FCU_AUTH_STS_MASK, retry);
+       return -EINVAL;
+}
+
+static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle,
+                                 int imgid)
+{
+       struct icp_qat_suof_handle *sobj_handle;
+
+       if (!handle->chip_info->tgroup_share_ustore)
+               return false;
+
+       sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
+       if (handle->hal_handle->admin_ae_mask &
+           sobj_handle->img_table.simg_hdr[imgid].ae_mask)
+               return false;
+
+       return true;
+}
+
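+/*
+ * Load an authenticated image into every AE set in the descriptor's
+ * ae_mask at once; only parts with a shared ustore target group
+ * support this broadcast mode.
+ */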
+static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
+                                     struct icp_qat_fw_auth_desc *desc)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned long desc_ae_mask = desc->ae_mask;
+       u32 fcu_sts, ae_broadcast_mask = 0;
+       u32 fcu_loaded_csr, ae_loaded;
+       u32 fcu_sts_csr, fcu_ctl_csr;
+       unsigned int ae, retry = 0;
+
+       if (handle->chip_info->tgroup_share_ustore) {
+               fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+               fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+               fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
+       } else {
+               pr_err("Chip 0x%x doesn't support broadcast load\n",
+                      handle->pci_dev->device);
+               return -EINVAL;
+       }
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
+                       pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
+                       return -EINVAL;
+               }
+
+               if (test_bit(ae, &desc_ae_mask))
+                       ae_broadcast_mask |= 1 << ae;
+       }
+
+       if (ae_broadcast_mask) {
+               SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
+                           ae_broadcast_mask);
+
+               SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD);
+
+               do {
+                       msleep(FW_AUTH_WAIT_PERIOD);
+                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+                       fcu_sts &= FCU_AUTH_STS_MASK;
+
+                       if (fcu_sts == FCU_STS_LOAD_FAIL) {
+                               pr_err("Broadcast load failed: 0x%x)\n", fcu_sts);
+                               return -EINVAL;
+                       } else if (fcu_sts == FCU_STS_LOAD_DONE) {
+                               ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr);
+                               ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos;
+
+                               if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask)
+                                       break;
+                       }
+               } while (retry++ < FW_AUTH_MAX_RETRY);
+
+               if (retry > FW_AUTH_MAX_RETRY) {
+                       pr_err("QAT: broadcast load failed timeout %d\n", retry);
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
+                              struct icp_firml_dram_desc *dram_desc,
+                              unsigned int size)
+{
+       void *vptr;
+       dma_addr_t ptr;
+
+       vptr = dma_alloc_coherent(&handle->pci_dev->dev,
+                                 size, &ptr, GFP_KERNEL);
+       if (!vptr)
+               return -ENOMEM;
+       dram_desc->dram_base_addr_v = vptr;
+       dram_desc->dram_bus_addr = ptr;
+       dram_desc->dram_size = size;
+       return 0;
+}
+
+static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
+                              struct icp_firml_dram_desc *dram_desc)
+{
+       if (handle && dram_desc && dram_desc->dram_base_addr_v) {
+               dma_free_coherent(&handle->pci_dev->dev,
+                                 (size_t)(dram_desc->dram_size),
+                                 dram_desc->dram_base_addr_v,
+                                 dram_desc->dram_bus_addr);
+       }
+
+       if (dram_desc)
+               memset(dram_desc, 0, sizeof(*dram_desc));
+}
+
+static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
+                                  struct icp_qat_fw_auth_desc **desc)
+{
+       struct icp_firml_dram_desc dram_desc;
+
+       if (*desc) {
+               dram_desc.dram_base_addr_v = *desc;
+               dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
+                                          (*desc))->chunk_bus_addr;
+               dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
+                                      (*desc))->chunk_size;
+               qat_uclo_simg_free(handle, &dram_desc);
+       }
+}
+
+static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
+                               char *image, unsigned int size,
+                               unsigned int fw_type)
+{
+       char *fw_type_name = fw_type ? "MMP" : "AE";
+       unsigned int css_dword_size = sizeof(u32);
+
+       if (handle->chip_info->fw_auth) {
+               struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+               unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+
+               if ((css_hdr->header_len * css_dword_size) != header_len)
+                       goto err;
+               if ((css_hdr->size * css_dword_size) != size)
+                       goto err;
+               if (fw_type != css_hdr->fw_type)
+                       goto err;
+               if (size <= header_len)
+                       goto err;
+               size -= header_len;
+       }
+
+       if (fw_type == CSS_AE_FIRMWARE) {
+               if (size < sizeof(struct icp_qat_simg_ae_mode *) +
+                   ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
+                       goto err;
+               if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
+                       goto err;
+       } else if (fw_type == CSS_MMP_FIRMWARE) {
+               if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
+                       goto err;
+       } else {
+               pr_err("QAT: Unsupported firmware type\n");
+               return -EINVAL;
+       }
+       return 0;
+
+err:
+       pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+       return -EINVAL;
+}
+
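+/*
+ * Copy an image into DMA-coherent memory laid out for the FCU: CSS
+ * header, FWSK modulus, padding, exponent, signature, then the image
+ * body, recording the bus address of each piece in the auth descriptor.
+ */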
+static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
+                               char *image, unsigned int size,
+                               struct icp_qat_fw_auth_desc **desc)
+{
+       struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+       struct icp_qat_fw_auth_desc *auth_desc;
+       struct icp_qat_auth_chunk *auth_chunk;
+       u64 virt_addr,  bus_addr, virt_base;
+       unsigned int length, simg_offset = sizeof(*auth_chunk);
+       struct icp_qat_simg_ae_mode *simg_ae_mode;
+       struct icp_firml_dram_desc img_desc;
+
+       if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
+               pr_err("QAT: error, input image size overflow %d\n", size);
+               return -EINVAL;
+       }
+       length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
+                ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
+                size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
+       if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
+               pr_err("QAT: error, allocate continuous dram fail\n");
+               return -ENOMEM;
+       }
+
+       auth_chunk = img_desc.dram_base_addr_v;
+       auth_chunk->chunk_size = img_desc.dram_size;
+       auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
+       virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
+       bus_addr  = img_desc.dram_bus_addr + simg_offset;
+       auth_desc = img_desc.dram_base_addr_v;
+       auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+       auth_desc->css_hdr_low = (unsigned int)bus_addr;
+       virt_addr = virt_base;
+
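+       /*
+        * Copy the image into DRAM in the order the FCU expects: CSS
+        * header, FWSK public key (modulus, pad, exponent), signature,
+        * then the image body, recording each chunk's bus address in
+        * the auth descriptor along the way.
+        */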
+       memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
+       /* pub key */
+       bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
+                          sizeof(*css_hdr);
+       virt_addr = virt_addr + sizeof(*css_hdr);
+
+       auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+       auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
+
+       memcpy((void *)(uintptr_t)virt_addr,
+              (void *)(image + sizeof(*css_hdr)),
+              ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
+       /* padding */
+       memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
+              0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
+
+       /* exponent */
+       memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+              ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
+              (void *)(image + sizeof(*css_hdr) +
+                       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
+              sizeof(unsigned int));
+
+       /* signature */
+       bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
+                           auth_desc->fwsk_pub_low) +
+                  ICP_QAT_CSS_FWSK_PUB_LEN(handle);
+       virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
+       auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+       auth_desc->signature_low = (unsigned int)bus_addr;
+
+       memcpy((void *)(uintptr_t)virt_addr,
+              (void *)(image + sizeof(*css_hdr) +
+              ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+              ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
+              ICP_QAT_CSS_SIGNATURE_LEN(handle));
+
+       bus_addr = ADD_ADDR(auth_desc->signature_high,
+                           auth_desc->signature_low) +
+                  ICP_QAT_CSS_SIGNATURE_LEN(handle);
+       virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+
+       auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+       auth_desc->img_low = (unsigned int)bus_addr;
+       auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
+       memcpy((void *)(uintptr_t)virt_addr,
+              (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
+              auth_desc->img_len);
+       virt_addr = virt_base;
+       /* AE firmware */
+       if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
+           CSS_AE_FIRMWARE) {
+               auth_desc->img_ae_mode_data_high = auth_desc->img_high;
+               auth_desc->img_ae_mode_data_low = auth_desc->img_low;
+               bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
+                                   auth_desc->img_ae_mode_data_low) +
+                          sizeof(struct icp_qat_simg_ae_mode);
+
+               auth_desc->img_ae_init_data_high = (unsigned int)
+                                                  (bus_addr >> BITS_IN_DWORD);
+               auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
+               bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
+               auth_desc->img_ae_insts_high = (unsigned int)
+                                              (bus_addr >> BITS_IN_DWORD);
+               auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
+               virt_addr += sizeof(struct icp_qat_css_hdr);
+               virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
+               virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+               simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr;
+               auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
+       } else {
+               auth_desc->img_ae_insts_high = auth_desc->img_high;
+               auth_desc->img_ae_insts_low = auth_desc->img_low;
+       }
+       *desc = auth_desc;
+       return 0;
+}
+
+static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
+                           struct icp_qat_fw_auth_desc *desc)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       u32 fcu_sts_csr, fcu_ctl_csr;
+       u32 loaded_aes, loaded_csr;
+       unsigned int i;
+       u32 fcu_sts;
+
+       fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+       fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+       loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
+
+       for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) {
+               int retry = 0;
+
+               if (!((desc->ae_mask >> i) & 0x1))
+                       continue;
+               if (qat_hal_check_ae_active(handle, i)) {
+                       pr_err("QAT: AE %d is active\n", i);
+                       return -EINVAL;
+               }
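+               /* kick off an FCU LOAD command for this AE */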
+               SET_CAP_CSR(handle, fcu_ctl_csr,
+                           (FCU_CTRL_CMD_LOAD |
+                           (1 << FCU_CTRL_BROADCAST_POS) |
+                           (i << FCU_CTRL_AE_POS)));
+
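+               /* poll the FCU status CSR until this AE reports load done */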
+               do {
+                       msleep(FW_AUTH_WAIT_PERIOD);
+                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+                       if ((fcu_sts & FCU_AUTH_STS_MASK) ==
+                           FCU_STS_LOAD_DONE) {
+                               loaded_aes = GET_CAP_CSR(handle, loaded_csr);
+                               loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos;
+                               if (loaded_aes & (1 << i))
+                                       break;
+                       }
+               } while (retry++ < FW_AUTH_MAX_RETRY);
+               if (retry > FW_AUTH_MAX_RETRY) {
+                       pr_err("QAT: firmware load timed out after %d retries\n",
+                              retry);
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
+                                void *addr_ptr, int mem_size)
+{
+       struct icp_qat_suof_handle *suof_handle;
+
+       suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
+       if (!suof_handle)
+               return -ENOMEM;
+       handle->sobj_handle = suof_handle;
+       if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
+               qat_uclo_del_suof(handle);
+               pr_err("QAT: map SUOF failed\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+                      void *addr_ptr, int mem_size)
+{
+       struct icp_qat_fw_auth_desc *desc = NULL;
+       int status = 0;
+       int ret;
+
+       ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
+       if (ret)
+               return ret;
+
+       if (handle->chip_info->fw_auth) {
+               status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
+               if (!status)
+                       status = qat_uclo_auth_fw(handle, desc);
+               qat_uclo_unmap_auth_fw(handle, &desc);
+       } else {
+               if (handle->chip_info->mmp_sram_size < mem_size) {
+                       pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
+                       return -EFBIG;
+               }
+               qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
+       }
+       return status;
+}
+
+static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+                               void *addr_ptr, int mem_size)
+{
+       struct icp_qat_uof_filehdr *filehdr;
+       struct icp_qat_uclo_objhandle *objhdl;
+
+       objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
+       if (!objhdl)
+               return -ENOMEM;
+       objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
+       if (!objhdl->obj_buf)
+               goto out_objbuf_err;
+       filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
+       if (qat_uclo_check_uof_format(filehdr))
+               goto out_objhdr_err;
+       objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
+                                            ICP_QAT_UOF_OBJS);
+       if (!objhdl->obj_hdr) {
+               pr_err("QAT: object file chunk is null\n");
+               goto out_objhdr_err;
+       }
+       handle->obj_handle = objhdl;
+       if (qat_uclo_parse_uof_obj(handle))
+               goto out_overlay_obj_err;
+       return 0;
+
+out_overlay_obj_err:
+       handle->obj_handle = NULL;
+       kfree(objhdl->obj_hdr);
+out_objhdr_err:
+       kfree(objhdl->obj_buf);
+out_objbuf_err:
+       kfree(objhdl);
+       return -ENOMEM;
+}
+
+static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
+                                    struct icp_qat_mof_file_hdr *mof_ptr,
+                                    u32 mof_size)
+{
+       struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
+       unsigned int min_ver_offset;
+       unsigned int checksum;
+
+       mobj_handle->file_id = ICP_QAT_MOF_FID;
+       mobj_handle->mof_buf = (char *)mof_ptr;
+       mobj_handle->mof_size = mof_size;
+
+       min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr,
+                                            min_ver);
+       checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
+                                             min_ver_offset);
+       if (checksum != mof_ptr->checksum) {
+               pr_err("QAT: incorrect MOF checksum\n");
+               return -EINVAL;
+       }
+
+       mobj_handle->checksum = mof_ptr->checksum;
+       mobj_handle->min_ver = mof_ptr->min_ver;
+       mobj_handle->maj_ver = mof_ptr->maj_ver;
+       return 0;
+}
+
+static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
+
+       kfree(mobj_handle->obj_table.obj_hdr);
+       mobj_handle->obj_table.obj_hdr = NULL;
+       kfree(handle->mobj_handle);
+       handle->mobj_handle = NULL;
+}
+
+static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
+                                       char *obj_name, char **obj_ptr,
+                                       unsigned int *obj_size)
+{
+       struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
+       unsigned int i;
+
+       for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
+               if (!strncmp(obj_hdr[i].obj_name, obj_name,
+                            ICP_QAT_SUOF_OBJ_NAME_LEN)) {
+                       *obj_ptr  = obj_hdr[i].obj_buf;
+                       *obj_size = obj_hdr[i].obj_size;
+                       return 0;
+               }
+       }
+
+       pr_err("QAT: object %s not found in MOF\n", obj_name);
+       return -EINVAL;
+}
+
+static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
+                                    struct icp_qat_mof_objhdr *mobj_hdr,
+                                    struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
+{
+       u8 *obj;
+
+       if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG,
+                    ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
+               obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset;
+       } else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG,
+                           ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
+               obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
+       } else {
+               pr_err("QAT: unsupported chunk id\n");
+               return -EINVAL;
+       }
+       mobj_hdr->obj_buf = obj;
+       mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
+       mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str;
+       return 0;
+}
+
+static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
+{
+       struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
+       struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
+       struct icp_qat_mof_obj_hdr *uobj_hdr;
+       struct icp_qat_mof_obj_hdr *sobj_hdr;
+       struct icp_qat_mof_objhdr *mobj_hdr;
+       unsigned int uobj_chunk_num = 0;
+       unsigned int sobj_chunk_num = 0;
+       unsigned int *valid_chunk;
+       int ret, i;
+
+       uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
+       sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
+       if (uobj_hdr)
+               uobj_chunk_num = uobj_hdr->num_chunks;
+       if (sobj_hdr)
+               sobj_chunk_num = sobj_hdr->num_chunks;
+
+       mobj_hdr = kcalloc(uobj_chunk_num + sobj_chunk_num,
+                          sizeof(*mobj_hdr), GFP_KERNEL);
+       if (!mobj_hdr)
+               return -ENOMEM;
+
+       mobj_handle->obj_table.obj_hdr = mobj_hdr;
+       valid_chunk = &mobj_handle->obj_table.num_objs;
+       uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
+                       ((uintptr_t)uobj_hdr + sizeof(*uobj_hdr));
+       sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
+                       ((uintptr_t)sobj_hdr + sizeof(*sobj_hdr));
+
+       /* map uof objects */
+       for (i = 0; i < uobj_chunk_num; i++) {
+               ret = qat_uclo_map_obj_from_mof(mobj_handle,
+                                               &mobj_hdr[*valid_chunk],
+                                               &uobj_chunkhdr[i]);
+               if (ret)
+                       return ret;
+               (*valid_chunk)++;
+       }
+
+       /* map suof objects */
+       for (i = 0; i < sobj_chunk_num; i++) {
+               ret = qat_uclo_map_obj_from_mof(mobj_handle,
+                                               &mobj_hdr[*valid_chunk],
+                                               &sobj_chunkhdr[i]);
+               if (ret)
+                       return ret;
+               (*valid_chunk)++;
+       }
+
+       if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
+               pr_err("QAT: inconsistent UOF/SUOF chunk count\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
+                                    struct icp_qat_mof_chunkhdr *mof_chunkhdr)
+{
+       char **sym_str = (char **)&mobj_handle->sym_str;
+       unsigned int *sym_size = &mobj_handle->sym_size;
+       struct icp_qat_mof_str_table *str_table_obj;
+
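+       /* the chunk starts with the string table length; symbol names follow */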
+       *sym_size = *(unsigned int *)(uintptr_t)
+                   (mof_chunkhdr->offset + mobj_handle->mof_buf);
+       *sym_str = (char *)(uintptr_t)
+                  (mobj_handle->mof_buf + mof_chunkhdr->offset +
+                   sizeof(str_table_obj->tab_len));
+}
+
+static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
+                                  struct icp_qat_mof_chunkhdr *mof_chunkhdr)
+{
+       char *chunk_id = mof_chunkhdr->chunk_id;
+
+       if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
+               qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
+       else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
+               mobj_handle->uobjs_hdr = mobj_handle->mof_buf +
+                                        mof_chunkhdr->offset;
+       else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
+               mobj_handle->sobjs_hdr = mobj_handle->mof_buf +
+                                        mof_chunkhdr->offset;
+}
+
+static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
+{
+       int maj = mof_hdr->maj_ver & 0xff;
+       int min = mof_hdr->min_ver & 0xff;
+
+       if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
+               pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
+               return -EINVAL;
+       }
+
+       if (mof_hdr->num_chunks <= 0x1) {
+               pr_err("QAT: invalid MOF chunk count\n");
+               return -EINVAL;
+       }
+       if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
+               pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
+                      maj, min);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
+                               struct icp_qat_mof_file_hdr *mof_ptr,
+                               u32 mof_size, char *obj_name, char **obj_ptr,
+                               unsigned int *obj_size)
+{
+       struct icp_qat_mof_chunkhdr *mof_chunkhdr;
+       unsigned int file_id = mof_ptr->file_id;
+       struct icp_qat_mof_handle *mobj_handle;
+       unsigned short chunks_num;
+       unsigned int i;
+       int ret;
+
+       if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) {
+               if (obj_ptr)
+                       *obj_ptr = (char *)mof_ptr;
+               if (obj_size)
+                       *obj_size = mof_size;
+               return 0;
+       }
+       if (qat_uclo_check_mof_format(mof_ptr))
+               return -EINVAL;
+
+       mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL);
+       if (!mobj_handle)
+               return -ENOMEM;
+
+       handle->mobj_handle = mobj_handle;
+       ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
+       if (ret)
+               return ret;
+
+       mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr);
+       chunks_num = mof_ptr->num_chunks;
+
+       /* Parse MOF file chunks */
+       for (i = 0; i < chunks_num; i++)
+               qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
+
+       /* the symbol table plus at least one of uobjs/sobjs must be present */
+       if (!mobj_handle->sym_str ||
+           (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
+               return -EINVAL;
+
+       ret = qat_uclo_map_objs_from_mof(mobj_handle);
+       if (ret)
+               return ret;
+
+       /* Seek specified uof object in MOF */
+       return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name,
+                                           obj_ptr, obj_size);
+}
+
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+                    void *addr_ptr, u32 mem_size, char *obj_name)
+{
+       char *obj_addr;
+       u32 obj_size;
+       int ret;
+
+       BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
+                    (sizeof(handle->hal_handle->ae_mask) * 8));
+
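+       /* reject null arguments and buffers too small for any valid header */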
+       if (!handle || !addr_ptr || mem_size < 24)
+               return -EINVAL;
+
+       if (obj_name) {
+               ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name,
+                                          &obj_addr, &obj_size);
+               if (ret)
+                       return ret;
+       } else {
+               obj_addr = addr_ptr;
+               obj_size = mem_size;
+       }
+
+       return (handle->chip_info->fw_auth) ?
+                       qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
+                       qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
+}
+
+void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int a;
+
+       if (handle->mobj_handle)
+               qat_uclo_del_mof(handle);
+       if (handle->sobj_handle)
+               qat_uclo_del_suof(handle);
+       if (!obj_handle)
+               return;
+
+       kfree(obj_handle->uword_buf);
+       for (a = 0; a < obj_handle->uimage_num; a++)
+               kfree(obj_handle->ae_uimage[a].page);
+
+       for (a = 0; a < handle->hal_handle->ae_max_num; a++)
+               qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
+
+       kfree(obj_handle->obj_hdr);
+       kfree(obj_handle->obj_buf);
+       kfree(obj_handle);
+       handle->obj_handle = NULL;
+}
+
+static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
+                                struct icp_qat_uclo_encap_page *encap_page,
+                                u64 *uword, unsigned int addr_p,
+                                unsigned int raddr, u64 fill)
+{
+       unsigned int i, addr;
+       u64 uwrd = 0;
+
+       if (!encap_page) {
+               *uword = fill;
+               return;
+       }
+       addr = (encap_page->page_region) ? raddr : addr_p;
+       for (i = 0; i < encap_page->uwblock_num; i++) {
+               if (addr >= encap_page->uwblock[i].start_addr &&
+                   addr <= encap_page->uwblock[i].start_addr +
+                   encap_page->uwblock[i].words_num - 1) {
+                       addr -= encap_page->uwblock[i].start_addr;
+                       addr *= obj_handle->uword_in_bytes;
+                       memcpy(&uwrd, (void *)(((uintptr_t)
+                              encap_page->uwblock[i].micro_words) + addr),
+                              obj_handle->uword_in_bytes);
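+                       /* microwords are 44 bits wide; mask off the rest */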
+                       uwrd = uwrd & GENMASK_ULL(43, 0);
+               }
+       }
+       *uword = uwrd;
+       if (*uword == INVLD_UWORD)
+               *uword = fill;
+}
+
+static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
+                                       struct icp_qat_uclo_encap_page
+                                       *encap_page, unsigned int ae)
+{
+       unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       u64 fill_pat;
+
+       /* load the page starting at appropriate ustore address */
+       /* get fill-pattern from an image -- they are all the same */
+       memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
+              sizeof(u64));
+       uw_physical_addr = encap_page->beg_addr_p;
+       uw_relative_addr = 0;
+       words_num = encap_page->micro_words_num;
+       while (words_num) {
+               cpylen = min_t(unsigned int, words_num, UWORD_CPYBUF_SIZE);
+
+               /* load the buffer */
+               for (i = 0; i < cpylen; i++)
+                       qat_uclo_fill_uwords(obj_handle, encap_page,
+                                            &obj_handle->uword_buf[i],
+                                            uw_physical_addr + i,
+                                            uw_relative_addr + i, fill_pat);
+
+               /* copy the buffer to ustore */
+               qat_hal_wr_uwords(handle, (unsigned char)ae,
+                                 uw_physical_addr, cpylen,
+                                 obj_handle->uword_buf);
+
+               uw_physical_addr += cpylen;
+               uw_relative_addr += cpylen;
+               words_num -= cpylen;
+       }
+}
+
+static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
+                                   struct icp_qat_uof_image *image)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+       unsigned long ae_assigned = image->ae_assigned;
+       struct icp_qat_uclo_aedata *aed;
+       unsigned int ctx_mask, s;
+       struct icp_qat_uclo_page *page;
+       unsigned char ae;
+       int ctx;
+
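+       /*
+        * Eight-context images run on every CTX; four-context images
+        * use only the even-numbered CTXs.
+        */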
+       if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
+               ctx_mask = 0xff;
+       else
+               ctx_mask = 0x55;
+       /* load the default page and set assigned CTX PC
+        * to the entry point address
+        */
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               if (!test_bit(ae, &cfg_ae_mask))
+                       continue;
+
+               if (!test_bit(ae, &ae_assigned))
+                       continue;
+
+               aed = &obj_handle->ae_data[ae];
+               /* find the slice to which this image is assigned */
+               for (s = 0; s < aed->slice_num; s++) {
+                       if (image->ctx_assigned &
+                           aed->ae_slices[s].ctx_mask_assigned)
+                               break;
+               }
+               if (s >= aed->slice_num)
+                       continue;
+               page = aed->ae_slices[s].page;
+               if (!page->encap_page->def_page)
+                       continue;
+               qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
+
+               page = aed->ae_slices[s].page;
+               for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
+                       aed->ae_slices[s].cur_page[ctx] =
+                                       (ctx_mask & (1 << ctx)) ? page : NULL;
+               qat_hal_set_live_ctx(handle, (unsigned char)ae,
+                                    image->ctx_assigned);
+               qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
+                              image->entry_address);
+       }
+}
+
+static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int i;
+       struct icp_qat_fw_auth_desc *desc = NULL;
+       struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+       struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
+       int ret;
+
+       for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
+               ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
+                                          simg_hdr[i].simg_len,
+                                          CSS_AE_FIRMWARE);
+               if (ret)
+                       return ret;
+
+               if (qat_uclo_map_auth_fw(handle,
+                                        (char *)simg_hdr[i].simg_buf,
+                                        (unsigned int)simg_hdr[i].simg_len,
+                                        &desc))
+                       goto wr_err;
+               if (qat_uclo_auth_fw(handle, desc))
+                       goto wr_err;
+               if (qat_uclo_is_broadcast(handle, i)) {
+                       if (qat_uclo_broadcast_load_fw(handle, desc))
+                               goto wr_err;
+               } else {
+                       if (qat_uclo_load_fw(handle, desc))
+                               goto wr_err;
+               }
+               qat_uclo_unmap_auth_fw(handle, &desc);
+       }
+       return 0;
+wr_err:
+       qat_uclo_unmap_auth_fw(handle, &desc);
+       return -EINVAL;
+}
+
+static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int i;
+
+       if (qat_uclo_init_globals(handle))
+               return -EINVAL;
+       for (i = 0; i < obj_handle->uimage_num; i++) {
+               if (!obj_handle->ae_uimage[i].img_ptr)
+                       return -EINVAL;
+               if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
+                       return -EINVAL;
+               qat_uclo_wr_uimage_page(handle,
+                                       obj_handle->ae_uimage[i].img_ptr);
+       }
+       return 0;
+}
+
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+{
+       return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) :
+                                             qat_uclo_wr_uof_img(handle);
+}
+
+int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
+                            unsigned int cfg_ae_mask)
+{
+       if (!cfg_ae_mask)
+               return -EINVAL;
+
+       handle->cfg_ae_mask = cfg_ae_mask;
+       return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
new file mode 100644 (file)
index 0000000..38d6f8e
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
+qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
new file mode 100644 (file)
index 0000000..1ebe0b3
--- /dev/null
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include "adf_dh895xcc_hw_data.h"
+#include "icp_qat_hw.h"
+
+#define ADF_DH895XCC_VF_MSK    0xFFFFFFFF
+
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
+       0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
+};
+
+static struct adf_hw_device_class dh895xcc_class = {
+       .name = ADF_DH895XCC_DEVICE_NAME,
+       .type = DEV_DH895XCC,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       u32 fuses = self->fuses;
+
+       return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
+                        ADF_DH895XCC_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       u32 fuses = self->fuses;
+
+       return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCC_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCC_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCC_SRAM_BAR;
+}
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+       u32 capabilities;
+       u32 legfuses;
+
+       capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+                      ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+                      ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+                      ICP_ACCEL_CAPABILITIES_CIPHER |
+                      ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+       /* Read accelerator capabilities mask */
+       pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
+
+       /* A set bit in legfuses means the feature is OFF in this SKU */
+       if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+       if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+       return capabilities;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
+                 >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
+
+       switch (sku) {
+       case ADF_DH895XCC_FUSECTL_SKU_1:
+               return DEV_SKU_1;
+       case ADF_DH895XCC_FUSECTL_SKU_2:
+               return DEV_SKU_2;
+       case ADF_DH895XCC_FUSECTL_SKU_3:
+               return DEV_SKU_3;
+       case ADF_DH895XCC_FUSECTL_SKU_4:
+               return DEV_SKU_4;
+       default:
+               return DEV_SKU_UNKNOWN;
+       }
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+       return thrd_to_arb_map;
+}
+
+static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+       /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+       if (vf_mask & 0xFFFF) {
+               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+                         & ~ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+       }
+
+       /* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
+       if (vf_mask >> 16) {
+               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+                         & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+       }
+}
+
+static void disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       u32 val;
+
+       /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+       val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+             | ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+
+       /* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
+       val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+             | ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+}
+
+static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       u32 sources, pending, disabled;
+       u32 errsou3, errmsk3;
+       u32 errsou5, errmsk5;
+
+       /* Get the interrupt sources triggered by VFs */
+       errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+       errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
+       sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
+                 | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
+
+       if (!sources)
+               return 0;
+
+       /* Get the already disabled interrupts */
+       errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+       errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
+       disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
+                  | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
+
+       pending = sources & ~disabled;
+       if (!pending)
+               return 0;
+
+       /* Due to HW limitations, when disabling the interrupts, we can't
+        * just disable the requested sources, as this would lead to missed
+        * interrupts if the sources change just before writing to ERRMSK3 and
+        * ERRMSK5.
+        * To work around it, disable all and re-enable only the sources that
+        * are not in vf_mask and were not already disabled. Re-enabling will
+        * trigger a new interrupt for the sources that have changed in the
+        * meantime, if any.
+        */
+       errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+       errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+       errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
+       errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+       /* Return the sources of the (new) interrupt(s) */
+       return pending;
+}
+
+static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
+{
+       adf_gen2_cfg_iov_thds(accel_dev, enable,
+                             ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS,
+                             ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS);
+}
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &dh895xcc_class;
+       hw_data->instance_id = dh895xcc_class.instances++;
+       hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_isr_resource_alloc;
+       hw_data->free_irq = adf_isr_resource_free;
+       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_accel_cap = get_accel_cap;
+       hw_data->get_num_accels = adf_gen2_get_num_accels;
+       hw_data->get_num_aes = adf_gen2_get_num_aes;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_admin_info = adf_gen2_get_admin_info;
+       hw_data->get_arb_info = adf_gen2_get_arb_info;
+       hw_data->get_sram_bar_id = get_sram_bar_id;
+       hw_data->get_sku = get_sku;
+       hw_data->fw_name = ADF_DH895XCC_FW;
+       hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
+       hw_data->init_admin_comms = adf_init_admin_comms;
+       hw_data->exit_admin_comms = adf_exit_admin_comms;
+       hw_data->configure_iov_threads = configure_iov_threads;
+       hw_data->send_admin_init = adf_send_admin_init;
+       hw_data->init_arb = adf_init_arb;
+       hw_data->exit_arb = adf_exit_arb;
+       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+       hw_data->enable_ints = adf_gen2_enable_ints;
+       hw_data->reset_device = adf_reset_sbr;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->dev_config = adf_gen2_dev_config;
+
+       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+       hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
+       hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
+       hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
new file mode 100644 (file)
index 0000000..7b674bb
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_DH895x_HW_DATA_H_
+#define ADF_DH895x_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_DH895XCC_SRAM_BAR 0
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
+#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
+#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
+#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
+#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
+#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
+#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
+#define ADF_DH895XCC_MAX_ACCELERATORS 6
+#define ADF_DH895XCC_MAX_ACCELENGINES 12
+#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
+#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
+#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
+#define ADF_DH895XCC_ETR_MAX_BANKS 32
+
+/* Masks for VF2PF interrupts */
+#define ADF_DH895XCC_ERR_REG_VF2PF_L(vf_src)   (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask)  (((vf_mask) & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src)   (((vf_src) & 0x0000FFFF) << 16)
+#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask)  ((vf_mask) >> 16)
+
+/* AE to function mapping */
+#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
+#define ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS 12
+
+/* FW names */
+#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
new file mode 100644 (file)
index 0000000..e18860a
--- /dev/null
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_dh895xcc_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_DH895XCC_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+       .sriov_configure = adf_sriov_configure,
+       .err_handler = &adf_err_handler,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+                       adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+               /* If the accelerator is connected to a node with no memory
+                * there is no point in using the accelerator since the remote
+                * memory transaction will be very slow.
+                */
+               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+               return -EINVAL;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /* Add accel device to accel table.
+        * This should be called before adf_cleanup_accel is called
+        */
+       if (adf_devmgr_add_dev(accel_dev, NULL)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_dh895xcc(accel_dev->hw_device);
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+                             &hw_data->fuses);
+
+       /* Get Accelerators and Accelerators Engines masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+       /* If the device has no acceleration engines then ignore it. */
+       if (!hw_data->accel_mask || !hw_data->ae_mask ||
+           ((~hw_data->ae_mask) & 0x01)) {
+               dev_err(&pdev->dev, "No acceleration units found\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
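+       /* raise the PCIe max read request size to 1024 bytes */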
+       pcie_set_readrq(pdev, 1024);
+
+       /* enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* set a 48-bit DMA mask */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Get accelerator capabilities mask */
+       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+
+       /* Find and map all the device's BARS */
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+
+       if (pci_save_state(pdev)) {
+               dev_err(&pdev->dev, "Failed to save pci state\n");
+               ret = -ENOMEM;
+               goto out_err_free_reg;
+       }
+
+       ret = adf_dev_up(accel_dev, true);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_DH895XCC_FW);
+MODULE_FIRMWARE(ADF_DH895XCC_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
new file mode 100644 (file)
index 0000000..0153c85
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
+qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
new file mode 100644 (file)
index 0000000..70e56cc
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
+#include "adf_dh895xccvf_hw_data.h"
+
+static struct adf_hw_device_class dh895xcciov_class = {
+       .name = ADF_DH895XCCVF_DEVICE_NAME,
+       .type = DEV_DH895XCCVF,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       return DEV_SKU_VF;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &dh895xcciov_class;
+       hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+       hw_data->free_irq = adf_vf_isr_resource_free;
+       hw_data->enable_error_correction = adf_vf_void_noop;
+       hw_data->init_admin_comms = adf_vf_int_noop;
+       hw_data->exit_admin_comms = adf_vf_void_noop;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
+       hw_data->init_arb = adf_vf_int_noop;
+       hw_data->exit_arb = adf_vf_void_noop;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_num_accels = get_num_accels;
+       hw_data->get_num_aes = get_num_aes;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_sku = get_sku;
+       hw_data->enable_ints = adf_vf_void_noop;
+       hw_data->dev_class->instances++;
+       hw_data->dev_config = adf_gen2_dev_config;
+       adf_devmgr_update_class_index(hw_data);
+       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+       adf_devmgr_update_class_index(hw_data);
+}
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
new file mode 100644 (file)
index 0000000..6973fa9
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
+#ifndef ADF_DH895XVF_HW_DATA_H_
+#define ADF_DH895XVF_HW_DATA_H_
+
+#define ADF_DH895XCCIOV_PMISC_BAR 1
+#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
+#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
+#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
+#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
+#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
+#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCCIOV_ETR_BAR 0
+#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
new file mode 100644 (file)
index 0000000..96854a1
--- /dev/null
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_dh895xccvf_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_DH895XCCVF_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       struct adf_accel_dev *pf;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
+                       adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+       adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_dev *pf;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       accel_dev->is_vf = true;
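+       /* Look up the parent PF's accel_dev through the VF's physfn link */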
+       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /* Add accel device to accel table */
+       if (adf_devmgr_add_dev(accel_dev, pf)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
+
+       /* Get accelerator and accelerator engine masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* set the DMA mask */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Find and map all the device's BARs */
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
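+       /* 64-bit BARs occupy two positions in the mask, hence the "* 2" bound */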
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+       /* Completion for VF2PF request/response message exchange */
+       init_completion(&accel_dev->vf.msg_received);
+
+       ret = adf_dev_up(accel_dev, false);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_flush_vf_wq(accel_dev);
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
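+/* Load the intel_qat core module (the PF driver) before registering the VF */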
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+       adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
deleted file mode 100644 (file)
index b63e235..0000000
+++ /dev/null
@@ -1,1601 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel IXP4xx NPE-C crypto driver
- *
- * Copyright (C) 2008 Christian Hohnstaedt <[email protected]>
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/rtnetlink.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/gfp.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#include <crypto/ctr.h>
-#include <crypto/internal/des.h>
-#include <crypto/aes.h>
-#include <crypto/hmac.h>
-#include <crypto/sha1.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/authenc.h>
-#include <crypto/scatterwalk.h>
-
-#include <linux/soc/ixp4xx/npe.h>
-#include <linux/soc/ixp4xx/qmgr.h>
-
-/* Intermittent includes, delete this after v5.14-rc1 */
-#include <linux/soc/ixp4xx/cpu.h>
-
-#define MAX_KEYLEN 32
-
-/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
-#define NPE_CTX_LEN 80
-#define AES_BLOCK128 16
-
-#define NPE_OP_HASH_VERIFY   0x01
-#define NPE_OP_CCM_ENABLE    0x04
-#define NPE_OP_CRYPT_ENABLE  0x08
-#define NPE_OP_HASH_ENABLE   0x10
-#define NPE_OP_NOT_IN_PLACE  0x20
-#define NPE_OP_HMAC_DISABLE  0x40
-#define NPE_OP_CRYPT_ENCRYPT 0x80
-
-#define NPE_OP_CCM_GEN_MIC   0xcc
-#define NPE_OP_HASH_GEN_ICV  0x50
-#define NPE_OP_ENC_GEN_KEY   0xc9
-
-#define MOD_ECB     0x0000
-#define MOD_CTR     0x1000
-#define MOD_CBC_ENC 0x2000
-#define MOD_CBC_DEC 0x3000
-#define MOD_CCM_ENC 0x4000
-#define MOD_CCM_DEC 0x5000
-
-#define KEYLEN_128  4
-#define KEYLEN_192  6
-#define KEYLEN_256  8
-
-#define CIPH_DECR   0x0000
-#define CIPH_ENCR   0x0400
-
-#define MOD_DES     0x0000
-#define MOD_TDEA2   0x0100
-#define MOD_3DES    0x0200
-#define MOD_AES     0x0800
-#define MOD_AES128  (0x0800 | KEYLEN_128)
-#define MOD_AES192  (0x0900 | KEYLEN_192)
-#define MOD_AES256  (0x0a00 | KEYLEN_256)
-
-#define MAX_IVLEN   16
-#define NPE_QLEN    16
-/* Extra descriptors for configuration requests issued while
- * the first NPE_QLEN crypt_ctl entries are busy */
-#define NPE_QLEN_TOTAL 64
-
-#define CTL_FLAG_UNUSED                0x0000
-#define CTL_FLAG_USED          0x1000
-#define CTL_FLAG_PERFORM_ABLK  0x0001
-#define CTL_FLAG_GEN_ICV       0x0002
-#define CTL_FLAG_GEN_REVAES    0x0004
-#define CTL_FLAG_PERFORM_AEAD  0x0008
-#define CTL_FLAG_MASK          0x000f
-
-#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
-
-#define MD5_DIGEST_SIZE   16
-
-struct buffer_desc {
-       u32 phys_next;
-#ifdef __ARMEB__
-       u16 buf_len;
-       u16 pkt_len;
-#else
-       u16 pkt_len;
-       u16 buf_len;
-#endif
-       dma_addr_t phys_addr;
-       u32 __reserved[4];
-       struct buffer_desc *next;
-       enum dma_data_direction dir;
-};
-
-struct crypt_ctl {
-#ifdef __ARMEB__
-       u8 mode;                /* NPE_OP_*  operation mode */
-       u8 init_len;
-       u16 reserved;
-#else
-       u16 reserved;
-       u8 init_len;
-       u8 mode;                /* NPE_OP_*  operation mode */
-#endif
-       u8 iv[MAX_IVLEN];       /* IV for CBC mode or CTR IV for CTR mode */
-       dma_addr_t icv_rev_aes; /* icv or rev aes */
-       dma_addr_t src_buf;
-       dma_addr_t dst_buf;
-#ifdef __ARMEB__
-       u16 auth_offs;          /* Authentication start offset */
-       u16 auth_len;           /* Authentication data length */
-       u16 crypt_offs;         /* Cryption start offset */
-       u16 crypt_len;          /* Cryption data length */
-#else
-       u16 auth_len;           /* Authentication data length */
-       u16 auth_offs;          /* Authentication start offset */
-       u16 crypt_len;          /* Cryption data length */
-       u16 crypt_offs;         /* Cryption start offset */
-#endif
-       u32 aadAddr;            /* Additional Auth Data Addr for CCM mode */
-       u32 crypto_ctx;         /* NPE Crypto Param structure address */
-
-       /* Used by Host: 4*4 bytes*/
-       unsigned int ctl_flags;
-       union {
-               struct skcipher_request *ablk_req;
-               struct aead_request *aead_req;
-               struct crypto_tfm *tfm;
-       } data;
-       struct buffer_desc *regist_buf;
-       u8 *regist_ptr;
-};
-
-struct ablk_ctx {
-       struct buffer_desc *src;
-       struct buffer_desc *dst;
-       u8 iv[MAX_IVLEN];
-       bool encrypt;
-       struct skcipher_request fallback_req;   // keep at the end
-};
-
-struct aead_ctx {
-       struct buffer_desc *src;
-       struct buffer_desc *dst;
-       struct scatterlist ivlist;
-       /* used when the hmac is not on one sg entry */
-       u8 *hmac_virt;
-       int encrypt;
-};
-
-struct ix_hash_algo {
-       u32 cfgword;
-       unsigned char *icv;
-};
-
-struct ix_sa_dir {
-       unsigned char *npe_ctx;
-       dma_addr_t npe_ctx_phys;
-       int npe_ctx_idx;
-       u8 npe_mode;
-};
-
-struct ixp_ctx {
-       struct ix_sa_dir encrypt;
-       struct ix_sa_dir decrypt;
-       int authkey_len;
-       u8 authkey[MAX_KEYLEN];
-       int enckey_len;
-       u8 enckey[MAX_KEYLEN];
-       u8 salt[MAX_IVLEN];
-       u8 nonce[CTR_RFC3686_NONCE_SIZE];
-       unsigned int salted;
-       atomic_t configuring;
-       struct completion completion;
-       struct crypto_skcipher *fallback_tfm;
-};
-
-struct ixp_alg {
-       struct skcipher_alg crypto;
-       const struct ix_hash_algo *hash;
-       u32 cfg_enc;
-       u32 cfg_dec;
-
-       int registered;
-};
-
-struct ixp_aead_alg {
-       struct aead_alg crypto;
-       const struct ix_hash_algo *hash;
-       u32 cfg_enc;
-       u32 cfg_dec;
-
-       int registered;
-};
-
-static const struct ix_hash_algo hash_alg_md5 = {
-       .cfgword        = 0xAA010004,
-       .icv            = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
-                         "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
-};
-
-static const struct ix_hash_algo hash_alg_sha1 = {
-       .cfgword        = 0x00000005,
-       .icv            = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
-                         "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
-};
-
-static struct npe *npe_c;
-
-static unsigned int send_qid;
-static unsigned int recv_qid;
-static struct dma_pool *buffer_pool;
-static struct dma_pool *ctx_pool;
-
-static struct crypt_ctl *crypt_virt;
-static dma_addr_t crypt_phys;
-
-static int support_aes = 1;
-
-static struct platform_device *pdev;
-
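-/* Translate a descriptor between its virtual address and its DMA address */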
-static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
-{
-       return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
-}
-
-static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
-{
-       return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
-}
-
-static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
-{
-       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
-}
-
-static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
-{
-       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
-}
-
-static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
-{
-       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
-}
-
-static int setup_crypt_desc(void)
-{
-       struct device *dev = &pdev->dev;
-
-       BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
-       crypt_virt = dma_alloc_coherent(dev,
-                                       NPE_QLEN * sizeof(struct crypt_ctl),
-                                       &crypt_phys, GFP_ATOMIC);
-       if (!crypt_virt)
-               return -ENOMEM;
-       return 0;
-}
-
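-/* desc_lock protects the shared allocation index of the descriptor ring */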
-static DEFINE_SPINLOCK(desc_lock);
-static struct crypt_ctl *get_crypt_desc(void)
-{
-       int i;
-       static int idx;
-       unsigned long flags;
-
-       spin_lock_irqsave(&desc_lock, flags);
-
-       if (unlikely(!crypt_virt))
-               setup_crypt_desc();
-       if (unlikely(!crypt_virt)) {
-               spin_unlock_irqrestore(&desc_lock, flags);
-               return NULL;
-       }
-       i = idx;
-       if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
-               if (++idx >= NPE_QLEN)
-                       idx = 0;
-               crypt_virt[i].ctl_flags = CTL_FLAG_USED;
-               spin_unlock_irqrestore(&desc_lock, flags);
-               return crypt_virt + i;
-       } else {
-               spin_unlock_irqrestore(&desc_lock, flags);
-               return NULL;
-       }
-}
-
-static DEFINE_SPINLOCK(emerg_lock);
-static struct crypt_ctl *get_crypt_desc_emerg(void)
-{
-       int i;
-       static int idx = NPE_QLEN;
-       struct crypt_ctl *desc;
-       unsigned long flags;
-
-       desc = get_crypt_desc();
-       if (desc)
-               return desc;
-       if (unlikely(!crypt_virt))
-               return NULL;
-
-       spin_lock_irqsave(&emerg_lock, flags);
-       i = idx;
-       if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
-               if (++idx >= NPE_QLEN_TOTAL)
-                       idx = NPE_QLEN;
-               crypt_virt[i].ctl_flags = CTL_FLAG_USED;
-               spin_unlock_irqrestore(&emerg_lock, flags);
-               return crypt_virt + i;
-       } else {
-               spin_unlock_irqrestore(&emerg_lock, flags);
-               return NULL;
-       }
-}
-
-static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
-                          dma_addr_t phys)
-{
-       while (buf) {
-               struct buffer_desc *buf1;
-               u32 phys1;
-
-               buf1 = buf->next;
-               phys1 = buf->phys_next;
-               dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
-               dma_pool_free(buffer_pool, buf, phys);
-               buf = buf1;
-               phys = phys1;
-       }
-}
-
-static struct tasklet_struct crypto_done_tasklet;
-
-static void finish_scattered_hmac(struct crypt_ctl *crypt)
-{
-       struct aead_request *req = crypt->data.aead_req;
-       struct aead_ctx *req_ctx = aead_request_ctx(req);
-       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-       int authsize = crypto_aead_authsize(tfm);
-       int decryptlen = req->assoclen + req->cryptlen - authsize;
-
-       if (req_ctx->encrypt) {
-               scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
-                                        decryptlen, authsize, 1);
-       }
-       dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
-}
-
-static void one_packet(dma_addr_t phys)
-{
-       struct device *dev = &pdev->dev;
-       struct crypt_ctl *crypt;
-       struct ixp_ctx *ctx;
-       int failed;
-
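-       /* Bit 0 of the returned address flags a failed operation;
-        * mask the low status bits off before converting back */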
-       failed = phys & 0x1 ? -EBADMSG : 0;
-       phys &= ~0x3;
-       crypt = crypt_phys2virt(phys);
-
-       switch (crypt->ctl_flags & CTL_FLAG_MASK) {
-       case CTL_FLAG_PERFORM_AEAD: {
-               struct aead_request *req = crypt->data.aead_req;
-               struct aead_ctx *req_ctx = aead_request_ctx(req);
-
-               free_buf_chain(dev, req_ctx->src, crypt->src_buf);
-               free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
-               if (req_ctx->hmac_virt)
-                       finish_scattered_hmac(crypt);
-
-               aead_request_complete(req, failed);
-               break;
-       }
-       case CTL_FLAG_PERFORM_ABLK: {
-               struct skcipher_request *req = crypt->data.ablk_req;
-               struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
-               struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-               unsigned int ivsize = crypto_skcipher_ivsize(tfm);
-               unsigned int offset;
-
-               if (ivsize > 0) {
-                       offset = req->cryptlen - ivsize;
-                       if (req_ctx->encrypt) {
-                               scatterwalk_map_and_copy(req->iv, req->dst,
-                                                        offset, ivsize, 0);
-                       } else {
-                               memcpy(req->iv, req_ctx->iv, ivsize);
-                               memzero_explicit(req_ctx->iv, ivsize);
-                       }
-               }
-
-               if (req_ctx->dst)
-                       free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
-
-               free_buf_chain(dev, req_ctx->src, crypt->src_buf);
-               skcipher_request_complete(req, failed);
-               break;
-       }
-       case CTL_FLAG_GEN_ICV:
-               ctx = crypto_tfm_ctx(crypt->data.tfm);
-               dma_pool_free(ctx_pool, crypt->regist_ptr,
-                             crypt->regist_buf->phys_addr);
-               dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
-               if (atomic_dec_and_test(&ctx->configuring))
-                       complete(&ctx->completion);
-               break;
-       case CTL_FLAG_GEN_REVAES:
-               ctx = crypto_tfm_ctx(crypt->data.tfm);
-               *(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
-               if (atomic_dec_and_test(&ctx->configuring))
-                       complete(&ctx->completion);
-               break;
-       default:
-               BUG();
-       }
-       crypt->ctl_flags = CTL_FLAG_UNUSED;
-}
-
-static void irqhandler(void *_unused)
-{
-       tasklet_schedule(&crypto_done_tasklet);
-}
-
-static void crypto_done_action(unsigned long arg)
-{
-       int i;
-
-       for (i = 0; i < 4; i++) {
-               dma_addr_t phys = qmgr_get_entry(recv_qid);
-               if (!phys)
-                       return;
-               one_packet(phys);
-       }
-       tasklet_schedule(&crypto_done_tasklet);
-}
-
-static int init_ixp_crypto(struct device *dev)
-{
-       struct device_node *np = dev->of_node;
-       u32 msg[2] = { 0, 0 };
-       int ret = -ENODEV;
-       u32 npe_id;
-
-       dev_info(dev, "probing...\n");
-
-       /* Locate the NPE and queue manager to use from device tree */
-       if (IS_ENABLED(CONFIG_OF) && np) {
-               struct of_phandle_args queue_spec;
-               struct of_phandle_args npe_spec;
-
-               ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
-                                                      1, 0, &npe_spec);
-               if (ret) {
-                       dev_err(dev, "no NPE engine specified\n");
-                       return -ENODEV;
-               }
-               npe_id = npe_spec.args[0];
-
-               ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
-                                                      &queue_spec);
-               if (ret) {
-                       dev_err(dev, "no rx queue phandle\n");
-                       return -ENODEV;
-               }
-               recv_qid = queue_spec.args[0];
-
-               ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
-                                                      &queue_spec);
-               if (ret) {
-                       dev_err(dev, "no txready queue phandle\n");
-                       return -ENODEV;
-               }
-               send_qid = queue_spec.args[0];
-       } else {
-               /*
-                * Hardcoded engine when using platform data; this goes away
-                * once we switch to using DT only.
-                */
-               npe_id = 2;
-               send_qid = 29;
-               recv_qid = 30;
-       }
-
-       npe_c = npe_request(npe_id);
-       if (!npe_c)
-               return ret;
-
-       if (!npe_running(npe_c)) {
-               ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
-               if (ret)
-                       goto npe_release;
-               if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
-                       goto npe_error;
-       } else {
-               if (npe_send_message(npe_c, msg, "STATUS_MSG"))
-                       goto npe_error;
-
-               if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
-                       goto npe_error;
-       }
-
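-       /* Firmware feature byte: 3 means no AES support, 4 and 5 support AES */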
-       switch ((msg[1] >> 16) & 0xff) {
-       case 3:
-               dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
-               support_aes = 0;
-               break;
-       case 4:
-       case 5:
-               support_aes = 1;
-               break;
-       default:
-               dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
-               ret = -ENODEV;
-               goto npe_release;
-       }
-       /* buffer_pool is sometimes also used to store the hmac,
-        * so ensure it is large enough
-        */
-       BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
-       buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
-                                     32, 0);
-       ret = -ENOMEM;
-       if (!buffer_pool)
-               goto err;
-
-       ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
-       if (!ctx_pool)
-               goto err;
-
-       ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
-                                "ixp_crypto:out", NULL);
-       if (ret)
-               goto err;
-       ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
-                                "ixp_crypto:in", NULL);
-       if (ret) {
-               qmgr_release_queue(send_qid);
-               goto err;
-       }
-       qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
-       tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
-
-       qmgr_enable_irq(recv_qid);
-       return 0;
-
-npe_error:
-       dev_err(dev, "%s not responding\n", npe_name(npe_c));
-       ret = -EIO;
-err:
-       dma_pool_destroy(ctx_pool);
-       dma_pool_destroy(buffer_pool);
-npe_release:
-       npe_release(npe_c);
-       return ret;
-}
-
-static void release_ixp_crypto(struct device *dev)
-{
-       qmgr_disable_irq(recv_qid);
-       tasklet_kill(&crypto_done_tasklet);
-
-       qmgr_release_queue(send_qid);
-       qmgr_release_queue(recv_qid);
-
-       dma_pool_destroy(ctx_pool);
-       dma_pool_destroy(buffer_pool);
-
-       npe_release(npe_c);
-
-       if (crypt_virt)
-               dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
-                                 crypt_virt, crypt_phys);
-}
-
-static void reset_sa_dir(struct ix_sa_dir *dir)
-{
-       memset(dir->npe_ctx, 0, NPE_CTX_LEN);
-       dir->npe_ctx_idx = 0;
-       dir->npe_mode = 0;
-}
-
-static int init_sa_dir(struct ix_sa_dir *dir)
-{
-       dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
-       if (!dir->npe_ctx)
-               return -ENOMEM;
-
-       reset_sa_dir(dir);
-       return 0;
-}
-
-static void free_sa_dir(struct ix_sa_dir *dir)
-{
-       memset(dir->npe_ctx, 0, NPE_CTX_LEN);
-       dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
-}
-
-static int init_tfm(struct crypto_tfm *tfm)
-{
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-       int ret;
-
-       atomic_set(&ctx->configuring, 0);
-       ret = init_sa_dir(&ctx->encrypt);
-       if (ret)
-               return ret;
-       ret = init_sa_dir(&ctx->decrypt);
-       if (ret)
-               free_sa_dir(&ctx->encrypt);
-
-       return ret;
-}
-
-static int init_tfm_ablk(struct crypto_skcipher *tfm)
-{
-       struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
-       struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
-       const char *name = crypto_tfm_alg_name(ctfm);
-
-       ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
-       if (IS_ERR(ctx->fallback_tfm)) {
-               pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
-                       name, PTR_ERR(ctx->fallback_tfm));
-               return PTR_ERR(ctx->fallback_tfm);
-       }
-
-       pr_info("Fallback for %s is %s\n",
-                crypto_tfm_alg_driver_name(&tfm->base),
-                crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
-                );
-
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
-       return init_tfm(crypto_skcipher_tfm(tfm));
-}
-
-static int init_tfm_aead(struct crypto_aead *tfm)
-{
-       crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
-       return init_tfm(crypto_aead_tfm(tfm));
-}
-
-static void exit_tfm(struct crypto_tfm *tfm)
-{
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       free_sa_dir(&ctx->encrypt);
-       free_sa_dir(&ctx->decrypt);
-}
-
-static void exit_tfm_ablk(struct crypto_skcipher *tfm)
-{
-       struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
-       struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
-
-       crypto_free_skcipher(ctx->fallback_tfm);
-       exit_tfm(crypto_skcipher_tfm(tfm));
-}
-
-static void exit_tfm_aead(struct crypto_aead *tfm)
-{
-       exit_tfm(crypto_aead_tfm(tfm));
-}
-
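-/*
- * Build an HMAC pad block (the key XORed with the ipad/opad byte) and have
- * the NPE hash it, storing the resulting chaining variables at @target.
- */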
-static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
-                             int init_len, u32 ctx_addr, const u8 *key,
-                             int key_len)
-{
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct crypt_ctl *crypt;
-       struct buffer_desc *buf;
-       int i;
-       u8 *pad;
-       dma_addr_t pad_phys, buf_phys;
-
-       BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
-       pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
-       if (!pad)
-               return -ENOMEM;
-       buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
-       if (!buf) {
-               dma_pool_free(ctx_pool, pad, pad_phys);
-               return -ENOMEM;
-       }
-       crypt = get_crypt_desc_emerg();
-       if (!crypt) {
-               dma_pool_free(ctx_pool, pad, pad_phys);
-               dma_pool_free(buffer_pool, buf, buf_phys);
-               return -EAGAIN;
-       }
-
-       memcpy(pad, key, key_len);
-       memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
-       for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
-               pad[i] ^= xpad;
-
-       crypt->data.tfm = tfm;
-       crypt->regist_ptr = pad;
-       crypt->regist_buf = buf;
-
-       crypt->auth_offs = 0;
-       crypt->auth_len = HMAC_PAD_BLOCKLEN;
-       crypt->crypto_ctx = ctx_addr;
-       crypt->src_buf = buf_phys;
-       crypt->icv_rev_aes = target;
-       crypt->mode = NPE_OP_HASH_GEN_ICV;
-       crypt->init_len = init_len;
-       crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
-
-       buf->next = NULL;
-       buf->buf_len = HMAC_PAD_BLOCKLEN;
-       buf->pkt_len = 0;
-       buf->phys_addr = pad_phys;
-
-       atomic_inc(&ctx->configuring);
-       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
-       BUG_ON(qmgr_stat_overflow(send_qid));
-       return 0;
-}
-
-static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
-                     const u8 *key, int key_len, unsigned int digest_len)
-{
-       u32 itarget, otarget, npe_ctx_addr;
-       unsigned char *cinfo;
-       int init_len, ret = 0;
-       u32 cfgword;
-       struct ix_sa_dir *dir;
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-       const struct ix_hash_algo *algo;
-
-       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
-       cinfo = dir->npe_ctx + dir->npe_ctx_idx;
-       algo = ix_hash(tfm);
-
-       /* write cfg word to cryptinfo */
-       cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
-#ifndef __ARMEB__
-       cfgword ^= 0xAA000000; /* change the "byte swap" flags */
-#endif
-       *(__be32 *)cinfo = cpu_to_be32(cfgword);
-       cinfo += sizeof(cfgword);
-
-       /* write ICV to cryptinfo */
-       memcpy(cinfo, algo->icv, digest_len);
-       cinfo += digest_len;
-
-       itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
-                               + sizeof(algo->cfgword);
-       otarget = itarget + digest_len;
-       init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
-       npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
-
-       dir->npe_ctx_idx += init_len;
-       dir->npe_mode |= NPE_OP_HASH_ENABLE;
-
-       if (!encrypt)
-               dir->npe_mode |= NPE_OP_HASH_VERIFY;
-
-       ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
-                                init_len, npe_ctx_addr, key, key_len);
-       if (ret)
-               return ret;
-       return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
-                                 init_len, npe_ctx_addr, key, key_len);
-}
-
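-/* Ask the NPE to derive the reverse AES key schedule used for decryption */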
-static int gen_rev_aes_key(struct crypto_tfm *tfm)
-{
-       struct crypt_ctl *crypt;
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct ix_sa_dir *dir = &ctx->decrypt;
-
-       crypt = get_crypt_desc_emerg();
-       if (!crypt)
-               return -EAGAIN;
-
-       *(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
-
-       crypt->data.tfm = tfm;
-       crypt->crypt_offs = 0;
-       crypt->crypt_len = AES_BLOCK128;
-       crypt->src_buf = 0;
-       crypt->crypto_ctx = dir->npe_ctx_phys;
-       crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
-       crypt->mode = NPE_OP_ENC_GEN_KEY;
-       crypt->init_len = dir->npe_ctx_idx;
-       crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
-
-       atomic_inc(&ctx->configuring);
-       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
-       BUG_ON(qmgr_stat_overflow(send_qid));
-       return 0;
-}
-
-static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
-                       int key_len)
-{
-       u8 *cinfo;
-       u32 cipher_cfg;
-       u32 keylen_cfg = 0;
-       struct ix_sa_dir *dir;
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-       int err;
-
-       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
-       cinfo = dir->npe_ctx;
-
-       if (encrypt) {
-               cipher_cfg = cipher_cfg_enc(tfm);
-               dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
-       } else {
-               cipher_cfg = cipher_cfg_dec(tfm);
-       }
-       if (cipher_cfg & MOD_AES) {
-               switch (key_len) {
-               case 16:
-                       keylen_cfg = MOD_AES128;
-                       break;
-               case 24:
-                       keylen_cfg = MOD_AES192;
-                       break;
-               case 32:
-                       keylen_cfg = MOD_AES256;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-               cipher_cfg |= keylen_cfg;
-       } else {
-               err = crypto_des_verify_key(tfm, key);
-               if (err)
-                       return err;
-       }
-       /* write cfg word to cryptinfo */
-       *(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
-       cinfo += sizeof(cipher_cfg);
-
-       /* write cipher key to cryptinfo */
-       memcpy(cinfo, key, key_len);
-       /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
-       if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
-               memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
-               key_len = DES3_EDE_KEY_SIZE;
-       }
-       dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
-       dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
-       if ((cipher_cfg & MOD_AES) && !encrypt)
-               return gen_rev_aes_key(tfm);
-
-       return 0;
-}
-
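-/*
- * Walk the scatterlist, DMA-map each entry and chain it onto a list of
- * buffer descriptors; returns the tail descriptor, or NULL on failure.
- */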
-static struct buffer_desc *chainup_buffers(struct device *dev,
-               struct scatterlist *sg, unsigned int nbytes,
-               struct buffer_desc *buf, gfp_t flags,
-               enum dma_data_direction dir)
-{
-       for (; nbytes > 0; sg = sg_next(sg)) {
-               unsigned int len = min(nbytes, sg->length);
-               struct buffer_desc *next_buf;
-               dma_addr_t next_buf_phys;
-               void *ptr;
-
-               nbytes -= len;
-               ptr = sg_virt(sg);
-               next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
-               if (!next_buf) {
-                       buf = NULL;
-                       break;
-               }
-               sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
-               buf->next = next_buf;
-               buf->phys_next = next_buf_phys;
-               buf = next_buf;
-
-               buf->phys_addr = sg_dma_address(sg);
-               buf->buf_len = len;
-               buf->dir = dir;
-       }
-       /* buf is NULL here if a pool allocation failed above */
-       if (buf) {
-               buf->next = NULL;
-               buf->phys_next = 0;
-       }
-       return buf;
-}
-
-static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
-                      unsigned int key_len)
-{
-       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
-       int ret;
-
-       init_completion(&ctx->completion);
-       atomic_inc(&ctx->configuring);
-
-       reset_sa_dir(&ctx->encrypt);
-       reset_sa_dir(&ctx->decrypt);
-
-       ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
-       ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
-
-       ret = setup_cipher(&tfm->base, 0, key, key_len);
-       if (ret)
-               goto out;
-       ret = setup_cipher(&tfm->base, 1, key, key_len);
-out:
-       if (!atomic_dec_and_test(&ctx->configuring))
-               wait_for_completion(&ctx->completion);
-       if (ret)
-               return ret;
-       crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
-       crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-
-       return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
-}
-
-static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
-                           unsigned int key_len)
-{
-       return verify_skcipher_des3_key(tfm, key) ?:
-              ablk_setkey(tfm, key, key_len);
-}
-
-static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
-                              unsigned int key_len)
-{
-       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-       /* the nonce is stored at the end of the key */
-       if (key_len < CTR_RFC3686_NONCE_SIZE)
-               return -EINVAL;
-
-       memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
-              CTR_RFC3686_NONCE_SIZE);
-
-       key_len -= CTR_RFC3686_NONCE_SIZE;
-       return ablk_setkey(tfm, key, key_len);
-}
-
-static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
-       struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
-       struct ablk_ctx *rctx = skcipher_request_ctx(areq);
-       int err;
-
-       skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
-       skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
-                                     areq->base.complete, areq->base.data);
-       skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
-                                  areq->cryptlen, areq->iv);
-       if (encrypt)
-               err = crypto_skcipher_encrypt(&rctx->fallback_req);
-       else
-               err = crypto_skcipher_decrypt(&rctx->fallback_req);
-       return err;
-}
-
-static int ablk_perform(struct skcipher_request *req, int encrypt)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
-       unsigned int ivsize = crypto_skcipher_ivsize(tfm);
-       struct ix_sa_dir *dir;
-       struct crypt_ctl *crypt;
-       unsigned int nbytes = req->cryptlen;
-       enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
-       struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
-       struct buffer_desc src_hook;
-       struct device *dev = &pdev->dev;
-       unsigned int offset;
-       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
-                               GFP_KERNEL : GFP_ATOMIC;
-
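-       /* Multi-entry scatterlists are handed to the software fallback */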
-       if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
-               return ixp4xx_cipher_fallback(req, encrypt);
-
-       if (qmgr_stat_full(send_qid))
-               return -EAGAIN;
-       if (atomic_read(&ctx->configuring))
-               return -EAGAIN;
-
-       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
-       req_ctx->encrypt = encrypt;
-
-       crypt = get_crypt_desc();
-       if (!crypt)
-               return -ENOMEM;
-
-       crypt->data.ablk_req = req;
-       crypt->crypto_ctx = dir->npe_ctx_phys;
-       crypt->mode = dir->npe_mode;
-       crypt->init_len = dir->npe_ctx_idx;
-
-       crypt->crypt_offs = 0;
-       crypt->crypt_len = nbytes;
-
-       BUG_ON(ivsize && !req->iv);
-       memcpy(crypt->iv, req->iv, ivsize);
-       if (ivsize > 0 && !encrypt) {
-               offset = req->cryptlen - ivsize;
-               scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
-       }
-       if (req->src != req->dst) {
-               struct buffer_desc dst_hook;
-
-               crypt->mode |= NPE_OP_NOT_IN_PLACE;
-               /* This was never tested by Intel
-                * for more than one dst buffer, I think. */
-               req_ctx->dst = NULL;
-               if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
-                                    flags, DMA_FROM_DEVICE))
-                       goto free_buf_dest;
-               src_direction = DMA_TO_DEVICE;
-               req_ctx->dst = dst_hook.next;
-               crypt->dst_buf = dst_hook.phys_next;
-       } else {
-               req_ctx->dst = NULL;
-       }
-       req_ctx->src = NULL;
-       if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
-                            src_direction))
-               goto free_buf_src;
-
-       req_ctx->src = src_hook.next;
-       crypt->src_buf = src_hook.phys_next;
-       crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
-       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
-       BUG_ON(qmgr_stat_overflow(send_qid));
-       return -EINPROGRESS;
-
-free_buf_src:
-       free_buf_chain(dev, req_ctx->src, crypt->src_buf);
-free_buf_dest:
-       if (req->src != req->dst)
-               free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
-
-       crypt->ctl_flags = CTL_FLAG_UNUSED;
-       return -ENOMEM;
-}
-
-static int ablk_encrypt(struct skcipher_request *req)
-{
-       return ablk_perform(req, 1);
-}
-
-static int ablk_decrypt(struct skcipher_request *req)
-{
-       return ablk_perform(req, 0);
-}
-
-static int ablk_rfc3686_crypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
-       u8 iv[CTR_RFC3686_BLOCK_SIZE];
-       u8 *info = req->iv;
-       int ret;
-
-       /* set up counter block */
-       memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
-       memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
-
-       /* initialize counter portion of counter block */
-       *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
-               cpu_to_be32(1);
-
-       req->iv = iv;
-       ret = ablk_perform(req, 1);
-       req->iv = info;
-       return ret;
-}
-
-static int aead_perform(struct aead_request *req, int encrypt,
-                       int cryptoffset, int eff_cryptlen, u8 *iv)
-{
-       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-       unsigned int ivsize = crypto_aead_ivsize(tfm);
-       unsigned int authsize = crypto_aead_authsize(tfm);
-       struct ix_sa_dir *dir;
-       struct crypt_ctl *crypt;
-       unsigned int cryptlen;
-       struct buffer_desc *buf, src_hook;
-       struct aead_ctx *req_ctx = aead_request_ctx(req);
-       struct device *dev = &pdev->dev;
-       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
-                               GFP_KERNEL : GFP_ATOMIC;
-       enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
-       unsigned int lastlen;
-
-       if (qmgr_stat_full(send_qid))
-               return -EAGAIN;
-       if (atomic_read(&ctx->configuring))
-               return -EAGAIN;
-
-       if (encrypt) {
-               dir = &ctx->encrypt;
-               cryptlen = req->cryptlen;
-       } else {
-               dir = &ctx->decrypt;
-               /* req->cryptlen includes the authsize when decrypting */
-               cryptlen = req->cryptlen - authsize;
-               eff_cryptlen -= authsize;
-       }
-       crypt = get_crypt_desc();
-       if (!crypt)
-               return -ENOMEM;
-
-       crypt->data.aead_req = req;
-       crypt->crypto_ctx = dir->npe_ctx_phys;
-       crypt->mode = dir->npe_mode;
-       crypt->init_len = dir->npe_ctx_idx;
-
-       crypt->crypt_offs = cryptoffset;
-       crypt->crypt_len = eff_cryptlen;
-
-       crypt->auth_offs = 0;
-       crypt->auth_len = req->assoclen + cryptlen;
-       BUG_ON(ivsize && !req->iv);
-       memcpy(crypt->iv, req->iv, ivsize);
-
-       buf = chainup_buffers(dev, req->src, crypt->auth_len,
-                             &src_hook, flags, src_direction);
-       req_ctx->src = src_hook.next;
-       crypt->src_buf = src_hook.phys_next;
-       if (!buf)
-               goto free_buf_src;
-
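-       /* If the ICV lies within the last buffer, the NPE writes it in place */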
-       lastlen = buf->buf_len;
-       if (lastlen >= authsize)
-               crypt->icv_rev_aes = buf->phys_addr +
-                                    buf->buf_len - authsize;
-
-       req_ctx->dst = NULL;
-
-       if (req->src != req->dst) {
-               struct buffer_desc dst_hook;
-
-               crypt->mode |= NPE_OP_NOT_IN_PLACE;
-               src_direction = DMA_TO_DEVICE;
-
-               buf = chainup_buffers(dev, req->dst, crypt->auth_len,
-                                     &dst_hook, flags, DMA_FROM_DEVICE);
-               req_ctx->dst = dst_hook.next;
-               crypt->dst_buf = dst_hook.phys_next;
-
-               if (!buf)
-                       goto free_buf_dst;
-
-               if (encrypt) {
-                       lastlen = buf->buf_len;
-                       if (lastlen >= authsize)
-                               crypt->icv_rev_aes = buf->phys_addr +
-                                                    buf->buf_len - authsize;
-               }
-       }
-
-       if (unlikely(lastlen < authsize)) {
-               /* The hmac bytes are scattered across sg entries,
-                * so we need to copy them into a safe buffer */
-               req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
-                                                   &crypt->icv_rev_aes);
-               if (unlikely(!req_ctx->hmac_virt))
-                       goto free_buf_dst;
-               if (!encrypt) {
-                       scatterwalk_map_and_copy(req_ctx->hmac_virt,
-                                                req->src, cryptlen, authsize, 0);
-               }
-               req_ctx->encrypt = encrypt;
-       } else {
-               req_ctx->hmac_virt = NULL;
-       }
-
-       crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
-       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
-       BUG_ON(qmgr_stat_overflow(send_qid));
-       return -EINPROGRESS;
-
-free_buf_dst:
-       free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
-free_buf_src:
-       free_buf_chain(dev, req_ctx->src, crypt->src_buf);
-       crypt->ctl_flags = CTL_FLAG_UNUSED;
-       return -ENOMEM;
-}
-
-static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
-{
-       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-       unsigned int digest_len = crypto_aead_maxauthsize(tfm);
-       int ret;
-
-       if (!ctx->enckey_len && !ctx->authkey_len)
-               return 0;
-       init_completion(&ctx->completion);
-       atomic_inc(&ctx->configuring);
-
-       reset_sa_dir(&ctx->encrypt);
-       reset_sa_dir(&ctx->decrypt);
-
-       ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
-       if (ret)
-               goto out;
-       ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
-       if (ret)
-               goto out;
-       ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
-                        ctx->authkey_len, digest_len);
-       if (ret)
-               goto out;
-       ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
-                        ctx->authkey_len, digest_len);
-out:
-       if (!atomic_dec_and_test(&ctx->configuring))
-               wait_for_completion(&ctx->completion);
-       return ret;
-}
-
-static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
-{
-       int max = crypto_aead_maxauthsize(tfm) >> 2;
-
-       if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
-               return -EINVAL;
-       return aead_setup(tfm, authsize);
-}
-
-static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
-                      unsigned int keylen)
-{
-       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-       struct crypto_authenc_keys keys;
-
-       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
-               goto badkey;
-
-       if (keys.authkeylen > sizeof(ctx->authkey))
-               goto badkey;
-
-       if (keys.enckeylen > sizeof(ctx->enckey))
-               goto badkey;
-
-       memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
-       memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
-       ctx->authkey_len = keys.authkeylen;
-       ctx->enckey_len = keys.enckeylen;
-
-       memzero_explicit(&keys, sizeof(keys));
-       return aead_setup(tfm, crypto_aead_authsize(tfm));
-badkey:
-       memzero_explicit(&keys, sizeof(keys));
-       return -EINVAL;
-}
-
-static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
-                           unsigned int keylen)
-{
-       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-       struct crypto_authenc_keys keys;
-       int err;
-
-       err = crypto_authenc_extractkeys(&keys, key, keylen);
-       if (unlikely(err))
-               goto badkey;
-
-       err = -EINVAL;
-       if (keys.authkeylen > sizeof(ctx->authkey))
-               goto badkey;
-
-       err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
-       if (err)
-               goto badkey;
-
-       memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
-       memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
-       ctx->authkey_len = keys.authkeylen;
-       ctx->enckey_len = keys.enckeylen;
-
-       memzero_explicit(&keys, sizeof(keys));
-       return aead_setup(tfm, crypto_aead_authsize(tfm));
-badkey:
-       memzero_explicit(&keys, sizeof(keys));
-       return err;
-}
-
-static int aead_encrypt(struct aead_request *req)
-{
-       return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
-}
-
-static int aead_decrypt(struct aead_request *req)
-{
-       return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
-}
-
-static struct ixp_alg ixp4xx_algos[] = {
-{
-       .crypto = {
-               .base.cra_name          = "cbc(des)",
-               .base.cra_blocksize     = DES_BLOCK_SIZE,
-
-               .min_keysize            = DES_KEY_SIZE,
-               .max_keysize            = DES_KEY_SIZE,
-               .ivsize                 = DES_BLOCK_SIZE,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base.cra_name          = "ecb(des)",
-               .base.cra_blocksize     = DES_BLOCK_SIZE,
-               .min_keysize            = DES_KEY_SIZE,
-               .max_keysize            = DES_KEY_SIZE,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
-}, {
-       .crypto = {
-               .base.cra_name          = "cbc(des3_ede)",
-               .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
-
-               .min_keysize            = DES3_EDE_KEY_SIZE,
-               .max_keysize            = DES3_EDE_KEY_SIZE,
-               .ivsize                 = DES3_EDE_BLOCK_SIZE,
-               .setkey                 = ablk_des3_setkey,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base.cra_name          = "ecb(des3_ede)",
-               .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
-
-               .min_keysize            = DES3_EDE_KEY_SIZE,
-               .max_keysize            = DES3_EDE_KEY_SIZE,
-               .setkey                 = ablk_des3_setkey,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
-}, {
-       .crypto = {
-               .base.cra_name          = "cbc(aes)",
-               .base.cra_blocksize     = AES_BLOCK_SIZE,
-
-               .min_keysize            = AES_MIN_KEY_SIZE,
-               .max_keysize            = AES_MAX_KEY_SIZE,
-               .ivsize                 = AES_BLOCK_SIZE,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
-       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
-}, {
-       .crypto = {
-               .base.cra_name          = "ecb(aes)",
-               .base.cra_blocksize     = AES_BLOCK_SIZE,
-
-               .min_keysize            = AES_MIN_KEY_SIZE,
-               .max_keysize            = AES_MAX_KEY_SIZE,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
-       .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
-}, {
-       .crypto = {
-               .base.cra_name          = "ctr(aes)",
-               .base.cra_blocksize     = 1,
-
-               .min_keysize            = AES_MIN_KEY_SIZE,
-               .max_keysize            = AES_MAX_KEY_SIZE,
-               .ivsize                 = AES_BLOCK_SIZE,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
-       .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
-}, {
-       .crypto = {
-               .base.cra_name          = "rfc3686(ctr(aes))",
-               .base.cra_blocksize     = 1,
-
-               .min_keysize            = AES_MIN_KEY_SIZE,
-               .max_keysize            = AES_MAX_KEY_SIZE,
-               .ivsize                 = AES_BLOCK_SIZE,
-               .setkey                 = ablk_rfc3686_setkey,
-               .encrypt                = ablk_rfc3686_crypt,
-               .decrypt                = ablk_rfc3686_crypt,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
-       .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
-} };
-
-static struct ixp_aead_alg ixp4xx_aeads[] = {
-{
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(md5),cbc(des))",
-                       .cra_blocksize  = DES_BLOCK_SIZE,
-               },
-               .ivsize         = DES_BLOCK_SIZE,
-               .maxauthsize    = MD5_DIGEST_SIZE,
-       },
-       .hash = &hash_alg_md5,
-       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(md5),cbc(des3_ede))",
-                       .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
-               },
-               .ivsize         = DES3_EDE_BLOCK_SIZE,
-               .maxauthsize    = MD5_DIGEST_SIZE,
-               .setkey         = des3_aead_setkey,
-       },
-       .hash = &hash_alg_md5,
-       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(sha1),cbc(des))",
-                       .cra_blocksize  = DES_BLOCK_SIZE,
-               },
-               .ivsize         = DES_BLOCK_SIZE,
-               .maxauthsize    = SHA1_DIGEST_SIZE,
-       },
-       .hash = &hash_alg_sha1,
-       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(sha1),cbc(des3_ede))",
-                       .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
-               },
-               .ivsize         = DES3_EDE_BLOCK_SIZE,
-               .maxauthsize    = SHA1_DIGEST_SIZE,
-               .setkey         = des3_aead_setkey,
-       },
-       .hash = &hash_alg_sha1,
-       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(md5),cbc(aes))",
-                       .cra_blocksize  = AES_BLOCK_SIZE,
-               },
-               .ivsize         = AES_BLOCK_SIZE,
-               .maxauthsize    = MD5_DIGEST_SIZE,
-       },
-       .hash = &hash_alg_md5,
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
-       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
-}, {
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(sha1),cbc(aes))",
-                       .cra_blocksize  = AES_BLOCK_SIZE,
-               },
-               .ivsize         = AES_BLOCK_SIZE,
-               .maxauthsize    = SHA1_DIGEST_SIZE,
-       },
-       .hash = &hash_alg_sha1,
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
-       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
-} };
-
-#define IXP_POSTFIX "-ixp4xx"
-
-static int ixp_crypto_probe(struct platform_device *_pdev)
-{
-       struct device *dev = &_pdev->dev;
-       int num = ARRAY_SIZE(ixp4xx_algos);
-       int i, err;
-
-       pdev = _pdev;
-
-       err = init_ixp_crypto(dev);
-       if (err)
-               return err;
-
-       for (i = 0; i < num; i++) {
-               struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
-
-               if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-                            "%s"IXP_POSTFIX, cra->base.cra_name) >=
-                            CRYPTO_MAX_ALG_NAME)
-                       continue;
-               if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
-                       continue;
-
-               /* block ciphers */
-               cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                     CRYPTO_ALG_ASYNC |
-                                     CRYPTO_ALG_ALLOCATES_MEMORY |
-                                     CRYPTO_ALG_NEED_FALLBACK;
-               if (!cra->setkey)
-                       cra->setkey = ablk_setkey;
-               if (!cra->encrypt)
-                       cra->encrypt = ablk_encrypt;
-               if (!cra->decrypt)
-                       cra->decrypt = ablk_decrypt;
-               cra->init = init_tfm_ablk;
-               cra->exit = exit_tfm_ablk;
-
-               cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
-               cra->base.cra_module = THIS_MODULE;
-               cra->base.cra_alignmask = 3;
-               cra->base.cra_priority = 300;
-               if (crypto_register_skcipher(cra))
-                       dev_err(&pdev->dev, "Failed to register '%s'\n",
-                               cra->base.cra_name);
-               else
-                       ixp4xx_algos[i].registered = 1;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
-               struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
-
-               if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-                            "%s"IXP_POSTFIX, cra->base.cra_name) >=
-                   CRYPTO_MAX_ALG_NAME)
-                       continue;
-               if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
-                       continue;
-
-               /* authenc */
-               cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                     CRYPTO_ALG_ASYNC |
-                                     CRYPTO_ALG_ALLOCATES_MEMORY;
-               cra->setkey = cra->setkey ?: aead_setkey;
-               cra->setauthsize = aead_setauthsize;
-               cra->encrypt = aead_encrypt;
-               cra->decrypt = aead_decrypt;
-               cra->init = init_tfm_aead;
-               cra->exit = exit_tfm_aead;
-
-               cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
-               cra->base.cra_module = THIS_MODULE;
-               cra->base.cra_alignmask = 3;
-               cra->base.cra_priority = 300;
-
-               if (crypto_register_aead(cra))
-                       dev_err(&pdev->dev, "Failed to register '%s'\n",
-                               cra->base.cra_driver_name);
-               else
-                       ixp4xx_aeads[i].registered = 1;
-       }
-       return 0;
-}
-
-static int ixp_crypto_remove(struct platform_device *pdev)
-{
-       int num = ARRAY_SIZE(ixp4xx_algos);
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
-               if (ixp4xx_aeads[i].registered)
-                       crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
-       }
-
-       for (i = 0; i < num; i++) {
-               if (ixp4xx_algos[i].registered)
-                       crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
-       }
-       release_ixp_crypto(&pdev->dev);
-
-       return 0;
-}
-
-static const struct of_device_id ixp4xx_crypto_of_match[] = {
-       {
-               .compatible = "intel,ixp4xx-crypto",
-       },
-       {},
-};
-
-static struct platform_driver ixp_crypto_driver = {
-       .probe = ixp_crypto_probe,
-       .remove = ixp_crypto_remove,
-       .driver = {
-               .name = "ixp4xx_crypto",
-               .of_match_table = ixp4xx_crypto_of_match,
-       },
-};
-module_platform_driver(ixp_crypto_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Hohnstaedt <[email protected]>");
-MODULE_DESCRIPTION("IXP4xx hardware crypto");
-
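
As a side note on the registration loops above: the driver derives each cra_driver_name by appending IXP_POSTFIX to the generic cra_name, skipping any algorithm whose suffixed name would overflow CRYPTO_MAX_ALG_NAME. A minimal stand-alone sketch of that naming scheme (the 128-byte limit matches current kernels; the buffer and strings here are illustrative, not driver code):

#include <stdio.h>

#define CRYPTO_MAX_ALG_NAME	128
#define IXP_POSTFIX		"-ixp4xx"

int main(void)
{
	char driver_name[CRYPTO_MAX_ALG_NAME];

	/* "cbc(aes)" becomes "cbc(aes)-ixp4xx"; overlong names are skipped. */
	if (snprintf(driver_name, sizeof(driver_name), "%s" IXP_POSTFIX,
		     "cbc(aes)") < CRYPTO_MAX_ALG_NAME)
		printf("%s\n", driver_name);

	return 0;
}
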
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
deleted file mode 100644 (file)
index 1cd62f9..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4
-       tristate "Support for Intel Keem Bay OCS AES/SM4 HW acceleration"
-       depends on HAS_IOMEM
-       depends on ARCH_KEEMBAY || COMPILE_TEST
-       select CRYPTO_SKCIPHER
-       select CRYPTO_AEAD
-       select CRYPTO_ENGINE
-       help
-         Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) AES and
-         SM4 cipher hardware acceleration for use with Crypto API.
-
-         Provides HW acceleration for the following transformations:
-         cbc(aes), ctr(aes), ccm(aes), gcm(aes), cbc(sm4), ctr(sm4), ccm(sm4)
-         and gcm(sm4).
-
-         Optionally, support for the following transformations can also be
-         enabled: ecb(aes), cts(cbc(aes)), ecb(sm4) and cts(cbc(sm4)).
-
-config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
-       bool "Support for Intel Keem Bay OCS AES/SM4 ECB HW acceleration"
-       depends on CRYPTO_DEV_KEEMBAY_OCS_AES_SM4
-       help
-         Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
-         AES/SM4 ECB mode hardware acceleration for use with Crypto API.
-
-         Provides OCS version of ecb(aes) and ecb(sm4).
-
-         Intel does not recommend use of ECB mode with AES/SM4.
-
-config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
-       bool "Support for Intel Keem Bay OCS AES/SM4 CTS HW acceleration"
-       depends on CRYPTO_DEV_KEEMBAY_OCS_AES_SM4
-       help
-         Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
-         AES/SM4 CBC with CTS mode hardware acceleration for use with
-         Crypto API.
-
-         Provides OCS version of cts(cbc(aes)) and cts(cbc(sm4)).
-
-         Intel does not recommend use of CTS mode with AES/SM4.
-
-config CRYPTO_DEV_KEEMBAY_OCS_ECC
-       tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
-       depends on ARCH_KEEMBAY || COMPILE_TEST
-       depends on OF
-       depends on HAS_IOMEM
-       select CRYPTO_ECDH
-       select CRYPTO_ENGINE
-       help
-         Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
-         Elliptic Curve Cryptography (ECC) hardware acceleration for use with
-         Crypto API.
-
-         Provides OCS acceleration for ECDH-256 and ECDH-384.
-
-         Say Y or M if you are compiling for the Intel Keem Bay SoC. The
-         module will be called keembay-ocs-ecc.
-
-         If unsure, say N.
-
-config CRYPTO_DEV_KEEMBAY_OCS_HCU
-       tristate "Support for Intel Keem Bay OCS HCU HW acceleration"
-       select CRYPTO_HASH
-       select CRYPTO_ENGINE
-       depends on HAS_IOMEM
-       depends on ARCH_KEEMBAY || COMPILE_TEST
-       depends on OF
-       help
-         Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash
-         Control Unit (HCU) hardware acceleration for use with Crypto API.
-
-         Provides OCS HCU hardware acceleration of sha256, sha384, sha512, and
-         sm3, as well as the HMAC variants of these algorithms.
-
-         Say Y or M if you're building for the Intel Keem Bay SoC. If compiled
-         as a module, the module will be called keembay-ocs-hcu.
-
-         If unsure, say N.
-
-config CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
-       bool "Enable sha224 and hmac(sha224) support in Intel Keem Bay OCS HCU"
-       depends on CRYPTO_DEV_KEEMBAY_OCS_HCU
-       help
-         Enables support for the sha224 and hmac(sha224) algorithms in the
-         Intel Keem Bay OCS HCU driver. Intel does not recommend use of
-         these algorithms.
-
-         Provides OCS HCU hardware acceleration of sha224 and hmac(sha224).
-
-         If unsure, say N.
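
The ECB and CTS entries above are bool options that extend the main tristate driver rather than producing modules of their own: the driver sources compile the extra algorithms in under the corresponding CONFIG_ symbols. A sketch of that pattern (the array contents are abbreviated for illustration; the real table appears later in this diff):

/* Sketch: optional algorithms compiled in only when the bool
 * Kconfig symbols above are enabled.
 */
static struct skcipher_alg algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
	{ /* ecb(aes) and ecb(sm4) entries ... */ },
#endif
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
	{ /* cts(cbc(aes)) and cts(cbc(sm4)) entries ... */ },
#endif
	{ /* always-present cbc/ctr entries ... */ },
};
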
diff --git a/drivers/crypto/keembay/Makefile b/drivers/crypto/keembay/Makefile
deleted file mode 100644 (file)
index 7c12c3c..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for Intel Keem Bay OCS Crypto API Linux drivers
-#
-obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4) += keembay-ocs-aes.o
-keembay-ocs-aes-objs := keembay-ocs-aes-core.o ocs-aes.o
-
-obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_ECC) += keembay-ocs-ecc.o
-
-obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU) += keembay-ocs-hcu.o
-keembay-ocs-hcu-objs := keembay-ocs-hcu-core.o ocs-hcu.o
diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
deleted file mode 100644 (file)
index 9953f55..0000000
+++ /dev/null
@@ -1,1706 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Keem Bay OCS AES Crypto Driver.
- *
- * Copyright (C) 2018-2020 Intel Corporation
- */
-
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/crypto.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-
-#include <crypto/aes.h>
-#include <crypto/engine.h>
-#include <crypto/gcm.h>
-#include <crypto/scatterwalk.h>
-
-#include <crypto/internal/aead.h>
-#include <crypto/internal/skcipher.h>
-
-#include "ocs-aes.h"
-
-#define KMB_OCS_PRIORITY       350
-#define DRV_NAME               "keembay-ocs-aes"
-
-#define OCS_AES_MIN_KEY_SIZE   16
-#define OCS_AES_MAX_KEY_SIZE   32
-#define OCS_AES_KEYSIZE_128    16
-#define OCS_AES_KEYSIZE_192    24
-#define OCS_AES_KEYSIZE_256    32
-#define OCS_SM4_KEY_SIZE       16
-
-/**
- * struct ocs_aes_tctx - OCS AES Transform context
- * @engine_ctx:                Engine context.
- * @aes_dev:           The OCS AES device.
- * @key:               AES/SM4 key.
- * @key_len:           The length (in bytes) of @key.
- * @cipher:            OCS cipher to use (either AES or SM4).
- * @sw_cipher:         The cipher to use as fallback.
- * @use_fallback:      Whether or not the fallback cipher should be used.
- */
-struct ocs_aes_tctx {
-       struct crypto_engine_ctx engine_ctx;
-       struct ocs_aes_dev *aes_dev;
-       u8 key[OCS_AES_KEYSIZE_256];
-       unsigned int key_len;
-       enum ocs_cipher cipher;
-       union {
-               struct crypto_sync_skcipher *sk;
-               struct crypto_aead *aead;
-       } sw_cipher;
-       bool use_fallback;
-};
-
-/**
- * struct ocs_aes_rctx - OCS AES Request context.
- * @instruction:       Instruction to be executed (encrypt / decrypt).
- * @mode:              Mode to use (ECB, CBC, CTR, CCM, GCM, CTS).
- * @src_nents:         Number of source SG entries.
- * @dst_nents:         Number of destination SG entries.
- * @src_dma_count:     The number of DMA-mapped entries of the source SG.
- * @dst_dma_count:     The number of DMA-mapped entries of the destination SG.
- * @in_place:          Whether or not this is an in-place request, i.e.,
- *                     src_sg == dst_sg.
- * @src_dll:           OCS DMA linked list for input data.
- * @dst_dll:           OCS DMA linked list for output data.
- * @last_ct_blk:       Buffer to hold last cipher text block (only used in CBC
- *                     mode).
- * @cts_swap:          Whether or not CTS swap must be performed.
- * @aad_src_dll:       OCS DMA linked list for input AAD data.
- * @aad_dst_dll:       OCS DMA linked list for output AAD data.
- * @in_tag:            Buffer to hold input encrypted tag (only used for
- *                     CCM/GCM decrypt).
- * @out_tag:           Buffer to hold output encrypted / decrypted tag (only
- *                     used for GCM encrypt / decrypt).
- */
-struct ocs_aes_rctx {
-       /* Fields common across all modes. */
-       enum ocs_instruction    instruction;
-       enum ocs_mode           mode;
-       int                     src_nents;
-       int                     dst_nents;
-       int                     src_dma_count;
-       int                     dst_dma_count;
-       bool                    in_place;
-       struct ocs_dll_desc     src_dll;
-       struct ocs_dll_desc     dst_dll;
-
-       /* CBC specific */
-       u8                      last_ct_blk[AES_BLOCK_SIZE];
-
-       /* CTS specific */
-       int                     cts_swap;
-
-       /* CCM/GCM specific */
-       struct ocs_dll_desc     aad_src_dll;
-       struct ocs_dll_desc     aad_dst_dll;
-       u8                      in_tag[AES_BLOCK_SIZE];
-
-       /* GCM specific */
-       u8                      out_tag[AES_BLOCK_SIZE];
-};
-
-/* Driver data. */
-struct ocs_aes_drv {
-       struct list_head dev_list;
-       spinlock_t lock;        /* Protects dev_list. */
-};
-
-static struct ocs_aes_drv ocs_aes = {
-       .dev_list = LIST_HEAD_INIT(ocs_aes.dev_list),
-       .lock = __SPIN_LOCK_UNLOCKED(ocs_aes.lock),
-};
-
-static struct ocs_aes_dev *kmb_ocs_aes_find_dev(struct ocs_aes_tctx *tctx)
-{
-       struct ocs_aes_dev *aes_dev;
-
-       spin_lock(&ocs_aes.lock);
-
-       if (tctx->aes_dev) {
-               aes_dev = tctx->aes_dev;
-               goto exit;
-       }
-
-       /* Only a single OCS device is available. */
-       aes_dev = list_first_entry(&ocs_aes.dev_list, struct ocs_aes_dev, list);
-       tctx->aes_dev = aes_dev;
-
-exit:
-       spin_unlock(&ocs_aes.lock);
-
-       return aes_dev;
-}
-
-/*
- * Ensure the key is 128-bit or 256-bit for AES (128-bit for SM4) and that
- * an actual key is being passed in.
- *
- * Return: 0 if key is valid, -EINVAL otherwise.
- */
-static int check_key(const u8 *in_key, size_t key_len, enum ocs_cipher cipher)
-{
-       if (!in_key)
-               return -EINVAL;
-
-       /* For AES, only 128-bit or 256-bit keys are supported. */
-       if (cipher == OCS_AES && (key_len == OCS_AES_KEYSIZE_128 ||
-                                 key_len == OCS_AES_KEYSIZE_256))
-               return 0;
-
-       /* For SM4, only 128-bit keys are supported. */
-       if (cipher == OCS_SM4 && key_len == OCS_AES_KEYSIZE_128)
-               return 0;
-
-       /* Everything else is unsupported. */
-       return -EINVAL;
-}
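
For concreteness, the accepted key lengths reduce to a small table; a usage sketch, assuming key points to valid key material:

/* Sketch: expected check_key() results. */
check_key(key, OCS_AES_KEYSIZE_128, OCS_AES);	/* 0: AES-128 in HW        */
check_key(key, OCS_AES_KEYSIZE_192, OCS_AES);	/* -EINVAL: needs fallback */
check_key(key, OCS_AES_KEYSIZE_256, OCS_AES);	/* 0: AES-256 in HW        */
check_key(key, OCS_SM4_KEY_SIZE, OCS_SM4);	/* 0: SM4 uses 128-bit key */
check_key(key, OCS_AES_KEYSIZE_256, OCS_SM4);	/* -EINVAL: unsupported    */
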
-
-/* Save key into transformation context. */
-static int save_key(struct ocs_aes_tctx *tctx, const u8 *in_key, size_t key_len,
-                   enum ocs_cipher cipher)
-{
-       int ret;
-
-       ret = check_key(in_key, key_len, cipher);
-       if (ret)
-               return ret;
-
-       memcpy(tctx->key, in_key, key_len);
-       tctx->key_len = key_len;
-       tctx->cipher = cipher;
-
-       return 0;
-}
-
-/* Set key for symmetric cipher. */
-static int kmb_ocs_sk_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
-                             size_t key_len, enum ocs_cipher cipher)
-{
-       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-
-       /* Fallback is used for AES with 192-bit key. */
-       tctx->use_fallback = (cipher == OCS_AES &&
-                             key_len == OCS_AES_KEYSIZE_192);
-
-       if (!tctx->use_fallback)
-               return save_key(tctx, in_key, key_len, cipher);
-
-       crypto_sync_skcipher_clear_flags(tctx->sw_cipher.sk,
-                                        CRYPTO_TFM_REQ_MASK);
-       crypto_sync_skcipher_set_flags(tctx->sw_cipher.sk,
-                                      tfm->base.crt_flags &
-                                      CRYPTO_TFM_REQ_MASK);
-
-       return crypto_sync_skcipher_setkey(tctx->sw_cipher.sk, in_key, key_len);
-}
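
The fallback policy used by both set_key paths is the same single predicate; a compact restatement (the helper name is hypothetical, not part of the driver):

/* Sketch: only AES-192 is routed to the software fallback; AES-128,
 * AES-256 and SM4-128 are handled by the OCS hardware.
 */
static bool kmb_needs_fallback(enum ocs_cipher cipher, size_t key_len)
{
	return cipher == OCS_AES && key_len == OCS_AES_KEYSIZE_192;
}
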
-
-/* Set key for AEAD cipher. */
-static int kmb_ocs_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
-                               size_t key_len, enum ocs_cipher cipher)
-{
-       struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
-
-       /* Fallback is used for AES with 192-bit key. */
-       tctx->use_fallback = (cipher == OCS_AES &&
-                             key_len == OCS_AES_KEYSIZE_192);
-
-       if (!tctx->use_fallback)
-               return save_key(tctx, in_key, key_len, cipher);
-
-       crypto_aead_clear_flags(tctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
-       crypto_aead_set_flags(tctx->sw_cipher.aead,
-                             crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
-
-       return crypto_aead_setkey(tctx->sw_cipher.aead, in_key, key_len);
-}
-
-/* Swap two AES blocks in SG lists. */
-static void sg_swap_blocks(struct scatterlist *sgl, unsigned int nents,
-                          off_t blk1_offset, off_t blk2_offset)
-{
-       u8 tmp_buf1[AES_BLOCK_SIZE], tmp_buf2[AES_BLOCK_SIZE];
-
-       /*
-        * No easy way to copy within sg list, so copy both blocks to temporary
-        * buffers first.
-        */
-       sg_pcopy_to_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk1_offset);
-       sg_pcopy_to_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk2_offset);
-       sg_pcopy_from_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk2_offset);
-       sg_pcopy_from_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk1_offset);
-}
-
-/* Initialize request context to default values. */
-static void ocs_aes_init_rctx(struct ocs_aes_rctx *rctx)
-{
-       /* Zero everything. */
-       memset(rctx, 0, sizeof(*rctx));
-
-       /* Set initial value for DMA addresses. */
-       rctx->src_dll.dma_addr = DMA_MAPPING_ERROR;
-       rctx->dst_dll.dma_addr = DMA_MAPPING_ERROR;
-       rctx->aad_src_dll.dma_addr = DMA_MAPPING_ERROR;
-       rctx->aad_dst_dll.dma_addr = DMA_MAPPING_ERROR;
-}
-
-static int kmb_ocs_sk_validate_input(struct skcipher_request *req,
-                                    enum ocs_mode mode)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       int iv_size = crypto_skcipher_ivsize(tfm);
-
-       switch (mode) {
-       case OCS_MODE_ECB:
-               /* Ensure input length is a multiple of the block size. */
-               if (req->cryptlen % AES_BLOCK_SIZE != 0)
-                       return -EINVAL;
-
-               return 0;
-
-       case OCS_MODE_CBC:
-               /* Ensure input length is a multiple of the block size. */
-               if (req->cryptlen % AES_BLOCK_SIZE != 0)
-                       return -EINVAL;
-
-               /* Ensure the IV is present and one block in length. */
-               if (!req->iv || iv_size != AES_BLOCK_SIZE)
-                       return -EINVAL;
-               /*
-                * NOTE: Since the req->cryptlen == 0 case was already handled
-                * in kmb_ocs_sk_common(), the two conditions above also
-                * guarantee that cryptlen >= iv_size.
-                */
-               return 0;
-
-       case OCS_MODE_CTR:
-               /* Ensure the IV is present and one block in length. */
-               if (!req->iv || iv_size != AES_BLOCK_SIZE)
-                       return -EINVAL;
-               return 0;
-
-       case OCS_MODE_CTS:
-               /* Ensure input length is at least one block. */
-               if (req->cryptlen < AES_BLOCK_SIZE)
-                       return -EINVAL;
-
-               /* Ensure the IV is present and one block in length. */
-               if (!req->iv || iv_size != AES_BLOCK_SIZE)
-                       return -EINVAL;
-
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
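
A few worked cases for the checks above, assuming AES_BLOCK_SIZE == 16 and a 16-byte IV where one is required:

/* Sketch: sample inputs and the verdict of kmb_ocs_sk_validate_input().
 * cryptlen = 33, ECB or CBC:  -EINVAL (not a multiple of 16).
 * cryptlen = 33, CTR:         0       (CTR accepts any length).
 * cryptlen = 15, CTS:         -EINVAL (needs at least one block).
 * cryptlen = 48, CTS:         0       (last two blocks swapped later).
 */
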
-
-/*
- * Called by encrypt() / decrypt() skcipher functions.
- *
- * Use fallback if needed, otherwise initialize context and enqueue request
- * into engine.
- */
-static int kmb_ocs_sk_common(struct skcipher_request *req,
-                            enum ocs_cipher cipher,
-                            enum ocs_instruction instruction,
-                            enum ocs_mode mode)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
-       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-       struct ocs_aes_dev *aes_dev;
-       int rc;
-
-       if (tctx->use_fallback) {
-               SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, tctx->sw_cipher.sk);
-
-               skcipher_request_set_sync_tfm(subreq, tctx->sw_cipher.sk);
-               skcipher_request_set_callback(subreq, req->base.flags, NULL,
-                                             NULL);
-               skcipher_request_set_crypt(subreq, req->src, req->dst,
-                                          req->cryptlen, req->iv);
-
-               if (instruction == OCS_ENCRYPT)
-                       rc = crypto_skcipher_encrypt(subreq);
-               else
-                       rc = crypto_skcipher_decrypt(subreq);
-
-               skcipher_request_zero(subreq);
-
-               return rc;
-       }
-
-       /*
-        * If cryptlen == 0, no processing needed for ECB, CBC and CTR.
-        *
-        * For CTS, continue: kmb_ocs_sk_validate_input() will return -EINVAL.
-        */
-       if (!req->cryptlen && mode != OCS_MODE_CTS)
-               return 0;
-
-       rc = kmb_ocs_sk_validate_input(req, mode);
-       if (rc)
-               return rc;
-
-       aes_dev = kmb_ocs_aes_find_dev(tctx);
-       if (!aes_dev)
-               return -ENODEV;
-
-       if (cipher != tctx->cipher)
-               return -EINVAL;
-
-       ocs_aes_init_rctx(rctx);
-       rctx->instruction = instruction;
-       rctx->mode = mode;
-
-       return crypto_transfer_skcipher_request_to_engine(aes_dev->engine, req);
-}
-
-static void cleanup_ocs_dma_linked_list(struct device *dev,
-                                       struct ocs_dll_desc *dll)
-{
-       if (dll->vaddr)
-               dma_free_coherent(dev, dll->size, dll->vaddr, dll->dma_addr);
-       dll->vaddr = NULL;
-       dll->size = 0;
-       dll->dma_addr = DMA_MAPPING_ERROR;
-}
-
-static void kmb_ocs_sk_dma_cleanup(struct skcipher_request *req)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
-       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-       struct device *dev = tctx->aes_dev->dev;
-
-       if (rctx->src_dma_count) {
-               dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
-               rctx->src_dma_count = 0;
-       }
-
-       if (rctx->dst_dma_count) {
-               dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
-                                                            DMA_BIDIRECTIONAL :
-                                                            DMA_FROM_DEVICE);
-               rctx->dst_dma_count = 0;
-       }
-
-       /* Clean up OCS DMA linked lists */
-       cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
-       cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
-}
-
-static int kmb_ocs_sk_prepare_inplace(struct skcipher_request *req)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
-       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-       int iv_size = crypto_skcipher_ivsize(tfm);
-       int rc;
-
-       /*
-        * For CBC decrypt, save the last block (the IV) to the last_ct_blk buffer.
-        *
-        * Note: if we are here, we already checked that cryptlen >= iv_size
-        * and iv_size == AES_BLOCK_SIZE (i.e., the size of last_ct_blk); see
-        * kmb_ocs_sk_validate_input().
-        */
-       if (rctx->mode == OCS_MODE_CBC && rctx->instruction == OCS_DECRYPT)
-               scatterwalk_map_and_copy(rctx->last_ct_blk, req->src,
-                                        req->cryptlen - iv_size, iv_size, 0);
-
-       /* For CTS decrypt, swap last two blocks, if needed. */
-       if (rctx->cts_swap && rctx->instruction == OCS_DECRYPT)
-               sg_swap_blocks(req->dst, rctx->dst_nents,
-                              req->cryptlen - AES_BLOCK_SIZE,
-                              req->cryptlen - (2 * AES_BLOCK_SIZE));
-
-       /* src and dst buffers are the same, use bidirectional DMA mapping. */
-       rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
-                                        rctx->dst_nents, DMA_BIDIRECTIONAL);
-       if (rctx->dst_dma_count == 0) {
-               dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
-               return -ENOMEM;
-       }
-
-       /* Create DST linked list */
-       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
-                                           rctx->dst_dma_count, &rctx->dst_dll,
-                                           req->cryptlen, 0);
-       if (rc)
-               return rc;
-       /*
-        * If descriptor creation was successful, set the src_dll.dma_addr to
-        * the value of dst_dll.dma_addr, as we perform the AES operation
-        * in place on the src.
-        */
-       rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
-
-       return 0;
-}
-
-static int kmb_ocs_sk_prepare_notinplace(struct skcipher_request *req)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
-       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-       int rc;
-
-       rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
-       if (rctx->src_nents < 0)
-               return -EBADMSG;
-
-       /* Map SRC SG. */
-       rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
-                                        rctx->src_nents, DMA_TO_DEVICE);
-       if (rctx->src_dma_count == 0) {
-               dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
-               return -ENOMEM;
-       }
-
-       /* Create SRC linked list */
-       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
-                                           rctx->src_dma_count, &rctx->src_dll,
-                                           req->cryptlen, 0);
-       if (rc)
-               return rc;
-
-       /* Map DST SG. */
-       rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
-                                        rctx->dst_nents, DMA_FROM_DEVICE);
-       if (rctx->dst_dma_count == 0) {
-               dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
-               return -ENOMEM;
-       }
-
-       /* Create DST linked list */
-       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
-                                           rctx->dst_dma_count, &rctx->dst_dll,
-                                           req->cryptlen, 0);
-       if (rc)
-               return rc;
-
-       /* If this is not a CTS decrypt operation with swapping, we are done. */
-       if (!(rctx->cts_swap && rctx->instruction == OCS_DECRYPT))
-               return 0;
-
-       /*
-        * Otherwise, we have to copy src to dst (as we cannot modify src).
-        * Use OCS AES bypass mode to copy src to dst via DMA.
-        *
-        * NOTE: for anything other than small data sizes, this is rather
-        * inefficient.
-        */
-       rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->dst_dll.dma_addr,
-                              rctx->src_dll.dma_addr, req->cryptlen);
-       if (rc)
-               return rc;
-
-       /*
-        * Now dst == src, so clean up what we did so far and use in_place
-        * logic.
-        */
-       kmb_ocs_sk_dma_cleanup(req);
-       rctx->in_place = true;
-
-       return kmb_ocs_sk_prepare_inplace(req);
-}
-
-static int kmb_ocs_sk_run(struct skcipher_request *req)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
-       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-       struct ocs_aes_dev *aes_dev = tctx->aes_dev;
-       int iv_size = crypto_skcipher_ivsize(tfm);
-       int rc;
-
-       rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
-       if (rctx->dst_nents < 0)
-               return -EBADMSG;
-
-       /*
-        * If the input is two blocks or more and a multiple of the block size,
-        * swap the last two blocks to be compatible with other crypto API CTS
-        * implementations: OCS hardware uses CBC-CS2, whereas other crypto API
-        * implementations use CBC-CS3.
-        * CBC-CS2 and CBC-CS3 are defined in:
-        * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a-add.pdf
-        */
-       rctx->cts_swap = (rctx->mode == OCS_MODE_CTS &&
-                         req->cryptlen > AES_BLOCK_SIZE &&
-                         req->cryptlen % AES_BLOCK_SIZE == 0);
-
-       rctx->in_place = (req->src == req->dst);
-
-       if (rctx->in_place)
-               rc = kmb_ocs_sk_prepare_inplace(req);
-       else
-               rc = kmb_ocs_sk_prepare_notinplace(req);
-
-       if (rc)
-               goto error;
-
-       rc = ocs_aes_op(aes_dev, rctx->mode, tctx->cipher, rctx->instruction,
-                       rctx->dst_dll.dma_addr, rctx->src_dll.dma_addr,
-                       req->cryptlen, req->iv, iv_size);
-       if (rc)
-               goto error;
-
-       /* Clean up DMA before further processing the output. */
-       kmb_ocs_sk_dma_cleanup(req);
-
-       /* For CTS encrypt, swap the last two blocks, if needed. */
-       if (rctx->cts_swap && rctx->instruction == OCS_ENCRYPT) {
-               sg_swap_blocks(req->dst, rctx->dst_nents,
-                              req->cryptlen - AES_BLOCK_SIZE,
-                              req->cryptlen - (2 * AES_BLOCK_SIZE));
-               return 0;
-       }
-
-       /* For CBC, copy the IV to req->iv. */
-       if (rctx->mode == OCS_MODE_CBC) {
-               /* CBC encrypt case. */
-               if (rctx->instruction == OCS_ENCRYPT) {
-                       scatterwalk_map_and_copy(req->iv, req->dst,
-                                                req->cryptlen - iv_size,
-                                                iv_size, 0);
-                       return 0;
-               }
-               /* CBC decrypt case. */
-               if (rctx->in_place)
-                       memcpy(req->iv, rctx->last_ct_blk, iv_size);
-               else
-                       scatterwalk_map_and_copy(req->iv, req->src,
-                                                req->cryptlen - iv_size,
-                                                iv_size, 0);
-               return 0;
-       }
-       /* For all other modes there's nothing to do. */
-
-       return 0;
-
-error:
-       kmb_ocs_sk_dma_cleanup(req);
-
-       return rc;
-}
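
To make the CS2-to-CS3 conversion above concrete: only the last two blocks move. With cryptlen = 48 and 16-byte blocks, the swapped offsets are 32 and 16; a sketch using the same expressions as the code above:

/* Sketch: offsets handed to sg_swap_blocks() for the CTS swap.
 * cryptlen = 48  =>  blk1_offset = 32, blk2_offset = 16.
 */
off_t blk1_offset = req->cryptlen - AES_BLOCK_SIZE;
off_t blk2_offset = req->cryptlen - 2 * AES_BLOCK_SIZE;
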
-
-static int kmb_ocs_aead_validate_input(struct aead_request *req,
-                                      enum ocs_instruction instruction,
-                                      enum ocs_mode mode)
-{
-       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-       int tag_size = crypto_aead_authsize(tfm);
-       int iv_size = crypto_aead_ivsize(tfm);
-
-       /* For decrypt, cryptlen == len(CT) + len(tag). */
-       if (instruction == OCS_DECRYPT && req->cryptlen < tag_size)
-               return -EINVAL;
-
-       /* IV is mandatory. */
-       if (!req->iv)
-               return -EINVAL;
-
-       switch (mode) {
-       case OCS_MODE_GCM:
-               if (iv_size != GCM_AES_IV_SIZE)
-                       return -EINVAL;
-
-               return 0;
-
-       case OCS_MODE_CCM:
-               /* Ensure IV is present and block size in length */
-               if (iv_size != AES_BLOCK_SIZE)
-                       return -EINVAL;
-
-               return 0;
-
-       default:
-               return -EINVAL;
-       }
-}
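
Concrete cases for the AEAD validation above (GCM_AES_IV_SIZE is 12 bytes, AES_BLOCK_SIZE is 16):

/* Sketch: sample verdicts of kmb_ocs_aead_validate_input().
 * Decrypt, tag_size = 16, cryptlen = 12:  -EINVAL (no room for the tag).
 * GCM with a 16-byte IV:                  -EINVAL (GCM requires 12).
 * CCM with a 16-byte IV:                  0.
 */
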
-
-/*
- * Called by encrypt() / decrypt() aead functions.
- *
- * Use fallback if needed, otherwise initialize context and enqueue request
- * into engine.
- */
-static int kmb_ocs_aead_common(struct aead_request *req,
-                              enum ocs_cipher cipher,
-                              enum ocs_instruction instruction,
-                              enum ocs_mode mode)
-{
-       struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
-       struct ocs_aes_rctx *rctx = aead_request_ctx(req);
-       struct ocs_aes_dev *dd;
-       int rc;
-
-       if (tctx->use_fallback) {
-               struct aead_request *subreq = aead_request_ctx(req);
-
-               aead_request_set_tfm(subreq, tctx->sw_cipher.aead);
-               aead_request_set_callback(subreq, req->base.flags,
-                                         req->base.complete, req->base.data);
-               aead_request_set_crypt(subreq, req->src, req->dst,
-                                      req->cryptlen, req->iv);
-               aead_request_set_ad(subreq, req->assoclen);
-               rc = crypto_aead_setauthsize(tctx->sw_cipher.aead,
-                                            crypto_aead_authsize(crypto_aead_reqtfm(req)));
-               if (rc)
-                       return rc;
-
-               return (instruction == OCS_ENCRYPT) ?
-                      crypto_aead_encrypt(subreq) :
-                      crypto_aead_decrypt(subreq);
-       }
-
-       rc = kmb_ocs_aead_validate_input(req, instruction, mode);
-       if (rc)
-               return rc;
-
-       dd = kmb_ocs_aes_find_dev(tctx);
-       if (!dd)
-               return -ENODEV;
-
-       if (cipher != tctx->cipher)
-               return -EINVAL;
-
-       ocs_aes_init_rctx(rctx);
-       rctx->instruction = instruction;
-       rctx->mode = mode;
-
-       return crypto_transfer_aead_request_to_engine(dd->engine, req);
-}
-
-static void kmb_ocs_aead_dma_cleanup(struct aead_request *req)
-{
-       struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
-       struct ocs_aes_rctx *rctx = aead_request_ctx(req);
-       struct device *dev = tctx->aes_dev->dev;
-
-       if (rctx->src_dma_count) {
-               dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
-               rctx->src_dma_count = 0;
-       }
-
-       if (rctx->dst_dma_count) {
-               dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
-                                                            DMA_BIDIRECTIONAL :
-                                                            DMA_FROM_DEVICE);
-               rctx->dst_dma_count = 0;
-       }
-       /* Clean up OCS DMA linked lists */
-       cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
-       cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
-       cleanup_ocs_dma_linked_list(dev, &rctx->aad_src_dll);
-       cleanup_ocs_dma_linked_list(dev, &rctx->aad_dst_dll);
-}
-
-/**
- * kmb_ocs_aead_dma_prepare() - Do DMA mapping for AEAD processing.
- * @req:               The AEAD request being processed.
- * @src_dll_size:      Where to store the length of the data mapped into the
- *                     src_dll OCS DMA list.
- *
- * Do the following:
- * - DMA map req->src and req->dst
- * - Initialize the following OCS DMA linked lists: rctx->src_dll,
- *   rctx->dst_dll, rctx->aad_src_dll and rctx->aad_dst_dll.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-static int kmb_ocs_aead_dma_prepare(struct aead_request *req, u32 *src_dll_size)
-{
-       struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
-       const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
-       struct ocs_aes_rctx *rctx = aead_request_ctx(req);
-       u32 in_size;    /* The length of the data to be mapped by src_dll. */
-       u32 out_size;   /* The length of the data to be mapped by dst_dll. */
-       u32 dst_size;   /* The length of the data in dst_sg. */
-       int rc;
-
-       /* Get number of entries in input data SG list. */
-       rctx->src_nents = sg_nents_for_len(req->src,
-                                          req->assoclen + req->cryptlen);
-       if (rctx->src_nents < 0)
-               return -EBADMSG;
-
-       if (rctx->instruction == OCS_DECRYPT) {
-               /*
-                * For decrypt:
-                * - src sg list is:            AAD|CT|tag
-                * - dst sg list expects:       AAD|PT
-                *
-                * in_size == len(CT); out_size == len(PT)
-                */
-
-               /* req->cryptlen includes both CT and tag. */
-               in_size = req->cryptlen - tag_size;
-
-               /* out_size = PT size == CT size */
-               out_size = in_size;
-
-               /* len(dst_sg) == len(AAD) + len(PT) */
-               dst_size = req->assoclen + out_size;
-
-               /*
-                * Copy tag from source SG list to 'in_tag' buffer.
-                *
-                * Note: this needs to be done here, before DMA mapping src_sg.
-                */
-               sg_pcopy_to_buffer(req->src, rctx->src_nents, rctx->in_tag,
-                                  tag_size, req->assoclen + in_size);
-
-       } else { /* OCS_ENCRYPT */
-               /*
-                * For encrypt:
-                *      src sg list is:         AAD|PT
-                *      dst sg list expects:    AAD|CT|tag
-                */
-               /* in_size == len(PT) */
-               in_size = req->cryptlen;
-
-               /*
-                * In CCM mode the OCS engine appends the tag to the ciphertext,
-                * but in GCM mode the tag must be read from the tag registers
-                * and appended manually below.
-                */
-               out_size = (rctx->mode == OCS_MODE_CCM) ? in_size + tag_size :
-                                                         in_size;
-               /* len(dst_sg) == len(AAD) + len(CT) + len(tag) */
-               dst_size = req->assoclen + in_size + tag_size;
-       }
-       *src_dll_size = in_size;
-
-       /* Get number of entries in output data SG list. */
-       rctx->dst_nents = sg_nents_for_len(req->dst, dst_size);
-       if (rctx->dst_nents < 0)
-               return -EBADMSG;
-
-       rctx->in_place = (req->src == req->dst);
-
-       /* Map destination; use bidirectional mapping for in-place case. */
-       rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
-                                        rctx->dst_nents,
-                                        rctx->in_place ? DMA_BIDIRECTIONAL :
-                                                         DMA_FROM_DEVICE);
-       if (rctx->dst_dma_count == 0 && rctx->dst_nents != 0) {
-               dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
-               return -ENOMEM;
-       }
-
-       /* Create AAD DST list: maps dst[0:AAD_SIZE-1]. */
-       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
-                                           rctx->dst_dma_count,
-                                           &rctx->aad_dst_dll, req->assoclen,
-                                           0);
-       if (rc)
-               return rc;
-
-       /* Create DST list: maps dst[AAD_SIZE:out_size] */
-       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
-                                           rctx->dst_dma_count, &rctx->dst_dll,
-                                           out_size, req->assoclen);
-       if (rc)
-               return rc;
-
-       if (rctx->in_place) {
-               /* If this is not CCM encrypt, we are done. */
-               if (!(rctx->mode == OCS_MODE_CCM &&
-                     rctx->instruction == OCS_ENCRYPT)) {
-                       /*
-                        * SRC and DST are the same, so re-use the same DMA
-                        * addresses (to avoid allocating new DMA lists
-                        * identical to the dst ones).
-                        */
-                       rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
-                       rctx->aad_src_dll.dma_addr = rctx->aad_dst_dll.dma_addr;
-
-                       return 0;
-               }
-               /*
-                * For CCM encrypt the input and output linked lists contain
-                * different amounts of data, so we need to create different
-                * SRC and AAD SRC lists, even for the in-place case.
-                */
-               rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
-                                                   rctx->dst_dma_count,
-                                                   &rctx->aad_src_dll,
-                                                   req->assoclen, 0);
-               if (rc)
-                       return rc;
-               rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
-                                                   rctx->dst_dma_count,
-                                                   &rctx->src_dll, in_size,
-                                                   req->assoclen);
-               if (rc)
-                       return rc;
-
-               return 0;
-       }
-       /* Not in-place case. */
-
-       /* Map source SG. */
-       rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
-                                        rctx->src_nents, DMA_TO_DEVICE);
-       if (rctx->src_dma_count == 0 && rctx->src_nents != 0) {
-               dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
-               return -ENOMEM;
-       }
-
-       /* Create AAD SRC list. */
-       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
-                                           rctx->src_dma_count,
-                                           &rctx->aad_src_dll,
-                                           req->assoclen, 0);
-       if (rc)
-               return rc;
-
-       /* Create SRC list. */
-       rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
-                                           rctx->src_dma_count,
-                                           &rctx->src_dll, in_size,
-                                           req->assoclen);
-       if (rc)
-               return rc;
-
-       if (req->assoclen == 0)
-               return 0;
-
-       /* Copy AAD from src sg to dst sg using OCS DMA. */
-       rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->aad_dst_dll.dma_addr,
-                              rctx->aad_src_dll.dma_addr, req->assoclen);
-       if (rc)
-               dev_err(tctx->aes_dev->dev,
-                       "Failed to copy source AAD to destination AAD\n");
-
-       return rc;
-}
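
To make the bookkeeping above concrete, take a GCM decrypt with assoclen = 20, cryptlen = 80 and a 16-byte tag; the numbers are samples only:

/* Sketch: sizes computed by kmb_ocs_aead_dma_prepare() for a decrypt.
 * src = AAD|CT|tag, dst = AAD|PT.
 * in_size  = cryptlen - tag_size = 80 - 16 = 64   (len(CT))
 * out_size = in_size             = 64             (len(PT))
 * dst_size = assoclen + out_size = 20 + 64 = 84
 * The 16-byte tag, found at offset assoclen + in_size = 84 in src,
 * is copied to rctx->in_tag before src is DMA-mapped.
 */
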
-
-static int kmb_ocs_aead_run(struct aead_request *req)
-{
-       struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
-       const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
-       struct ocs_aes_rctx *rctx = aead_request_ctx(req);
-       u32 in_size;    /* The length of the data mapped by src_dll. */
-       int rc;
-
-       rc = kmb_ocs_aead_dma_prepare(req, &in_size);
-       if (rc)
-               goto exit;
-
-       /* For CCM, we just call the OCS processing and we are done. */
-       if (rctx->mode == OCS_MODE_CCM) {
-               rc = ocs_aes_ccm_op(tctx->aes_dev, tctx->cipher,
-                                   rctx->instruction, rctx->dst_dll.dma_addr,
-                                   rctx->src_dll.dma_addr, in_size,
-                                   req->iv,
-                                   rctx->aad_src_dll.dma_addr, req->assoclen,
-                                   rctx->in_tag, tag_size);
-               goto exit;
-       }
-       /* GCM case; invoke OCS processing. */
-       rc = ocs_aes_gcm_op(tctx->aes_dev, tctx->cipher,
-                           rctx->instruction,
-                           rctx->dst_dll.dma_addr,
-                           rctx->src_dll.dma_addr, in_size,
-                           req->iv,
-                           rctx->aad_src_dll.dma_addr, req->assoclen,
-                           rctx->out_tag, tag_size);
-       if (rc)
-               goto exit;
-
-       /* For GCM decrypt, we have to compare in_tag with out_tag. */
-       if (rctx->instruction == OCS_DECRYPT) {
-               rc = memcmp(rctx->in_tag, rctx->out_tag, tag_size) ?
-                    -EBADMSG : 0;
-               goto exit;
-       }
-
-       /* For GCM encrypt, we must manually copy out_tag to DST sg. */
-
-       /* Clean-up must be called before the sg_pcopy_from_buffer() below. */
-       kmb_ocs_aead_dma_cleanup(req);
-
-       /* Copy tag to destination sg after AAD and CT. */
-       sg_pcopy_from_buffer(req->dst, rctx->dst_nents, rctx->out_tag,
-                            tag_size, req->assoclen + req->cryptlen);
-
-       /* Return directly, as the DMA cleanup was already done. */
-       return 0;
-
-exit:
-       kmb_ocs_aead_dma_cleanup(req);
-
-       return rc;
-}
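
Summarizing the GCM tag flow above (CCM needs neither step, since the engine consumes and emits the tag inline):

/* Sketch: GCM tag handling as implemented above.
 * Encrypt: hardware leaves the tag in rctx->out_tag; the driver
 *          appends it to dst at offset assoclen + cryptlen.
 * Decrypt: the expected tag was saved to rctx->in_tag during DMA
 *          prepare; any mismatch with rctx->out_tag yields -EBADMSG.
 */
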
-
-static int kmb_ocs_aes_sk_do_one_request(struct crypto_engine *engine,
-                                        void *areq)
-{
-       struct skcipher_request *req =
-                       container_of(areq, struct skcipher_request, base);
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-       int err;
-
-       if (!tctx->aes_dev) {
-               err = -ENODEV;
-               goto exit;
-       }
-
-       err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
-                             tctx->cipher);
-       if (err)
-               goto exit;
-
-       err = kmb_ocs_sk_run(req);
-
-exit:
-       crypto_finalize_skcipher_request(engine, req, err);
-
-       return 0;
-}
-
-static int kmb_ocs_aes_aead_do_one_request(struct crypto_engine *engine,
-                                          void *areq)
-{
-       struct aead_request *req = container_of(areq,
-                                               struct aead_request, base);
-       struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
-       int err;
-
-       if (!tctx->aes_dev)
-               return -ENODEV;
-
-       err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
-                             tctx->cipher);
-       if (err)
-               goto exit;
-
-       err = kmb_ocs_aead_run(req);
-
-exit:
-       crypto_finalize_aead_request(tctx->aes_dev->engine, req, err);
-
-       return 0;
-}
-
-static int kmb_ocs_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
-                              unsigned int key_len)
-{
-       return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_AES);
-}
-
-static int kmb_ocs_aes_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
-                                   unsigned int key_len)
-{
-       return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_AES);
-}
-
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
-static int kmb_ocs_aes_ecb_encrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_ECB);
-}
-
-static int kmb_ocs_aes_ecb_decrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_ECB);
-}
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
-
-static int kmb_ocs_aes_cbc_encrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CBC);
-}
-
-static int kmb_ocs_aes_cbc_decrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CBC);
-}
-
-static int kmb_ocs_aes_ctr_encrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTR);
-}
-
-static int kmb_ocs_aes_ctr_decrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTR);
-}
-
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
-static int kmb_ocs_aes_cts_encrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTS);
-}
-
-static int kmb_ocs_aes_cts_decrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTS);
-}
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
-
-static int kmb_ocs_aes_gcm_encrypt(struct aead_request *req)
-{
-       return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_GCM);
-}
-
-static int kmb_ocs_aes_gcm_decrypt(struct aead_request *req)
-{
-       return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_GCM);
-}
-
-static int kmb_ocs_aes_ccm_encrypt(struct aead_request *req)
-{
-       return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CCM);
-}
-
-static int kmb_ocs_aes_ccm_decrypt(struct aead_request *req)
-{
-       return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CCM);
-}
-
-static int kmb_ocs_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
-                              unsigned int key_len)
-{
-       return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_SM4);
-}
-
-static int kmb_ocs_sm4_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
-                                   unsigned int key_len)
-{
-       return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_SM4);
-}
-
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
-static int kmb_ocs_sm4_ecb_encrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_ECB);
-}
-
-static int kmb_ocs_sm4_ecb_decrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_ECB);
-}
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
-
-static int kmb_ocs_sm4_cbc_encrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CBC);
-}
-
-static int kmb_ocs_sm4_cbc_decrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CBC);
-}
-
-static int kmb_ocs_sm4_ctr_encrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTR);
-}
-
-static int kmb_ocs_sm4_ctr_decrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTR);
-}
-
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
-static int kmb_ocs_sm4_cts_encrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTS);
-}
-
-static int kmb_ocs_sm4_cts_decrypt(struct skcipher_request *req)
-{
-       return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTS);
-}
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
-
-static int kmb_ocs_sm4_gcm_encrypt(struct aead_request *req)
-{
-       return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_GCM);
-}
-
-static int kmb_ocs_sm4_gcm_decrypt(struct aead_request *req)
-{
-       return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_GCM);
-}
-
-static int kmb_ocs_sm4_ccm_encrypt(struct aead_request *req)
-{
-       return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CCM);
-}
-
-static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req)
-{
-       return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM);
-}
-
-static inline int ocs_common_init(struct ocs_aes_tctx *tctx)
-{
-       tctx->engine_ctx.op.prepare_request = NULL;
-       tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_sk_do_one_request;
-       tctx->engine_ctx.op.unprepare_request = NULL;
-
-       return 0;
-}
-
-static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
-{
-       const char *alg_name = crypto_tfm_alg_name(&tfm->base);
-       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-       struct crypto_sync_skcipher *blk;
-
-       /* set fallback cipher in case it will be needed */
-       blk = crypto_alloc_sync_skcipher(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
-       if (IS_ERR(blk))
-               return PTR_ERR(blk);
-
-       tctx->sw_cipher.sk = blk;
-
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
-
-       return ocs_common_init(tctx);
-}
-
-static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm)
-{
-       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
-
-       return ocs_common_init(tctx);
-}
-
-static inline void clear_key(struct ocs_aes_tctx *tctx)
-{
-       memzero_explicit(tctx->key, OCS_AES_KEYSIZE_256);
-
-       /* Zero key registers if set */
-       if (tctx->aes_dev)
-               ocs_aes_set_key(tctx->aes_dev, OCS_AES_KEYSIZE_256,
-                               tctx->key, OCS_AES);
-}
-
-static void ocs_exit_tfm(struct crypto_skcipher *tfm)
-{
-       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-
-       clear_key(tctx);
-
-       if (tctx->sw_cipher.sk) {
-               crypto_free_sync_skcipher(tctx->sw_cipher.sk);
-               tctx->sw_cipher.sk = NULL;
-       }
-}
-
-static inline int ocs_common_aead_init(struct ocs_aes_tctx *tctx)
-{
-       tctx->engine_ctx.op.prepare_request = NULL;
-       tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_aead_do_one_request;
-       tctx->engine_ctx.op.unprepare_request = NULL;
-
-       return 0;
-}
-
-static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
-{
-       const char *alg_name = crypto_tfm_alg_name(&tfm->base);
-       struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
-       struct crypto_aead *blk;
-
-       /* Set fallback cipher in case it will be needed */
-       blk = crypto_alloc_aead(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
-       if (IS_ERR(blk))
-               return PTR_ERR(blk);
-
-       tctx->sw_cipher.aead = blk;
-
-       crypto_aead_set_reqsize(tfm,
-                               max(sizeof(struct ocs_aes_rctx),
-                                   (sizeof(struct aead_request) +
-                                    crypto_aead_reqsize(tctx->sw_cipher.aead))));
-
-       return ocs_common_aead_init(tctx);
-}
-
-static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm,
-                                       unsigned int authsize)
-{
-       switch (authsize) {
-       case 4:
-       case 6:
-       case 8:
-       case 10:
-       case 12:
-       case 14:
-       case 16:
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
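
The switch above admits exactly the CCM tag sizes allowed by RFC 3610 (even lengths from 4 to 16 bytes); an equivalent compact form, as a sketch with a hypothetical helper name:

/* Sketch: CCM permits even tag sizes 4, 6, 8, 10, 12, 14, 16. */
static int ccm_authsize_ok(unsigned int authsize)
{
	return (authsize >= 4 && authsize <= 16 && !(authsize & 1)) ?
	       0 : -EINVAL;
}
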
-
-static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm,
-                                       unsigned int authsize)
-{
-       return crypto_gcm_check_authsize(authsize);
-}
-
-static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm)
-{
-       struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
-
-       crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
-
-       return ocs_common_aead_init(tctx);
-}
-
-static void ocs_aead_cra_exit(struct crypto_aead *tfm)
-{
-       struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
-
-       clear_key(tctx);
-
-       if (tctx->sw_cipher.aead) {
-               crypto_free_aead(tctx->sw_cipher.aead);
-               tctx->sw_cipher.aead = NULL;
-       }
-}
-
-static struct skcipher_alg algs[] = {
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
-       {
-               .base.cra_name = "ecb(aes)",
-               .base.cra_driver_name = "ecb-aes-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_NEED_FALLBACK,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_AES_MIN_KEY_SIZE,
-               .max_keysize = OCS_AES_MAX_KEY_SIZE,
-               .setkey = kmb_ocs_aes_set_key,
-               .encrypt = kmb_ocs_aes_ecb_encrypt,
-               .decrypt = kmb_ocs_aes_ecb_decrypt,
-               .init = ocs_aes_init_tfm,
-               .exit = ocs_exit_tfm,
-       },
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
-       {
-               .base.cra_name = "cbc(aes)",
-               .base.cra_driver_name = "cbc-aes-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_NEED_FALLBACK,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_AES_MIN_KEY_SIZE,
-               .max_keysize = OCS_AES_MAX_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_aes_set_key,
-               .encrypt = kmb_ocs_aes_cbc_encrypt,
-               .decrypt = kmb_ocs_aes_cbc_decrypt,
-               .init = ocs_aes_init_tfm,
-               .exit = ocs_exit_tfm,
-       },
-       {
-               .base.cra_name = "ctr(aes)",
-               .base.cra_driver_name = "ctr-aes-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_NEED_FALLBACK,
-               .base.cra_blocksize = 1,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_AES_MIN_KEY_SIZE,
-               .max_keysize = OCS_AES_MAX_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_aes_set_key,
-               .encrypt = kmb_ocs_aes_ctr_encrypt,
-               .decrypt = kmb_ocs_aes_ctr_decrypt,
-               .init = ocs_aes_init_tfm,
-               .exit = ocs_exit_tfm,
-       },
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
-       {
-               .base.cra_name = "cts(cbc(aes))",
-               .base.cra_driver_name = "cts-aes-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_NEED_FALLBACK,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_AES_MIN_KEY_SIZE,
-               .max_keysize = OCS_AES_MAX_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_aes_set_key,
-               .encrypt = kmb_ocs_aes_cts_encrypt,
-               .decrypt = kmb_ocs_aes_cts_decrypt,
-               .init = ocs_aes_init_tfm,
-               .exit = ocs_exit_tfm,
-       },
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
-       {
-               .base.cra_name = "ecb(sm4)",
-               .base.cra_driver_name = "ecb-sm4-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_SM4_KEY_SIZE,
-               .max_keysize = OCS_SM4_KEY_SIZE,
-               .setkey = kmb_ocs_sm4_set_key,
-               .encrypt = kmb_ocs_sm4_ecb_encrypt,
-               .decrypt = kmb_ocs_sm4_ecb_decrypt,
-               .init = ocs_sm4_init_tfm,
-               .exit = ocs_exit_tfm,
-       },
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
-       {
-               .base.cra_name = "cbc(sm4)",
-               .base.cra_driver_name = "cbc-sm4-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_SM4_KEY_SIZE,
-               .max_keysize = OCS_SM4_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_sm4_set_key,
-               .encrypt = kmb_ocs_sm4_cbc_encrypt,
-               .decrypt = kmb_ocs_sm4_cbc_decrypt,
-               .init = ocs_sm4_init_tfm,
-               .exit = ocs_exit_tfm,
-       },
-       {
-               .base.cra_name = "ctr(sm4)",
-               .base.cra_driver_name = "ctr-sm4-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-               .base.cra_blocksize = 1,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_SM4_KEY_SIZE,
-               .max_keysize = OCS_SM4_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_sm4_set_key,
-               .encrypt = kmb_ocs_sm4_ctr_encrypt,
-               .decrypt = kmb_ocs_sm4_ctr_decrypt,
-               .init = ocs_sm4_init_tfm,
-               .exit = ocs_exit_tfm,
-       },
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
-       {
-               .base.cra_name = "cts(cbc(sm4))",
-               .base.cra_driver_name = "cts-sm4-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_SM4_KEY_SIZE,
-               .max_keysize = OCS_SM4_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_sm4_set_key,
-               .encrypt = kmb_ocs_sm4_cts_encrypt,
-               .decrypt = kmb_ocs_sm4_cts_decrypt,
-               .init = ocs_sm4_init_tfm,
-               .exit = ocs_exit_tfm,
-       }
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
-};
-
-static struct aead_alg algs_aead[] = {
-       {
-               .base = {
-                       .cra_name = "gcm(aes)",
-                       .cra_driver_name = "gcm-aes-keembay-ocs",
-                       .cra_priority = KMB_OCS_PRIORITY,
-                       .cra_flags = CRYPTO_ALG_ASYNC |
-                                    CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                    CRYPTO_ALG_NEED_FALLBACK,
-                       .cra_blocksize = 1,
-                       .cra_ctxsize = sizeof(struct ocs_aes_tctx),
-                       .cra_alignmask = 0,
-                       .cra_module = THIS_MODULE,
-               },
-               .init = ocs_aes_aead_cra_init,
-               .exit = ocs_aead_cra_exit,
-               .ivsize = GCM_AES_IV_SIZE,
-               .maxauthsize = AES_BLOCK_SIZE,
-               .setauthsize = kmb_ocs_aead_gcm_setauthsize,
-               .setkey = kmb_ocs_aes_aead_set_key,
-               .encrypt = kmb_ocs_aes_gcm_encrypt,
-               .decrypt = kmb_ocs_aes_gcm_decrypt,
-       },
-       {
-               .base = {
-                       .cra_name = "ccm(aes)",
-                       .cra_driver_name = "ccm-aes-keembay-ocs",
-                       .cra_priority = KMB_OCS_PRIORITY,
-                       .cra_flags = CRYPTO_ALG_ASYNC |
-                                    CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                    CRYPTO_ALG_NEED_FALLBACK,
-                       .cra_blocksize = 1,
-                       .cra_ctxsize = sizeof(struct ocs_aes_tctx),
-                       .cra_alignmask = 0,
-                       .cra_module = THIS_MODULE,
-               },
-               .init = ocs_aes_aead_cra_init,
-               .exit = ocs_aead_cra_exit,
-               .ivsize = AES_BLOCK_SIZE,
-               .maxauthsize = AES_BLOCK_SIZE,
-               .setauthsize = kmb_ocs_aead_ccm_setauthsize,
-               .setkey = kmb_ocs_aes_aead_set_key,
-               .encrypt = kmb_ocs_aes_ccm_encrypt,
-               .decrypt = kmb_ocs_aes_ccm_decrypt,
-       },
-       {
-               .base = {
-                       .cra_name = "gcm(sm4)",
-                       .cra_driver_name = "gcm-sm4-keembay-ocs",
-                       .cra_priority = KMB_OCS_PRIORITY,
-                       .cra_flags = CRYPTO_ALG_ASYNC |
-                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = 1,
-                       .cra_ctxsize = sizeof(struct ocs_aes_tctx),
-                       .cra_alignmask = 0,
-                       .cra_module = THIS_MODULE,
-               },
-               .init = ocs_sm4_aead_cra_init,
-               .exit = ocs_aead_cra_exit,
-               .ivsize = GCM_AES_IV_SIZE,
-               .maxauthsize = AES_BLOCK_SIZE,
-               .setauthsize = kmb_ocs_aead_gcm_setauthsize,
-               .setkey = kmb_ocs_sm4_aead_set_key,
-               .encrypt = kmb_ocs_sm4_gcm_encrypt,
-               .decrypt = kmb_ocs_sm4_gcm_decrypt,
-       },
-       {
-               .base = {
-                       .cra_name = "ccm(sm4)",
-                       .cra_driver_name = "ccm-sm4-keembay-ocs",
-                       .cra_priority = KMB_OCS_PRIORITY,
-                       .cra_flags = CRYPTO_ALG_ASYNC |
-                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = 1,
-                       .cra_ctxsize = sizeof(struct ocs_aes_tctx),
-                       .cra_alignmask = 0,
-                       .cra_module = THIS_MODULE,
-               },
-               .init = ocs_sm4_aead_cra_init,
-               .exit = ocs_aead_cra_exit,
-               .ivsize = AES_BLOCK_SIZE,
-               .maxauthsize = AES_BLOCK_SIZE,
-               .setauthsize = kmb_ocs_aead_ccm_setauthsize,
-               .setkey = kmb_ocs_sm4_aead_set_key,
-               .encrypt = kmb_ocs_sm4_ccm_encrypt,
-               .decrypt = kmb_ocs_sm4_ccm_decrypt,
-       }
-};
-
-static void unregister_aes_algs(struct ocs_aes_dev *aes_dev)
-{
-       crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
-       crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-static int register_aes_algs(struct ocs_aes_dev *aes_dev)
-{
-       int ret;
-
-       /*
-        * If any algorithm fails to register, all preceding algorithms that
-        * were successfully registered will be automatically unregistered.
-        */
-       ret = crypto_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
-       if (ret)
-               return ret;
-
-       ret = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
-       if (ret)
-               crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
-
-       return ret;
-}
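
/*
 * Illustration only (not part of this driver): once registered, the
 * skciphers above are reachable by cra_name through the generic kernel
 * crypto API, with cra_priority deciding which implementation gets
 * picked. A minimal consumer-side sketch, assuming a hypothetical
 * helper name example_cbc_aes_encrypt() and a caller-provided
 * scatterlist:
 */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen, u8 *iv,
                                   struct scatterlist *sg, unsigned int len)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        DECLARE_CRYPTO_WAIT(wait);
        int rc;

        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = crypto_skcipher_setkey(tfm, key, keylen);
        if (rc)
                goto free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                rc = -ENOMEM;
                goto free_tfm;
        }

        /* Wait synchronously, since the OCS implementation is async. */
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, sg, sg, len, iv);
        rc = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
free_tfm:
        crypto_free_skcipher(tfm);
        return rc;
}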
-
-/* Device tree driver match. */
-static const struct of_device_id kmb_ocs_aes_of_match[] = {
-       {
-               .compatible = "intel,keembay-ocs-aes",
-       },
-       {}
-};
-
-static int kmb_ocs_aes_remove(struct platform_device *pdev)
-{
-       struct ocs_aes_dev *aes_dev;
-
-       aes_dev = platform_get_drvdata(pdev);
-       if (!aes_dev)
-               return -ENODEV;
-
-       unregister_aes_algs(aes_dev);
-
-       spin_lock(&ocs_aes.lock);
-       list_del(&aes_dev->list);
-       spin_unlock(&ocs_aes.lock);
-
-       crypto_engine_exit(aes_dev->engine);
-
-       return 0;
-}
-
-static int kmb_ocs_aes_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct ocs_aes_dev *aes_dev;
-       int rc;
-
-       aes_dev = devm_kzalloc(dev, sizeof(*aes_dev), GFP_KERNEL);
-       if (!aes_dev)
-               return -ENOMEM;
-
-       aes_dev->dev = dev;
-
-       platform_set_drvdata(pdev, aes_dev);
-
-       rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-       if (rc) {
-               dev_err(dev, "Failed to set 32 bit dma mask %d\n", rc);
-               return rc;
-       }
-
-       /* Get base register address. */
-       aes_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(aes_dev->base_reg))
-               return PTR_ERR(aes_dev->base_reg);
-
-       /* Get and request IRQ */
-       aes_dev->irq = platform_get_irq(pdev, 0);
-       if (aes_dev->irq < 0)
-               return aes_dev->irq;
-
-       rc = devm_request_threaded_irq(dev, aes_dev->irq, ocs_aes_irq_handler,
-                                      NULL, 0, "keembay-ocs-aes", aes_dev);
-       if (rc < 0) {
-               dev_err(dev, "Could not request IRQ\n");
-               return rc;
-       }
-
-       INIT_LIST_HEAD(&aes_dev->list);
-       spin_lock(&ocs_aes.lock);
-       list_add_tail(&aes_dev->list, &ocs_aes.dev_list);
-       spin_unlock(&ocs_aes.lock);
-
-       init_completion(&aes_dev->irq_completion);
-
-       /* Initialize crypto engine */
-       aes_dev->engine = crypto_engine_alloc_init(dev, true);
-       if (!aes_dev->engine) {
-               rc = -ENOMEM;
-               goto list_del;
-       }
-
-       rc = crypto_engine_start(aes_dev->engine);
-       if (rc) {
-               dev_err(dev, "Could not start crypto engine\n");
-               goto cleanup;
-       }
-
-       rc = register_aes_algs(aes_dev);
-       if (rc) {
-               dev_err(dev,
-                       "Could not register OCS algorithms with Crypto API\n");
-               goto cleanup;
-       }
-
-       return 0;
-
-cleanup:
-       crypto_engine_exit(aes_dev->engine);
-list_del:
-       spin_lock(&ocs_aes.lock);
-       list_del(&aes_dev->list);
-       spin_unlock(&ocs_aes.lock);
-
-       return rc;
-}
-
-/* The OCS driver is a platform device. */
-static struct platform_driver kmb_ocs_aes_driver = {
-       .probe = kmb_ocs_aes_probe,
-       .remove = kmb_ocs_aes_remove,
-       .driver = {
-                       .name = DRV_NAME,
-                       .of_match_table = kmb_ocs_aes_of_match,
-               },
-};
-
-module_platform_driver(kmb_ocs_aes_driver);
-
-MODULE_DESCRIPTION("Intel Keem Bay Offload and Crypto Subsystem (OCS) AES/SM4 Driver");
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS_CRYPTO("cbc-aes-keembay-ocs");
-MODULE_ALIAS_CRYPTO("ctr-aes-keembay-ocs");
-MODULE_ALIAS_CRYPTO("gcm-aes-keembay-ocs");
-MODULE_ALIAS_CRYPTO("ccm-aes-keembay-ocs");
-
-MODULE_ALIAS_CRYPTO("cbc-sm4-keembay-ocs");
-MODULE_ALIAS_CRYPTO("ctr-sm4-keembay-ocs");
-MODULE_ALIAS_CRYPTO("gcm-sm4-keembay-ocs");
-MODULE_ALIAS_CRYPTO("ccm-sm4-keembay-ocs");
-
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
-MODULE_ALIAS_CRYPTO("ecb-aes-keembay-ocs");
-MODULE_ALIAS_CRYPTO("ecb-sm4-keembay-ocs");
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
-
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
-MODULE_ALIAS_CRYPTO("cts-aes-keembay-ocs");
-MODULE_ALIAS_CRYPTO("cts-sm4-keembay-ocs");
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
diff --git a/drivers/crypto/keembay/keembay-ocs-ecc.c b/drivers/crypto/keembay/keembay-ocs-ecc.c
deleted file mode 100644 (file)
index 2269df1..0000000
+++ /dev/null
@@ -1,1016 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Keem Bay OCS ECC Crypto Driver.
- *
- * Copyright (C) 2019-2021 Intel Corporation
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
-#include <linux/fips.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-#include <crypto/ecc_curve.h>
-#include <crypto/ecdh.h>
-#include <crypto/engine.h>
-#include <crypto/kpp.h>
-#include <crypto/rng.h>
-
-#include <crypto/internal/ecc.h>
-#include <crypto/internal/kpp.h>
-
-#define DRV_NAME                       "keembay-ocs-ecc"
-
-#define KMB_OCS_ECC_PRIORITY           350
-
-#define HW_OFFS_OCS_ECC_COMMAND                0x00000000
-#define HW_OFFS_OCS_ECC_STATUS         0x00000004
-#define HW_OFFS_OCS_ECC_DATA_IN                0x00000080
-#define HW_OFFS_OCS_ECC_CX_DATA_OUT    0x00000100
-#define HW_OFFS_OCS_ECC_CY_DATA_OUT    0x00000180
-#define HW_OFFS_OCS_ECC_ISR            0x00000400
-#define HW_OFFS_OCS_ECC_IER            0x00000404
-
-#define HW_OCS_ECC_ISR_INT_STATUS_DONE BIT(0)
-#define HW_OCS_ECC_COMMAND_INS_BP      BIT(0)
-
-#define HW_OCS_ECC_COMMAND_START_VAL   BIT(0)
-
-#define OCS_ECC_OP_SIZE_384            BIT(8)
-#define OCS_ECC_OP_SIZE_256            0
-
-/* ECC Instruction : for ECC_COMMAND */
-#define OCS_ECC_INST_WRITE_AX          (0x1 << HW_OCS_ECC_COMMAND_INS_BP)
-#define OCS_ECC_INST_WRITE_AY          (0x2 << HW_OCS_ECC_COMMAND_INS_BP)
-#define OCS_ECC_INST_WRITE_BX_D                (0x3 << HW_OCS_ECC_COMMAND_INS_BP)
-#define OCS_ECC_INST_WRITE_BY_L                (0x4 << HW_OCS_ECC_COMMAND_INS_BP)
-#define OCS_ECC_INST_WRITE_P           (0x5 << HW_OCS_ECC_COMMAND_INS_BP)
-#define OCS_ECC_INST_WRITE_A           (0x6 << HW_OCS_ECC_COMMAND_INS_BP)
-#define OCS_ECC_INST_CALC_D_IDX_A      (0x8 << HW_OCS_ECC_COMMAND_INS_BP)
-#define OCS_ECC_INST_CALC_A_POW_B_MODP (0xB << HW_OCS_ECC_COMMAND_INS_BP)
-#define OCS_ECC_INST_CALC_A_MUL_B_MODP (0xC << HW_OCS_ECC_COMMAND_INS_BP)
-#define OCS_ECC_INST_CALC_A_ADD_B_MODP (0xD << HW_OCS_ECC_COMMAND_INS_BP)
-
-#define ECC_ENABLE_INTR                        1
-
-#define POLL_USEC                      100
-#define TIMEOUT_USEC                   10000
-
-#define KMB_ECC_VLI_MAX_DIGITS         ECC_CURVE_NIST_P384_DIGITS
-#define KMB_ECC_VLI_MAX_BYTES          (KMB_ECC_VLI_MAX_DIGITS \
-                                        << ECC_DIGITS_TO_BYTES_SHIFT)
-
-#define POW_CUBE                       3
-
-/**
- * struct ocs_ecc_dev - ECC device context
- * @list: List of device contexts
- * @dev: OCS ECC device
- * @base_reg: IO base address of OCS ECC
- * @engine: Crypto engine for the device
- * @irq_done: IRQ done completion.
- * @irq: IRQ number
- */
-struct ocs_ecc_dev {
-       struct list_head list;
-       struct device *dev;
-       void __iomem *base_reg;
-       struct crypto_engine *engine;
-       struct completion irq_done;
-       int irq;
-};
-
-/**
- * struct ocs_ecc_ctx - Transformation context.
- * @engine_ctx:         Crypto engine ctx.
- * @ecc_dev:    The ECC driver associated with this context.
- * @curve:      The elliptic curve used by this transformation.
- * @private_key: The private key.
- */
-struct ocs_ecc_ctx {
-       struct crypto_engine_ctx engine_ctx;
-       struct ocs_ecc_dev *ecc_dev;
-       const struct ecc_curve *curve;
-       u64 private_key[KMB_ECC_VLI_MAX_DIGITS];
-};
-
-/* Driver data. */
-struct ocs_ecc_drv {
-       struct list_head dev_list;
-       spinlock_t lock;        /* Protects dev_list. */
-};
-
-/* Global variable holding the list of OCS ECC devices (only one expected). */
-static struct ocs_ecc_drv ocs_ecc = {
-       .dev_list = LIST_HEAD_INIT(ocs_ecc.dev_list),
-       .lock = __SPIN_LOCK_UNLOCKED(ocs_ecc.lock),
-};
-
-/* Get OCS ECC tfm context from kpp_request. */
-static inline struct ocs_ecc_ctx *kmb_ocs_ecc_tctx(struct kpp_request *req)
-{
-       return kpp_tfm_ctx(crypto_kpp_reqtfm(req));
-}
-
-/* Converts number of digits to number of bytes. */
-static inline unsigned int digits_to_bytes(unsigned int n)
-{
-       return n << ECC_DIGITS_TO_BYTES_SHIFT;
-}
-
-/*
- * Wait for the ECC engine to become idle, i.e., for an operation (other
- * than a write operation) to complete.
- */
-static inline int ocs_ecc_wait_idle(struct ocs_ecc_dev *dev)
-{
-       u32 value;
-
-       return readl_poll_timeout((dev->base_reg + HW_OFFS_OCS_ECC_STATUS),
-                                 value,
-                                 !(value & HW_OCS_ECC_ISR_INT_STATUS_DONE),
-                                 POLL_USEC, TIMEOUT_USEC);
-}
-
-static void ocs_ecc_cmd_start(struct ocs_ecc_dev *ecc_dev, u32 op_size)
-{
-       iowrite32(op_size | HW_OCS_ECC_COMMAND_START_VAL,
-                 ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
-}
-
-/* Direct write of u32 buffer to ECC engine with associated instruction. */
-static void ocs_ecc_write_cmd_and_data(struct ocs_ecc_dev *dev,
-                                      u32 op_size,
-                                      u32 inst,
-                                      const void *data_in,
-                                      size_t data_size)
-{
-       iowrite32(op_size | inst, dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
-
-       /* MMIO Write src uint32 to dst. */
-       memcpy_toio(dev->base_reg + HW_OFFS_OCS_ECC_DATA_IN, data_in,
-                   data_size);
-}
-
-/* Start OCS ECC operation and wait for its completion. */
-static int ocs_ecc_trigger_op(struct ocs_ecc_dev *ecc_dev, u32 op_size,
-                             u32 inst)
-{
-       reinit_completion(&ecc_dev->irq_done);
-
-       iowrite32(ECC_ENABLE_INTR, ecc_dev->base_reg + HW_OFFS_OCS_ECC_IER);
-       iowrite32(op_size | inst, ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
-
-       return wait_for_completion_interruptible(&ecc_dev->irq_done);
-}
-
-/**
- * ocs_ecc_read_cx_out() - Read the CX data output buffer.
- * @dev:       The OCS ECC device to read from.
- * @cx_out:    The buffer where to store the CX value. Must be at least
- *             @byte_count bytes long.
- * @byte_count:        The amount of data to read.
- */
-static inline void ocs_ecc_read_cx_out(struct ocs_ecc_dev *dev, void *cx_out,
-                                      size_t byte_count)
-{
-       memcpy_fromio(cx_out, dev->base_reg + HW_OFFS_OCS_ECC_CX_DATA_OUT,
-                     byte_count);
-}
-
-/**
- * ocs_ecc_read_cy_out() - Read the CY data output buffer.
- * @dev:       The OCS ECC device to read from.
- * @cy_out:    The buffer where to store the CY value. Must be at least
- *             @byte_count bytes long.
- * @byte_count:        The amount of data to read.
- */
-static inline void ocs_ecc_read_cy_out(struct ocs_ecc_dev *dev, void *cy_out,
-                                      size_t byte_count)
-{
-       memcpy_fromio(cy_out, dev->base_reg + HW_OFFS_OCS_ECC_CY_DATA_OUT,
-                     byte_count);
-}
-
-static struct ocs_ecc_dev *kmb_ocs_ecc_find_dev(struct ocs_ecc_ctx *tctx)
-{
-       if (tctx->ecc_dev)
-               return tctx->ecc_dev;
-
-       spin_lock(&ocs_ecc.lock);
-
-       /* Only a single OCS device available. */
-       tctx->ecc_dev = list_first_entry(&ocs_ecc.dev_list, struct ocs_ecc_dev,
-                                        list);
-
-       spin_unlock(&ocs_ecc.lock);
-
-       return tctx->ecc_dev;
-}
-
-/* Do point multiplication using OCS ECC HW. */
-static int kmb_ecc_point_mult(struct ocs_ecc_dev *ecc_dev,
-                             struct ecc_point *result,
-                             const struct ecc_point *point,
-                             u64 *scalar,
-                             const struct ecc_curve *curve)
-{
-       u8 sca[KMB_ECC_VLI_MAX_BYTES]; /* Use the maximum data size. */
-       u32 op_size = (curve->g.ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
-                     OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
-       size_t nbytes = digits_to_bytes(curve->g.ndigits);
-       int rc = 0;
-
-       /* Generate nbytes of random data for Simple and Differential SCA protection. */
-       rc = crypto_get_default_rng();
-       if (rc)
-               return rc;
-
-       rc = crypto_rng_get_bytes(crypto_default_rng, sca, nbytes);
-       crypto_put_default_rng();
-       if (rc)
-               return rc;
-
-       /* Wait for the engine to be idle before starting a new operation. */
-       rc = ocs_ecc_wait_idle(ecc_dev);
-       if (rc)
-               return rc;
-
-       /* Send the ecc_start pulse and indicate the operation size. */
-       ocs_ecc_cmd_start(ecc_dev, op_size);
-
-       /* Write ax param; Base point (Gx). */
-       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
-                                  point->x, nbytes);
-
-       /* Write ay param; Base point (Gy). */
-       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
-                                  point->y, nbytes);
-
-       /*
-        * Write the private key into DATA_IN reg.
-        *
-        * Since the DATA_IN register is reused for different values during
-        * the computation, the private key value is overwritten afterwards
-        * with the side-channel-resistance value.
-        */
-       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BX_D,
-                                  scalar, nbytes);
-
-       /* Write operand by/l. */
-       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BY_L,
-                                  sca, nbytes);
-       memzero_explicit(sca, sizeof(sca));
-
-       /* Write p = curve prime (GF modulus). */
-       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
-                                  curve->p, nbytes);
-
-       /* Write a = curve coefficient. */
-       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_A,
-                                  curve->a, nbytes);
-
-       /* Make hardware perform the multiplication. */
-       rc = ocs_ecc_trigger_op(ecc_dev, op_size, OCS_ECC_INST_CALC_D_IDX_A);
-       if (rc)
-               return rc;
-
-       /* Read result. */
-       ocs_ecc_read_cx_out(ecc_dev, result->x, nbytes);
-       ocs_ecc_read_cy_out(ecc_dev, result->y, nbytes);
-
-       return 0;
-}
-
-/**
- * kmb_ecc_do_scalar_op() - Perform Scalar operation using OCS ECC HW.
- * @ecc_dev:   The OCS ECC device to use.
- * @scalar_out:        Where to store the output scalar.
- * @scalar_a:  Input scalar operand 'a'.
- * @scalar_b:  Input scalar operand 'b'.
- * @curve:     The curve on which the operation is performed.
- * @ndigits:   The size of the operands (in digits).
- * @inst:      The operation to perform (as an OCS ECC instruction).
- *
- * Return:     0 on success, negative error code otherwise.
- */
-static int kmb_ecc_do_scalar_op(struct ocs_ecc_dev *ecc_dev, u64 *scalar_out,
-                               const u64 *scalar_a, const u64 *scalar_b,
-                               const struct ecc_curve *curve,
-                               unsigned int ndigits, const u32 inst)
-{
-       u32 op_size = (ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
-                     OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
-       size_t nbytes = digits_to_bytes(ndigits);
-       int rc;
-
-       /* Wait for the engine to be idle before starting a new operation. */
-       rc = ocs_ecc_wait_idle(ecc_dev);
-       if (rc)
-               return rc;
-
-       /* Send the ecc_start pulse and indicate the operation size. */
-       ocs_ecc_cmd_start(ecc_dev, op_size);
-
-       /* Write ax param (first scalar operand). */
-       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
-                                  scalar_a, nbytes);
-
-       /* Write ay param (second scalar operand). */
-       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
-                                  scalar_b, nbytes);
-
-       /* Write p = curve prime (GF modulus). */
-       ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
-                                  curve->p, nbytes);
-
-       /* Give instruction A.B or A+B to ECC engine. */
-       rc = ocs_ecc_trigger_op(ecc_dev, op_size, inst);
-       if (rc)
-               return rc;
-
-       ocs_ecc_read_cx_out(ecc_dev, scalar_out, nbytes);
-
-       if (vli_is_zero(scalar_out, ndigits))
-               return -EINVAL;
-
-       return 0;
-}
-
-/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
-static int kmb_ocs_ecc_is_pubkey_valid_partial(struct ocs_ecc_dev *ecc_dev,
-                                              const struct ecc_curve *curve,
-                                              struct ecc_point *pk)
-{
-       u64 xxx[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
-       u64 yy[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
-       u64 w[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
-       int rc;
-
-       if (WARN_ON(pk->ndigits != curve->g.ndigits))
-               return -EINVAL;
-
-       /* Check 1: Verify key is not the zero point. */
-       if (ecc_point_is_zero(pk))
-               return -EINVAL;
-
-       /* Check 2: Verify key is in the range [0, p-1]. */
-       if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
-               return -EINVAL;
-
-       if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
-               return -EINVAL;
-
-       /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p. */
-
-       /* Compute y^2 -> store in yy. */
-       rc = kmb_ecc_do_scalar_op(ecc_dev, yy, pk->y, pk->y, curve, pk->ndigits,
-                                 OCS_ECC_INST_CALC_A_MUL_B_MODP);
-       if (rc)
-               goto exit;
-
-       /* Set w = 3, the exponent used to compute x^3. */
-       w[0] = POW_CUBE;
-       /* Compute x^3 -> store in xxx. */
-       rc = kmb_ecc_do_scalar_op(ecc_dev, xxx, pk->x, w, curve, pk->ndigits,
-                                 OCS_ECC_INST_CALC_A_POW_B_MODP);
-       if (rc)
-               goto exit;
-
-       /* Do a*x -> store in w. */
-       rc = kmb_ecc_do_scalar_op(ecc_dev, w, curve->a, pk->x, curve,
-                                 pk->ndigits,
-                                 OCS_ECC_INST_CALC_A_MUL_B_MODP);
-       if (rc)
-               goto exit;
-
-       /* Do ax + b == w + b; store in w. */
-       rc = kmb_ecc_do_scalar_op(ecc_dev, w, w, curve->b, curve,
-                                 pk->ndigits,
-                                 OCS_ECC_INST_CALC_A_ADD_B_MODP);
-       if (rc)
-               goto exit;
-
-       /* x^3 + ax + b == x^3 + w -> store in w. */
-       rc = kmb_ecc_do_scalar_op(ecc_dev, w, xxx, w, curve, pk->ndigits,
-                                 OCS_ECC_INST_CALC_A_ADD_B_MODP);
-       if (rc)
-               goto exit;
-
-       /* Compare y^2 == x^3 + a·x + b. */
-       rc = vli_cmp(yy, w, pk->ndigits);
-       if (rc)
-               rc = -EINVAL;
-
-exit:
-       memzero_explicit(xxx, sizeof(xxx));
-       memzero_explicit(yy, sizeof(yy));
-       memzero_explicit(w, sizeof(w));
-
-       return rc;
-}
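
/*
 * Worked toy example of Check 3 above (illustration only; small scalars
 * instead of VLIs, hypothetical helper name): on the curve
 * y^2 = x^3 + 2x + 3 over GF(97), the point (3, 6) passes, since
 * 6^2 mod 97 = 36 and 3^3 + 2*3 + 3 = 36. The helper mirrors the same
 * square / cube / a*x / add decomposition performed in hardware.
 */
static int toy_on_curve(unsigned int x, unsigned int y, unsigned int a,
                        unsigned int b, unsigned int p)
{
        unsigned int yy  = (y * y) % p;                 /* y^2 mod p */
        unsigned int xxx = (((x * x) % p) * x) % p;     /* x^3 mod p */
        unsigned int w   = ((a * x) % p + b) % p;       /* a*x + b mod p */

        return yy == (xxx + w) % p;                     /* 1 if on curve */
}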
-
-/* SP800-56A section 5.6.2.3.3 full verification */
-static int kmb_ocs_ecc_is_pubkey_valid_full(struct ocs_ecc_dev *ecc_dev,
-                                           const struct ecc_curve *curve,
-                                           struct ecc_point *pk)
-{
-       struct ecc_point *nQ;
-       int rc;
-
-       /* Checks 1 through 3 */
-       rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
-       if (rc)
-               return rc;
-
-       /* Check 4: Verify that nQ is the zero point. */
-       nQ = ecc_alloc_point(pk->ndigits);
-       if (!nQ)
-               return -ENOMEM;
-
-       rc = kmb_ecc_point_mult(ecc_dev, nQ, pk, curve->n, curve);
-       if (rc)
-               goto exit;
-
-       if (!ecc_point_is_zero(nQ))
-               rc = -EINVAL;
-
-exit:
-       ecc_free_point(nQ);
-
-       return rc;
-}
-
-static int kmb_ecc_is_key_valid(const struct ecc_curve *curve,
-                               const u64 *private_key, size_t private_key_len)
-{
-       size_t ndigits = curve->g.ndigits;
-       u64 one[KMB_ECC_VLI_MAX_DIGITS] = {1};
-       u64 res[KMB_ECC_VLI_MAX_DIGITS];
-
-       if (private_key_len != digits_to_bytes(ndigits))
-               return -EINVAL;
-
-       if (!private_key)
-               return -EINVAL;
-
-       /* Make sure the private key is in the range [2, n-3]. */
-       if (vli_cmp(one, private_key, ndigits) != -1)
-               return -EINVAL;
-
-       vli_sub(res, curve->n, one, ndigits);
-       vli_sub(res, res, one, ndigits);
-       if (vli_cmp(res, private_key, ndigits) != 1)
-               return -EINVAL;
-
-       return 0;
-}
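
/*
 * Toy restatement of the two vli comparisons above (illustration only,
 * hypothetical helper): together they enforce 1 < key and key < n - 2,
 * i.e. key in [2, n-3].
 */
static int toy_key_in_range(unsigned long long key, unsigned long long n)
{
        return key >= 2 && key <= n - 3;
}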
-
-/*
- * ECC private keys are generated using the method of extra random bits,
- * equivalent to that described in FIPS 186-4, Appendix B.4.1.
- *
- * d = (c mod (n - 1)) + 1   where c is a string of random bits, 64 bits
- *                           longer than requested
- * 0 <= c mod (n - 1) <= n - 2, which implies that
- * 1 <= d <= n - 1
- *
- * This method generates a private key uniformly distributed in the range
- * [1, n-1].
- */
-static int kmb_ecc_gen_privkey(const struct ecc_curve *curve, u64 *privkey)
-{
-       size_t nbytes = digits_to_bytes(curve->g.ndigits);
-       u64 priv[KMB_ECC_VLI_MAX_DIGITS];
-       size_t nbits;
-       int rc;
-
-       nbits = vli_num_bits(curve->n, curve->g.ndigits);
-
-       /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
-       if (nbits < 160 || curve->g.ndigits > ARRAY_SIZE(priv))
-               return -EINVAL;
-
-       /*
-        * FIPS 186-4 recommends that the private key should be obtained from a
-        * RBG with a security strength equal to or greater than the security
-        * strength associated with N.
-        *
-        * The maximum security strength identified by NIST SP800-57pt1r4 for
-        * ECC is 256 (N >= 512).
-        *
-        * This condition is met by the default RNG because it selects a favored
-        * DRBG with a security strength of 256.
-        */
-       if (crypto_get_default_rng())
-               return -EFAULT;
-
-       rc = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
-       crypto_put_default_rng();
-       if (rc)
-               goto cleanup;
-
-       rc = kmb_ecc_is_key_valid(curve, priv, nbytes);
-       if (rc)
-               goto cleanup;
-
-       ecc_swap_digits(priv, privkey, curve->g.ndigits);
-
-cleanup:
-       memzero_explicit(&priv, sizeof(priv));
-
-       return rc;
-}
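
/*
 * Toy illustration of the extra-random-bits mapping described above
 * (hypothetical helper, scalar instead of VLI arithmetic): with n = 13,
 * any random c maps to d = (c mod 12) + 1, so 1 <= d <= 12, i.e. the
 * full private-key range [1, n-1]; the 64 extra random bits make the
 * residual bias of the modular reduction negligible.
 */
static unsigned int toy_privkey(unsigned long long c, unsigned int n)
{
        return (unsigned int)(c % (n - 1)) + 1; /* d in [1, n-1] */
}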
-
-static int kmb_ocs_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
-                                  unsigned int len)
-{
-       struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
-       struct ecdh params;
-       int rc = 0;
-
-       rc = crypto_ecdh_decode_key(buf, len, &params);
-       if (rc)
-               goto cleanup;
-
-       /* Ensure the key size is not bigger than expected. */
-       if (params.key_size > digits_to_bytes(tctx->curve->g.ndigits)) {
-               rc = -EINVAL;
-               goto cleanup;
-       }
-
-       /* Auto-generate the private key if none was provided. */
-       if (!params.key || !params.key_size) {
-               rc = kmb_ecc_gen_privkey(tctx->curve, tctx->private_key);
-               goto cleanup;
-       }
-
-       rc = kmb_ecc_is_key_valid(tctx->curve, (const u64 *)params.key,
-                                 params.key_size);
-       if (rc)
-               goto cleanup;
-
-       ecc_swap_digits((const u64 *)params.key, tctx->private_key,
-                       tctx->curve->g.ndigits);
-cleanup:
-       memzero_explicit(&params, sizeof(params));
-
-       if (rc)
-               tctx->curve = NULL;
-
-       return rc;
-}
-
-/* Compute shared secret. */
-static int kmb_ecc_do_shared_secret(struct ocs_ecc_ctx *tctx,
-                                   struct kpp_request *req)
-{
-       struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
-       const struct ecc_curve *curve = tctx->curve;
-       u64 shared_secret[KMB_ECC_VLI_MAX_DIGITS];
-       u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
-       size_t copied, nbytes, pubk_len;
-       struct ecc_point *pk, *result;
-       int rc;
-
-       nbytes = digits_to_bytes(curve->g.ndigits);
-
-       /* Public key is a point, thus it has two coordinates */
-       pubk_len = 2 * nbytes;
-
-       /* Copy public key from SG list to pubk_buf. */
-       copied = sg_copy_to_buffer(req->src,
-                                  sg_nents_for_len(req->src, pubk_len),
-                                  pubk_buf, pubk_len);
-       if (copied != pubk_len)
-               return -EINVAL;
-
-       /* Allocate and initialize public key point. */
-       pk = ecc_alloc_point(curve->g.ndigits);
-       if (!pk)
-               return -ENOMEM;
-
-       ecc_swap_digits(pubk_buf, pk->x, curve->g.ndigits);
-       ecc_swap_digits(&pubk_buf[curve->g.ndigits], pk->y, curve->g.ndigits);
-
-       /*
-        * Run the following checks on the public key:
-        * Check 1: Verify key is not the zero point.
-        * Check 2: Verify key is in the range [0, p-1].
-        * Check 3: Verify that y^2 == (x^3 + a·x + b) mod p
-        */
-       rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
-       if (rc)
-               goto exit_free_pk;
-
-       /* Allocate point for storing computed shared secret. */
-       result = ecc_alloc_point(pk->ndigits);
-       if (!result) {
-               rc = -ENOMEM;
-               goto exit_free_pk;
-       }
-
-       /* Calculate the shared secret.*/
-       rc = kmb_ecc_point_mult(ecc_dev, result, pk, tctx->private_key, curve);
-       if (rc)
-               goto exit_free_result;
-
-       if (ecc_point_is_zero(result)) {
-               rc = -EFAULT;
-               goto exit_free_result;
-       }
-
-       /* Copy shared secret from point to buffer. */
-       ecc_swap_digits(result->x, shared_secret, result->ndigits);
-
-       /* The request might ask for fewer bytes than we have. */
-       nbytes = min_t(size_t, nbytes, req->dst_len);
-
-       copied = sg_copy_from_buffer(req->dst,
-                                    sg_nents_for_len(req->dst, nbytes),
-                                    shared_secret, nbytes);
-
-       if (copied != nbytes)
-               rc = -EINVAL;
-
-       memzero_explicit(shared_secret, sizeof(shared_secret));
-
-exit_free_result:
-       ecc_free_point(result);
-
-exit_free_pk:
-       ecc_free_point(pk);
-
-       return rc;
-}
-
-/* Compute public key. */
-static int kmb_ecc_do_public_key(struct ocs_ecc_ctx *tctx,
-                                struct kpp_request *req)
-{
-       const struct ecc_curve *curve = tctx->curve;
-       u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
-       struct ecc_point *pk;
-       size_t pubk_len;
-       size_t copied;
-       int rc;
-
-       /* Public key is a point, so it has double the digits. */
-       pubk_len = 2 * digits_to_bytes(curve->g.ndigits);
-
-       pk = ecc_alloc_point(curve->g.ndigits);
-       if (!pk)
-               return -ENOMEM;
-
-       /* Public Key(pk) = priv * G. */
-       rc = kmb_ecc_point_mult(tctx->ecc_dev, pk, &curve->g, tctx->private_key,
-                               curve);
-       if (rc)
-               goto exit;
-
-       /* SP800-56A rev 3 5.6.2.1.3 key check */
-       if (kmb_ocs_ecc_is_pubkey_valid_full(tctx->ecc_dev, curve, pk)) {
-               rc = -EAGAIN;
-               goto exit;
-       }
-
-       /* Copy public key from point to buffer. */
-       ecc_swap_digits(pk->x, pubk_buf, pk->ndigits);
-       ecc_swap_digits(pk->y, &pubk_buf[pk->ndigits], pk->ndigits);
-
-       /* Copy public key to req->dst. */
-       copied = sg_copy_from_buffer(req->dst,
-                                    sg_nents_for_len(req->dst, pubk_len),
-                                    pubk_buf, pubk_len);
-
-       if (copied != pubk_len)
-               rc = -EINVAL;
-
-exit:
-       ecc_free_point(pk);
-
-       return rc;
-}
-
-static int kmb_ocs_ecc_do_one_request(struct crypto_engine *engine,
-                                     void *areq)
-{
-       struct kpp_request *req = container_of(areq, struct kpp_request, base);
-       struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
-       struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
-       int rc;
-
-       if (req->src)
-               rc = kmb_ecc_do_shared_secret(tctx, req);
-       else
-               rc = kmb_ecc_do_public_key(tctx, req);
-
-       crypto_finalize_kpp_request(ecc_dev->engine, req, rc);
-
-       return 0;
-}
-
-static int kmb_ocs_ecdh_generate_public_key(struct kpp_request *req)
-{
-       struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
-       const struct ecc_curve *curve = tctx->curve;
-
-       /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
-       if (!tctx->curve)
-               return -EINVAL;
-
-       /* Ensure dst is present. */
-       if (!req->dst)
-               return -EINVAL;
-
-       /* Check that the request dst is big enough to hold the public key. */
-       if (req->dst_len < (2 * digits_to_bytes(curve->g.ndigits)))
-               return -EINVAL;
-
-       /* 'src' must not be set when generating the public key. */
-       if (req->src)
-               return -EINVAL;
-
-       return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
-                                                    req);
-}
-
-static int kmb_ocs_ecdh_compute_shared_secret(struct kpp_request *req)
-{
-       struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
-       const struct ecc_curve *curve = tctx->curve;
-
-       /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
-       if (!tctx->curve)
-               return -EINVAL;
-
-       /* Ensure dst is present. */
-       if (!req->dst)
-               return -EINVAL;
-
-       /* Ensure src is present. */
-       if (!req->src)
-               return -EINVAL;
-
-       /*
-        * req->src is expected to hold the other side's public key, so its
-        * length must be 2 * coordinate size (in bytes).
-        */
-       if (req->src_len != 2 * digits_to_bytes(curve->g.ndigits))
-               return -EINVAL;
-
-       return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
-                                                    req);
-}
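
/*
 * Illustration only (not part of this driver): a kernel consumer reaches
 * the two entry points above through the generic KPP API. A minimal
 * sketch of the public-key half, assuming a hypothetical helper name
 * example_ecdh_genpub() and a caller-provided scatterlist large enough
 * for the 64-byte P-256 public key:
 */
#include <crypto/ecdh.h>
#include <crypto/kpp.h>

static int example_ecdh_genpub(struct scatterlist *pub_sg)
{
        struct ecdh p = {};     /* no key: the driver auto-generates one */
        struct crypto_kpp *tfm;
        struct kpp_request *req;
        DECLARE_CRYPTO_WAIT(wait);
        char buf[64];
        int rc;

        tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = crypto_ecdh_encode_key(buf, crypto_ecdh_key_len(&p), &p);
        if (rc)
                goto free_tfm;

        rc = crypto_kpp_set_secret(tfm, buf, crypto_ecdh_key_len(&p));
        if (rc)
                goto free_tfm;

        req = kpp_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                rc = -ENOMEM;
                goto free_tfm;
        }

        kpp_request_set_input(req, NULL, 0);    /* no src: generate pubkey */
        kpp_request_set_output(req, pub_sg, 64);
        kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                 crypto_req_done, &wait);
        rc = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);

        kpp_request_free(req);
free_tfm:
        crypto_free_kpp(tfm);
        return rc;
}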
-
-static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id)
-{
-       memset(tctx, 0, sizeof(*tctx));
-
-       tctx->ecc_dev = kmb_ocs_ecc_find_dev(tctx);
-
-       if (IS_ERR(tctx->ecc_dev)) {
-               pr_err("Failed to find the device : %ld\n",
-                      PTR_ERR(tctx->ecc_dev));
-               return PTR_ERR(tctx->ecc_dev);
-       }
-
-       tctx->curve = ecc_get_curve(curve_id);
-       if (!tctx->curve)
-               return -EOPNOTSUPP;
-
-       tctx->engine_ctx.op.prepare_request = NULL;
-       tctx->engine_ctx.op.do_one_request = kmb_ocs_ecc_do_one_request;
-       tctx->engine_ctx.op.unprepare_request = NULL;
-
-       return 0;
-}
-
-static int kmb_ocs_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
-{
-       struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
-
-       return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P256);
-}
-
-static int kmb_ocs_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
-{
-       struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
-
-       return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P384);
-}
-
-static void kmb_ocs_ecdh_exit_tfm(struct crypto_kpp *tfm)
-{
-       struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
-
-       /* Zeroize the whole private-key array, not just its first digit. */
-       memzero_explicit(tctx->private_key, sizeof(tctx->private_key));
-}
-
-static unsigned int kmb_ocs_ecdh_max_size(struct crypto_kpp *tfm)
-{
-       struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
-
-       /* Public key is made of two coordinates, so double the digits. */
-       return digits_to_bytes(tctx->curve->g.ndigits) * 2;
-}
-
-static struct kpp_alg ocs_ecdh_p256 = {
-       .set_secret = kmb_ocs_ecdh_set_secret,
-       .generate_public_key = kmb_ocs_ecdh_generate_public_key,
-       .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
-       .init = kmb_ocs_ecdh_nist_p256_init_tfm,
-       .exit = kmb_ocs_ecdh_exit_tfm,
-       .max_size = kmb_ocs_ecdh_max_size,
-       .base = {
-               .cra_name = "ecdh-nist-p256",
-               .cra_driver_name = "ecdh-nist-p256-keembay-ocs",
-               .cra_priority = KMB_OCS_ECC_PRIORITY,
-               .cra_module = THIS_MODULE,
-               .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
-       },
-};
-
-static struct kpp_alg ocs_ecdh_p384 = {
-       .set_secret = kmb_ocs_ecdh_set_secret,
-       .generate_public_key = kmb_ocs_ecdh_generate_public_key,
-       .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
-       .init = kmb_ocs_ecdh_nist_p384_init_tfm,
-       .exit = kmb_ocs_ecdh_exit_tfm,
-       .max_size = kmb_ocs_ecdh_max_size,
-       .base = {
-               .cra_name = "ecdh-nist-p384",
-               .cra_driver_name = "ecdh-nist-p384-keembay-ocs",
-               .cra_priority = KMB_OCS_ECC_PRIORITY,
-               .cra_module = THIS_MODULE,
-               .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
-       },
-};
-
-static irqreturn_t ocs_ecc_irq_handler(int irq, void *dev_id)
-{
-       struct ocs_ecc_dev *ecc_dev = dev_id;
-       u32 status;
-
-       /*
-        * Read the status register and write it back to clear the
-        * DONE_INT_STATUS bit.
-        */
-       status = ioread32(ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
-       iowrite32(status, ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
-
-       if (!(status & HW_OCS_ECC_ISR_INT_STATUS_DONE))
-               return IRQ_NONE;
-
-       complete(&ecc_dev->irq_done);
-
-       return IRQ_HANDLED;
-}
-
-static int kmb_ocs_ecc_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct ocs_ecc_dev *ecc_dev;
-       int rc;
-
-       ecc_dev = devm_kzalloc(dev, sizeof(*ecc_dev), GFP_KERNEL);
-       if (!ecc_dev)
-               return -ENOMEM;
-
-       ecc_dev->dev = dev;
-
-       platform_set_drvdata(pdev, ecc_dev);
-
-       INIT_LIST_HEAD(&ecc_dev->list);
-       init_completion(&ecc_dev->irq_done);
-
-       /* Get base register address. */
-       ecc_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(ecc_dev->base_reg)) {
-               dev_err(dev, "Failed to get base address\n");
-               rc = PTR_ERR(ecc_dev->base_reg);
-               goto list_del;
-       }
-
-       /* Get and request IRQ */
-       ecc_dev->irq = platform_get_irq(pdev, 0);
-       if (ecc_dev->irq < 0) {
-               rc = ecc_dev->irq;
-               goto list_del;
-       }
-
-       rc = devm_request_threaded_irq(dev, ecc_dev->irq, ocs_ecc_irq_handler,
-                                      NULL, 0, "keembay-ocs-ecc", ecc_dev);
-       if (rc < 0) {
-               dev_err(dev, "Could not request IRQ\n");
-               goto list_del;
-       }
-
-       /* Add device to the list of OCS ECC devices. */
-       spin_lock(&ocs_ecc.lock);
-       list_add_tail(&ecc_dev->list, &ocs_ecc.dev_list);
-       spin_unlock(&ocs_ecc.lock);
-
-       /* Initialize crypto engine. */
-       ecc_dev->engine = crypto_engine_alloc_init(dev, 1);
-       if (!ecc_dev->engine) {
-               dev_err(dev, "Could not allocate crypto engine\n");
-               rc = -ENOMEM;
-               goto list_del;
-       }
-
-       rc = crypto_engine_start(ecc_dev->engine);
-       if (rc) {
-               dev_err(dev, "Could not start crypto engine\n");
-               goto cleanup;
-       }
-
-       /* Register the KPP algo. */
-       rc = crypto_register_kpp(&ocs_ecdh_p256);
-       if (rc) {
-               dev_err(dev,
-                       "Could not register OCS algorithms with Crypto API\n");
-               goto cleanup;
-       }
-
-       rc = crypto_register_kpp(&ocs_ecdh_p384);
-       if (rc) {
-               dev_err(dev,
-                       "Could not register OCS algorithms with Crypto API\n");
-               goto ocs_ecdh_p384_error;
-       }
-
-       return 0;
-
-ocs_ecdh_p384_error:
-       crypto_unregister_kpp(&ocs_ecdh_p256);
-
-cleanup:
-       crypto_engine_exit(ecc_dev->engine);
-
-list_del:
-       spin_lock(&ocs_ecc.lock);
-       list_del(&ecc_dev->list);
-       spin_unlock(&ocs_ecc.lock);
-
-       return rc;
-}
-
-static int kmb_ocs_ecc_remove(struct platform_device *pdev)
-{
-       struct ocs_ecc_dev *ecc_dev;
-
-       ecc_dev = platform_get_drvdata(pdev);
-
-       crypto_unregister_kpp(&ocs_ecdh_p384);
-       crypto_unregister_kpp(&ocs_ecdh_p256);
-
-       spin_lock(&ocs_ecc.lock);
-       list_del(&ecc_dev->list);
-       spin_unlock(&ocs_ecc.lock);
-
-       crypto_engine_exit(ecc_dev->engine);
-
-       return 0;
-}
-
-/* Device tree driver match. */
-static const struct of_device_id kmb_ocs_ecc_of_match[] = {
-       {
-               .compatible = "intel,keembay-ocs-ecc",
-       },
-       {}
-};
-
-/* The OCS driver is a platform device. */
-static struct platform_driver kmb_ocs_ecc_driver = {
-       .probe = kmb_ocs_ecc_probe,
-       .remove = kmb_ocs_ecc_remove,
-       .driver = {
-                       .name = DRV_NAME,
-                       .of_match_table = kmb_ocs_ecc_of_match,
-               },
-};
-module_platform_driver(kmb_ocs_ecc_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Intel Keem Bay OCS ECC Driver");
-MODULE_ALIAS_CRYPTO("ecdh-nist-p256");
-MODULE_ALIAS_CRYPTO("ecdh-nist-p384");
-MODULE_ALIAS_CRYPTO("ecdh-nist-p256-keembay-ocs");
-MODULE_ALIAS_CRYPTO("ecdh-nist-p384-keembay-ocs");
diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
deleted file mode 100644 (file)
index d4bcbed..0000000
+++ /dev/null
@@ -1,1264 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Keem Bay OCS HCU Crypto Driver.
- *
- * Copyright (C) 2018-2020 Intel Corporation
- */
-
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-
-#include <crypto/engine.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha2.h>
-#include <crypto/sm3.h>
-#include <crypto/hmac.h>
-#include <crypto/internal/hash.h>
-
-#include "ocs-hcu.h"
-
-#define DRV_NAME       "keembay-ocs-hcu"
-
-/* Flag marking a final request. */
-#define REQ_FINAL                      BIT(0)
-/* Flag marking an HMAC request. */
-#define REQ_FLAGS_HMAC                 BIT(1)
-/* Flag set when HW HMAC is being used. */
-#define REQ_FLAGS_HMAC_HW              BIT(2)
-/* Flag set when SW HMAC is being used. */
-#define REQ_FLAGS_HMAC_SW              BIT(3)
-
-/**
- * struct ocs_hcu_ctx - OCS HCU Transform context.
- * @engine_ctx:         Crypto Engine context.
- * @hcu_dev:    The OCS HCU device used by the transformation.
- * @key:        The key (used only for HMAC transformations).
- * @key_len:    The length of the key.
- * @is_sm3_tfm:  Whether or not this is an SM3 transformation.
- * @is_hmac_tfm: Whether or not this is an HMAC transformation.
- */
-struct ocs_hcu_ctx {
-       struct crypto_engine_ctx engine_ctx;
-       struct ocs_hcu_dev *hcu_dev;
-       u8 key[SHA512_BLOCK_SIZE];
-       size_t key_len;
-       bool is_sm3_tfm;
-       bool is_hmac_tfm;
-};
-
-/**
- * struct ocs_hcu_rctx - Context for the request.
- * @hcu_dev:       OCS HCU device to be used to service the request.
- * @flags:         Flags tracking request status.
- * @algo:          Algorithm to use for the request.
- * @blk_sz:        Block size of the transformation / request.
- * @dig_sz:        Digest size of the transformation / request.
- * @dma_list:      OCS DMA linked list.
- * @hash_ctx:      OCS HCU hashing context.
- * @buffer:        Buffer to store: partial block of data and SW HMAC
- *                 artifacts (ipad, opad, etc.).
- * @buf_cnt:       Number of bytes currently stored in the buffer.
- * @buf_dma_addr:   The DMA address of @buffer (when mapped).
- * @buf_dma_count:  The number of bytes in @buffer currently DMA-mapped.
- * @sg:                    Head of the scatterlist entries containing data.
- * @sg_data_total:  Total data in the SG list at any time.
- * @sg_data_offset: Offset into the data of the current individual SG node.
- * @sg_dma_nents:   Number of sg entries mapped in dma_list.
- */
-struct ocs_hcu_rctx {
-       struct ocs_hcu_dev      *hcu_dev;
-       u32                     flags;
-       enum ocs_hcu_algo       algo;
-       size_t                  blk_sz;
-       size_t                  dig_sz;
-       struct ocs_hcu_dma_list *dma_list;
-       struct ocs_hcu_hash_ctx hash_ctx;
-       /*
-        * Buffer is double the block size because we need space for SW HMAC
-        * artifacts, i.e.:
-        * - ipad (1 block) + a possible partial block of data.
-        * - opad (1 block) + digest of H(k ^ ipad || m)
-        */
-       u8                      buffer[2 * SHA512_BLOCK_SIZE];
-       size_t                  buf_cnt;
-       dma_addr_t              buf_dma_addr;
-       size_t                  buf_dma_count;
-       struct scatterlist      *sg;
-       unsigned int            sg_data_total;
-       unsigned int            sg_data_offset;
-       unsigned int            sg_dma_nents;
-};
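
/*
 * Illustration of the SW HMAC layout described above (hypothetical
 * helper; assumes the key has already been padded or hashed down to
 * blk_sz bytes): HMAC(k, m) = H(k ^ opad || H(k ^ ipad || m)), with the
 * pads derived from the HMAC_IPAD_VALUE / HMAC_OPAD_VALUE constants in
 * <crypto/hmac.h>, which this file already includes.
 */
static void example_build_pads(const u8 *key, size_t blk_sz, u8 *ipad,
                               u8 *opad)
{
        size_t i;

        for (i = 0; i < blk_sz; i++) {
                ipad[i] = key[i] ^ HMAC_IPAD_VALUE;     /* 0x36 */
                opad[i] = key[i] ^ HMAC_OPAD_VALUE;     /* 0x5c */
        }
}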
-
-/**
- * struct ocs_hcu_drv - Driver data
- * @dev_list:  The list of HCU devices.
- * @lock:      The lock protecting dev_list.
- */
-struct ocs_hcu_drv {
-       struct list_head dev_list;
-       spinlock_t lock; /* Protects dev_list. */
-};
-
-static struct ocs_hcu_drv ocs_hcu = {
-       .dev_list = LIST_HEAD_INIT(ocs_hcu.dev_list),
-       .lock = __SPIN_LOCK_UNLOCKED(ocs_hcu.lock),
-};
-
-/*
- * Return the total amount of data in the request; that is: the data in the
- * request buffer + the data in the sg list.
- */
-static inline unsigned int kmb_get_total_data(struct ocs_hcu_rctx *rctx)
-{
-       return rctx->sg_data_total + rctx->buf_cnt;
-}
-
-/* Move remaining content of scatter-gather list to context buffer. */
-static int flush_sg_to_ocs_buffer(struct ocs_hcu_rctx *rctx)
-{
-       size_t count;
-
-       if (rctx->sg_data_total > (sizeof(rctx->buffer) - rctx->buf_cnt)) {
-               WARN(1, "%s: sg data does not fit in buffer\n", __func__);
-               return -EINVAL;
-       }
-
-       while (rctx->sg_data_total) {
-               if (!rctx->sg) {
-                       WARN(1, "%s: unexpected NULL sg\n", __func__);
-                       return -EINVAL;
-               }
-               /*
-                * If current sg has been fully processed, skip to the next
-                * one.
-                */
-               if (rctx->sg_data_offset == rctx->sg->length) {
-                       rctx->sg = sg_next(rctx->sg);
-                       rctx->sg_data_offset = 0;
-                       continue;
-               }
-               /*
-                * Determine how much data can be copied from this node: the
-                * minimum of the length left in the sg node and the total
-                * data left in the request.
-                */
-               count = min(rctx->sg->length - rctx->sg_data_offset,
-                           rctx->sg_data_total);
-               /* Copy from scatter-list entry to context buffer. */
-               scatterwalk_map_and_copy(&rctx->buffer[rctx->buf_cnt],
-                                        rctx->sg, rctx->sg_data_offset,
-                                        count, 0);
-
-               rctx->sg_data_offset += count;
-               rctx->sg_data_total -= count;
-               rctx->buf_cnt += count;
-       }
-
-       return 0;
-}
-
-static struct ocs_hcu_dev *kmb_ocs_hcu_find_dev(struct ahash_request *req)
-{
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
-
-       /* If the HCU device for the request was previously set, return it. */
-       if (tctx->hcu_dev)
-               return tctx->hcu_dev;
-
-       /*
-        * Otherwise, get the first HCU device available (there should be one
-        * and only one device).
-        */
-       spin_lock_bh(&ocs_hcu.lock);
-       tctx->hcu_dev = list_first_entry_or_null(&ocs_hcu.dev_list,
-                                                struct ocs_hcu_dev,
-                                                list);
-       spin_unlock_bh(&ocs_hcu.lock);
-
-       return tctx->hcu_dev;
-}
-
-/* Free OCS DMA linked list and DMA-able context buffer. */
-static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
-                                   struct ocs_hcu_rctx *rctx)
-{
-       struct ocs_hcu_dev *hcu_dev = rctx->hcu_dev;
-       struct device *dev = hcu_dev->dev;
-
-       /* Unmap rctx->buffer (if mapped). */
-       if (rctx->buf_dma_count) {
-               dma_unmap_single(dev, rctx->buf_dma_addr, rctx->buf_dma_count,
-                                DMA_TO_DEVICE);
-               rctx->buf_dma_count = 0;
-       }
-
-       /* Unmap req->src (if mapped). */
-       if (rctx->sg_dma_nents) {
-               dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
-               rctx->sg_dma_nents = 0;
-       }
-
-       /* Free dma_list (if allocated). */
-       if (rctx->dma_list) {
-               ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
-               rctx->dma_list = NULL;
-       }
-}
-
-/*
- * Prepare for DMA operation:
- * - DMA-map request context buffer (if needed)
- * - DMA-map SG list (only the entries to be processed, see note below)
- * - Allocate OCS HCU DMA linked list (number of elements = SG entries to
- *   process + context buffer (if not empty)).
- * - Add DMA-mapped request context buffer to OCS HCU DMA list.
- * - Add SG entries to DMA list.
- *
- * Note: if this is a final request, we process all the data in the SG list,
- * otherwise we can only process up to the maximum amount of block-aligned data
- * (the remainder will be put into the context buffer and processed in the next
- * request).
- */
-static int kmb_ocs_dma_prepare(struct ahash_request *req)
-{
-       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
-       struct device *dev = rctx->hcu_dev->dev;
-       unsigned int remainder = 0;
-       unsigned int total;
-       size_t nents;
-       size_t count;
-       int rc;
-       int i;
-
-       /* This function should be called only when there is data to process. */
-       total = kmb_get_total_data(rctx);
-       if (!total)
-               return -EINVAL;
-
-       /*
-        * If this is not a final DMA (terminated DMA), the data passed to the
-        * HCU must be aligned to the block size; compute the remainder data to
-        * be processed in the next request.
-        */
-       if (!(rctx->flags & REQ_FINAL))
-               remainder = total % rctx->blk_sz;
-
-       /* Determine the number of scatter gather list entries to process. */
-       nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
-
-       /* If there are entries to process, map them. */
-       if (nents) {
-               rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
-                                               DMA_TO_DEVICE);
-               if (!rctx->sg_dma_nents) {
-                       dev_err(dev, "Failed to MAP SG\n");
-                       rc = -ENOMEM;
-                       goto cleanup;
-               }
-               /*
-                * The value returned by dma_map_sg() can be < nents; so update
-                * nents accordingly.
-                */
-               nents = rctx->sg_dma_nents;
-       }
-
-       /*
-        * If context buffer is not empty, map it and add extra DMA entry for
-        * it.
-        */
-       if (rctx->buf_cnt) {
-               rctx->buf_dma_addr = dma_map_single(dev, rctx->buffer,
-                                                   rctx->buf_cnt,
-                                                   DMA_TO_DEVICE);
-               if (dma_mapping_error(dev, rctx->buf_dma_addr)) {
-                       dev_err(dev, "Failed to map request context buffer\n");
-                       rc = -ENOMEM;
-                       goto cleanup;
-               }
-               rctx->buf_dma_count = rctx->buf_cnt;
-               /* Increase number of dma entries. */
-               nents++;
-       }
-
-       /* Allocate OCS HCU DMA list. */
-       rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);
-       if (!rctx->dma_list) {
-               rc = -ENOMEM;
-               goto cleanup;
-       }
-
-       /* Add the request context buffer (if previously DMA-mapped). */
-       if (rctx->buf_dma_count) {
-               rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,
-                                              rctx->buf_dma_addr,
-                                              rctx->buf_dma_count);
-               if (rc)
-                       goto cleanup;
-       }
-
-       /* Add the SG nodes to be processed to the DMA linked list. */
-       for_each_sg(req->src, rctx->sg, rctx->sg_dma_nents, i) {
-               /*
-                * The number of bytes to add to the list entry is the minimum
-                * between:
-                * - The DMA length of the SG entry.
-                * - The data left to be processed.
-                */
-               count = min(rctx->sg_data_total - remainder,
-                           sg_dma_len(rctx->sg) - rctx->sg_data_offset);
-               /*
-                * Do not create a zero-length DMA descriptor; this guards
-                * against zero-length SG nodes.
-                */
-               if (count == 0)
-                       continue;
-               /* Add sg to HCU DMA list. */
-               rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev,
-                                              rctx->dma_list,
-                                              rctx->sg->dma_address,
-                                              count);
-               if (rc)
-                       goto cleanup;
-
-               /* Update amount of data remaining in SG list. */
-               rctx->sg_data_total -= count;
-
-               /*
-                * If the remaining data equals the remainder (note: the 'less
-                * than' case should never happen in practice), we are done:
-                * update the offset and exit the loop.
-                */
-               if (rctx->sg_data_total <= remainder) {
-                       WARN_ON(rctx->sg_data_total < remainder);
-                       rctx->sg_data_offset += count;
-                       break;
-               }
-
-               /*
-                * If we get here, it is because we need to process the next
-                * sg in the list; set the offset within the sg to 0.
-                */
-               rctx->sg_data_offset = 0;
-       }
-
-       return 0;
-cleanup:
-       dev_err(dev, "Failed to prepare DMA.\n");
-       kmb_ocs_hcu_dma_cleanup(req, rctx);
-
-       return rc;
-}
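-
-/*
- * Editor's sketch (not part of the original driver): the deferral rule used
- * by kmb_ocs_dma_prepare() for non-final requests. For example, with
- * total = 100 bytes and blk_sz = 64, remainder = 100 % 64 = 36, so 64
- * block-aligned bytes are DMA-mapped now and 36 bytes are deferred to the
- * context buffer for the next request.
- */
-static inline unsigned int example_deferred_bytes(unsigned int total,
-                                                 unsigned int blk_sz)
-{
-       return total % blk_sz;
-}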
-
-static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
-{
-       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
-
-       /* Clear buffer of any data. */
-       memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
-}
-
-static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
-{
-       struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
-
-       if (!hcu_dev)
-               return -ENOENT;
-
-       return crypto_transfer_hash_request_to_engine(hcu_dev->engine, req);
-}
-
-static int prepare_ipad(struct ahash_request *req)
-{
-       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
-       int i;
-
-       WARN(rctx->buf_cnt, "%s: Context buffer is not empty\n", __func__);
-       WARN(!(rctx->flags & REQ_FLAGS_HMAC_SW),
-            "%s: HMAC_SW flag is not set\n", __func__);
-       /*
-        * Key length must be equal to block size. If the key is shorter, we
-        * pad it with zeros (note: the key cannot be longer, since longer
-        * keys are hashed by kmb_ocs_hcu_setkey()).
-        */
-       if (ctx->key_len > rctx->blk_sz) {
-               WARN(1, "%s: Invalid key length in tfm context\n", __func__);
-               return -EINVAL;
-       }
-       memzero_explicit(&ctx->key[ctx->key_len],
-                        rctx->blk_sz - ctx->key_len);
-       ctx->key_len = rctx->blk_sz;
-       /*
-        * Prepare IPAD for HMAC. Only done for the first block.
-        * HMAC(k, m) = H(k ^ opad || H(k ^ ipad || m))
-        * k ^ ipad will be the first hashed block.
-        * k ^ opad will be calculated in the final request.
-        * Only needed if not using HW HMAC.
-        */
-       for (i = 0; i < rctx->blk_sz; i++)
-               rctx->buffer[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
-       rctx->buf_cnt = rctx->blk_sz;
-
-       return 0;
-}
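-
-/*
- * Editor's sketch (hypothetical helper, not from this patch): the pad
- * derivation used by prepare_ipad() above and by the OPAD step in
- * kmb_ocs_hcu_do_one_request(), i.e. one half of
- * HMAC(k, m) = H((k ^ opad) || H((k ^ ipad) || m)).
- */
-static void example_xor_pad(u8 *dst, const u8 *key, size_t blk_sz, u8 pad)
-{
-       size_t i;
-
-       /* pad is HMAC_IPAD_VALUE (0x36) or HMAC_OPAD_VALUE (0x5c). */
-       for (i = 0; i < blk_sz; i++)
-               dst[i] = key[i] ^ pad;
-}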
-
-static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
-{
-       struct ahash_request *req = container_of(areq, struct ahash_request,
-                                                base);
-       struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
-       struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
-       int rc;
-       int i;
-
-       if (!hcu_dev) {
-               rc = -ENOENT;
-               goto error;
-       }
-
-       /*
-        * If hardware HMAC flag is set, perform HMAC in hardware.
-        *
-        * NOTE: this flag implies REQ_FINAL && kmb_get_total_data(rctx)
-        */
-       if (rctx->flags & REQ_FLAGS_HMAC_HW) {
-               /* Map input data into the HCU DMA linked list. */
-               rc = kmb_ocs_dma_prepare(req);
-               if (rc)
-                       goto error;
-
-               rc = ocs_hcu_hmac(hcu_dev, rctx->algo, tctx->key, tctx->key_len,
-                                 rctx->dma_list, req->result, rctx->dig_sz);
-
-               /* Unmap data and free DMA list regardless of return code. */
-               kmb_ocs_hcu_dma_cleanup(req, rctx);
-
-               /* Process previous return code. */
-               if (rc)
-                       goto error;
-
-               goto done;
-       }
-
-       /* Handle update request case. */
-       if (!(rctx->flags & REQ_FINAL)) {
-               /* Update should always have input data. */
-               if (!kmb_get_total_data(rctx))
-                       return -EINVAL;
-
-               /* Map input data into the HCU DMA linked list. */
-               rc = kmb_ocs_dma_prepare(req);
-               if (rc)
-                       goto error;
-
-               /* Do hashing step. */
-               rc = ocs_hcu_hash_update(hcu_dev, &rctx->hash_ctx,
-                                        rctx->dma_list);
-
-               /* Unmap data and free DMA list regardless of return code. */
-               kmb_ocs_hcu_dma_cleanup(req, rctx);
-
-               /* Process previous return code. */
-               if (rc)
-                       goto error;
-
-               /*
-                * Reset request buffer count (data in the buffer was just
-                * processed).
-                */
-               rctx->buf_cnt = 0;
-               /*
-                * Move remaining sg data into the request buffer, so that it
-                * will be processed during the next request.
-                *
-                * NOTE: we have remaining data if kmb_get_total_data() was not
-                * a multiple of block size.
-                */
-               rc = flush_sg_to_ocs_buffer(rctx);
-               if (rc)
-                       goto error;
-
-               goto done;
-       }
-
-       /* If we get here, this is a final request. */
-
-       /* If there is data to process, use finup. */
-       if (kmb_get_total_data(rctx)) {
-               /* Map input data into the HCU DMA linked list. */
-               rc = kmb_ocs_dma_prepare(req);
-               if (rc)
-                       goto error;
-
-               /* Do hashing step. */
-               rc = ocs_hcu_hash_finup(hcu_dev, &rctx->hash_ctx,
-                                       rctx->dma_list,
-                                       req->result, rctx->dig_sz);
-               /* Unmap data and free DMA list regardless of return code. */
-               kmb_ocs_hcu_dma_cleanup(req, rctx);
-
-               /* Process previous return code. */
-               if (rc)
-                       goto error;
-
-       } else {  /* Otherwise (if we have no data), use final. */
-               rc = ocs_hcu_hash_final(hcu_dev, &rctx->hash_ctx, req->result,
-                                       rctx->dig_sz);
-               if (rc)
-                       goto error;
-       }
-
-       /*
-        * If we are finalizing a SW HMAC request, we just computed the result
-        * of: H(k ^ ipad || m).
-        *
-        * We now need to complete the HMAC calculation with the OPAD step,
-        * that is, we need to compute H(k ^ opad || digest), where digest is
-        * the digest we just obtained, i.e., H(k ^ ipad || m).
-        */
-       if (rctx->flags & REQ_FLAGS_HMAC_SW) {
-               /*
-                * Compute k ^ opad and store it in the request buffer (which
-                * is not used anymore at this point).
-                * Note: the key has been padded / hashed already (so
-                * keylen == blksz).
-                */
-               WARN_ON(tctx->key_len != rctx->blk_sz);
-               for (i = 0; i < rctx->blk_sz; i++)
-                       rctx->buffer[i] = tctx->key[i] ^ HMAC_OPAD_VALUE;
-               /* Now append the digest to the rest of the buffer. */
-               for (i = 0; i < rctx->dig_sz; i++)
-                       rctx->buffer[rctx->blk_sz + i] = req->result[i];
-
-               /* Now hash the buffer to obtain the final HMAC. */
-               rc = ocs_hcu_digest(hcu_dev, rctx->algo, rctx->buffer,
-                                   rctx->blk_sz + rctx->dig_sz, req->result,
-                                   rctx->dig_sz);
-               if (rc)
-                       goto error;
-       }
-
-       /* Perform secure clean-up. */
-       kmb_ocs_hcu_secure_cleanup(req);
-done:
-       crypto_finalize_hash_request(hcu_dev->engine, req, 0);
-
-       return 0;
-
-error:
-       kmb_ocs_hcu_secure_cleanup(req);
-       return rc;
-}
-
-static int kmb_ocs_hcu_init(struct ahash_request *req)
-{
-       struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
-       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
-
-       if (!hcu_dev)
-               return -ENOENT;
-
-       /* Initialize entire request context to zero. */
-       memset(rctx, 0, sizeof(*rctx));
-
-       rctx->hcu_dev = hcu_dev;
-       rctx->dig_sz = crypto_ahash_digestsize(tfm);
-
-       switch (rctx->dig_sz) {
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
-       case SHA224_DIGEST_SIZE:
-               rctx->blk_sz = SHA224_BLOCK_SIZE;
-               rctx->algo = OCS_HCU_ALGO_SHA224;
-               break;
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
-       case SHA256_DIGEST_SIZE:
-               rctx->blk_sz = SHA256_BLOCK_SIZE;
-               /*
-                * SHA256 and SM3 have the same digest size: use info from tfm
-                * context to find out which one we should use.
-                */
-               rctx->algo = ctx->is_sm3_tfm ? OCS_HCU_ALGO_SM3 :
-                                              OCS_HCU_ALGO_SHA256;
-               break;
-       case SHA384_DIGEST_SIZE:
-               rctx->blk_sz = SHA384_BLOCK_SIZE;
-               rctx->algo = OCS_HCU_ALGO_SHA384;
-               break;
-       case SHA512_DIGEST_SIZE:
-               rctx->blk_sz = SHA512_BLOCK_SIZE;
-               rctx->algo = OCS_HCU_ALGO_SHA512;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       /* Initialize intermediate data. */
-       ocs_hcu_hash_init(&rctx->hash_ctx, rctx->algo);
-
-       /* If this is an HMAC request, set the HMAC flag. */
-       if (ctx->is_hmac_tfm)
-               rctx->flags |= REQ_FLAGS_HMAC;
-
-       return 0;
-}
-
-static int kmb_ocs_hcu_update(struct ahash_request *req)
-{
-       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
-       int rc;
-
-       if (!req->nbytes)
-               return 0;
-
-       rctx->sg_data_total = req->nbytes;
-       rctx->sg_data_offset = 0;
-       rctx->sg = req->src;
-
-       /*
-        * If we are doing HMAC, then we must use SW-assisted HMAC, since HW
-        * HMAC does not support context switching (it can only be used with
-        * finup() or digest()).
-        */
-       if (rctx->flags & REQ_FLAGS_HMAC &&
-           !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
-               rctx->flags |= REQ_FLAGS_HMAC_SW;
-               rc = prepare_ipad(req);
-               if (rc)
-                       return rc;
-       }
-
-       /*
-        * If remaining sg_data fits into ctx buffer, just copy it there; we'll
-        * process it at the next update() or final().
-        */
-       if (rctx->sg_data_total <= (sizeof(rctx->buffer) - rctx->buf_cnt))
-               return flush_sg_to_ocs_buffer(rctx);
-
-       return kmb_ocs_hcu_handle_queue(req);
-}
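-
-/*
- * Editor's sketch (hypothetical helper): the buffering decision made by
- * kmb_ocs_hcu_update() above -- new data is only queued to the engine once
- * it no longer fits in the request context buffer.
- */
-static inline bool example_fits_in_ctx_buf(size_t new_bytes, size_t buf_cnt,
-                                          size_t buf_size)
-{
-       return new_bytes <= buf_size - buf_cnt;
-}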
-
-/* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
-static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
-{
-       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
-       int rc;
-
-       rctx->flags |= REQ_FINAL;
-
-       /*
-        * If this is an HMAC request and, so far, we did not have to switch
-        * to SW HMAC, check whether we can use HW HMAC.
-        */
-       if (rctx->flags & REQ_FLAGS_HMAC &&
-           !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
-               /*
-                * If we get here, no data has been processed so far, so we
-                * can use HW HMAC, but only if there is some data to process
-                * (OCS HW HMAC does not support zero-length messages) and the
-                * key length is supported by the hardware (OCS HCU HW only
-                * supports key lengths <= 64 bytes); if HW HMAC cannot be
-                * used, fall back to SW-assisted HMAC.
-                */
-               if (kmb_get_total_data(rctx) &&
-                   ctx->key_len <= OCS_HCU_HW_KEY_LEN) {
-                       rctx->flags |= REQ_FLAGS_HMAC_HW;
-               } else {
-                       rctx->flags |= REQ_FLAGS_HMAC_SW;
-                       rc = prepare_ipad(req);
-                       if (rc)
-                               return rc;
-               }
-       }
-
-       return kmb_ocs_hcu_handle_queue(req);
-}
-
-static int kmb_ocs_hcu_final(struct ahash_request *req)
-{
-       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
-
-       rctx->sg_data_total = 0;
-       rctx->sg_data_offset = 0;
-       rctx->sg = NULL;
-
-       return kmb_ocs_hcu_fin_common(req);
-}
-
-static int kmb_ocs_hcu_finup(struct ahash_request *req)
-{
-       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
-
-       rctx->sg_data_total = req->nbytes;
-       rctx->sg_data_offset = 0;
-       rctx->sg = req->src;
-
-       return kmb_ocs_hcu_fin_common(req);
-}
-
-static int kmb_ocs_hcu_digest(struct ahash_request *req)
-{
-       int rc = 0;
-       struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
-
-       if (!hcu_dev)
-               return -ENOENT;
-
-       rc = kmb_ocs_hcu_init(req);
-       if (rc)
-               return rc;
-
-       rc = kmb_ocs_hcu_finup(req);
-
-       return rc;
-}
-
-static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
-{
-       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
-
-       /* Intermediate data is always stored and applied per request. */
-       memcpy(out, rctx, sizeof(*rctx));
-
-       return 0;
-}
-
-static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
-{
-       struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
-
-       /* Intermediate data is always stored and applied per request. */
-       memcpy(rctx, in, sizeof(*rctx));
-
-       return 0;
-}
-
-static int kmb_ocs_hcu_setkey(struct crypto_ahash *tfm, const u8 *key,
-                             unsigned int keylen)
-{
-       unsigned int digestsize = crypto_ahash_digestsize(tfm);
-       struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
-       size_t blk_sz = crypto_ahash_blocksize(tfm);
-       struct crypto_ahash *ahash_tfm;
-       struct ahash_request *req;
-       struct crypto_wait wait;
-       struct scatterlist sg;
-       const char *alg_name;
-       int rc;
-
-       /*
-        * Key length must be equal to block size:
-        * - If the key is shorter, we are done for now (the key will be
-        *   padded later on); this is to maximize the use of HW HMAC (which
-        *   works only for keys <= 64 bytes).
-        * - If the key is longer, we hash it.
-        */
-       if (keylen <= blk_sz) {
-               memcpy(ctx->key, key, keylen);
-               ctx->key_len = keylen;
-               return 0;
-       }
-
-       switch (digestsize) {
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
-       case SHA224_DIGEST_SIZE:
-               alg_name = "sha224-keembay-ocs";
-               break;
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
-       case SHA256_DIGEST_SIZE:
-               alg_name = ctx->is_sm3_tfm ? "sm3-keembay-ocs" :
-                                            "sha256-keembay-ocs";
-               break;
-       case SHA384_DIGEST_SIZE:
-               alg_name = "sha384-keembay-ocs";
-               break;
-       case SHA512_DIGEST_SIZE:
-               alg_name = "sha512-keembay-ocs";
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
-       if (IS_ERR(ahash_tfm))
-               return PTR_ERR(ahash_tfm);
-
-       req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
-       if (!req) {
-               rc = -ENOMEM;
-               goto err_free_ahash;
-       }
-
-       crypto_init_wait(&wait);
-       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  crypto_req_done, &wait);
-       crypto_ahash_clear_flags(ahash_tfm, ~0);
-
-       sg_init_one(&sg, key, keylen);
-       ahash_request_set_crypt(req, &sg, ctx->key, keylen);
-
-       rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
-       if (rc == 0)
-               ctx->key_len = digestsize;
-
-       ahash_request_free(req);
-err_free_ahash:
-       crypto_free_ahash(ahash_tfm);
-
-       return rc;
-}
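-
-/*
- * Editor's sketch (assumed usage, not part of this patch): how a generic
- * ahash user would reach kmb_ocs_hcu_setkey() through the crypto API.
- */
-static int example_hmac_setkey(const u8 *key, unsigned int keylen)
-{
-       struct crypto_ahash *tfm;
-       int rc;
-
-       tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
-       if (IS_ERR(tfm))
-               return PTR_ERR(tfm);
-
-       /* Dispatches to kmb_ocs_hcu_setkey() when this driver is selected. */
-       rc = crypto_ahash_setkey(tfm, key, keylen);
-       crypto_free_ahash(tfm);
-
-       return rc;
-}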
-
-/* Set request size and initialize tfm context. */
-static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
-{
-       crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
-                                    sizeof(struct ocs_hcu_rctx));
-
-       /* Init context to 0. */
-       memzero_explicit(ctx, sizeof(*ctx));
-       /* Set engine ops. */
-       ctx->engine_ctx.op.do_one_request = kmb_ocs_hcu_do_one_request;
-}
-
-static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
-{
-       struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       __cra_init(tfm, ctx);
-
-       return 0;
-}
-
-static int kmb_ocs_hcu_sm3_cra_init(struct crypto_tfm *tfm)
-{
-       struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       __cra_init(tfm, ctx);
-
-       ctx->is_sm3_tfm = true;
-
-       return 0;
-}
-
-static int kmb_ocs_hcu_hmac_sm3_cra_init(struct crypto_tfm *tfm)
-{
-       struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       __cra_init(tfm, ctx);
-
-       ctx->is_sm3_tfm = true;
-       ctx->is_hmac_tfm = true;
-
-       return 0;
-}
-
-static int kmb_ocs_hcu_hmac_cra_init(struct crypto_tfm *tfm)
-{
-       struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       __cra_init(tfm, ctx);
-
-       ctx->is_hmac_tfm = true;
-
-       return 0;
-}
-
-/* Function called when 'tfm' is de-initialized. */
-static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
-{
-       struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       /* Clear the key. */
-       memzero_explicit(ctx->key, sizeof(ctx->key));
-}
-
-static struct ahash_alg ocs_hcu_algs[] = {
-#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
-{
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .halg = {
-               .digestsize     = SHA224_DIGEST_SIZE,
-               .statesize      = sizeof(struct ocs_hcu_rctx),
-               .base   = {
-                       .cra_name               = "sha224",
-                       .cra_driver_name        = "sha224-keembay-ocs",
-                       .cra_priority           = 255,
-                       .cra_flags              = CRYPTO_ALG_ASYNC,
-                       .cra_blocksize          = SHA224_BLOCK_SIZE,
-                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
-                       .cra_alignmask          = 0,
-                       .cra_module             = THIS_MODULE,
-                       .cra_init               = kmb_ocs_hcu_sha_cra_init,
-               }
-       }
-},
-{
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .setkey         = kmb_ocs_hcu_setkey,
-       .halg = {
-               .digestsize     = SHA224_DIGEST_SIZE,
-               .statesize      = sizeof(struct ocs_hcu_rctx),
-               .base   = {
-                       .cra_name               = "hmac(sha224)",
-                       .cra_driver_name        = "hmac-sha224-keembay-ocs",
-                       .cra_priority           = 255,
-                       .cra_flags              = CRYPTO_ALG_ASYNC,
-                       .cra_blocksize          = SHA224_BLOCK_SIZE,
-                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
-                       .cra_alignmask          = 0,
-                       .cra_module             = THIS_MODULE,
-                       .cra_init               = kmb_ocs_hcu_hmac_cra_init,
-                       .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
-               }
-       }
-},
-#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
-{
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .halg = {
-               .digestsize     = SHA256_DIGEST_SIZE,
-               .statesize      = sizeof(struct ocs_hcu_rctx),
-               .base   = {
-                       .cra_name               = "sha256",
-                       .cra_driver_name        = "sha256-keembay-ocs",
-                       .cra_priority           = 255,
-                       .cra_flags              = CRYPTO_ALG_ASYNC,
-                       .cra_blocksize          = SHA256_BLOCK_SIZE,
-                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
-                       .cra_alignmask          = 0,
-                       .cra_module             = THIS_MODULE,
-                       .cra_init               = kmb_ocs_hcu_sha_cra_init,
-               }
-       }
-},
-{
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .setkey         = kmb_ocs_hcu_setkey,
-       .halg = {
-               .digestsize     = SHA256_DIGEST_SIZE,
-               .statesize      = sizeof(struct ocs_hcu_rctx),
-               .base   = {
-                       .cra_name               = "hmac(sha256)",
-                       .cra_driver_name        = "hmac-sha256-keembay-ocs",
-                       .cra_priority           = 255,
-                       .cra_flags              = CRYPTO_ALG_ASYNC,
-                       .cra_blocksize          = SHA256_BLOCK_SIZE,
-                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
-                       .cra_alignmask          = 0,
-                       .cra_module             = THIS_MODULE,
-                       .cra_init               = kmb_ocs_hcu_hmac_cra_init,
-                       .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
-               }
-       }
-},
-{
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .halg = {
-               .digestsize     = SM3_DIGEST_SIZE,
-               .statesize      = sizeof(struct ocs_hcu_rctx),
-               .base   = {
-                       .cra_name               = "sm3",
-                       .cra_driver_name        = "sm3-keembay-ocs",
-                       .cra_priority           = 255,
-                       .cra_flags              = CRYPTO_ALG_ASYNC,
-                       .cra_blocksize          = SM3_BLOCK_SIZE,
-                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
-                       .cra_alignmask          = 0,
-                       .cra_module             = THIS_MODULE,
-                       .cra_init               = kmb_ocs_hcu_sm3_cra_init,
-               }
-       }
-},
-{
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .setkey         = kmb_ocs_hcu_setkey,
-       .halg = {
-               .digestsize     = SM3_DIGEST_SIZE,
-               .statesize      = sizeof(struct ocs_hcu_rctx),
-               .base   = {
-                       .cra_name               = "hmac(sm3)",
-                       .cra_driver_name        = "hmac-sm3-keembay-ocs",
-                       .cra_priority           = 255,
-                       .cra_flags              = CRYPTO_ALG_ASYNC,
-                       .cra_blocksize          = SM3_BLOCK_SIZE,
-                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
-                       .cra_alignmask          = 0,
-                       .cra_module             = THIS_MODULE,
-                       .cra_init               = kmb_ocs_hcu_hmac_sm3_cra_init,
-                       .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
-               }
-       }
-},
-{
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .halg = {
-               .digestsize     = SHA384_DIGEST_SIZE,
-               .statesize      = sizeof(struct ocs_hcu_rctx),
-               .base   = {
-                       .cra_name               = "sha384",
-                       .cra_driver_name        = "sha384-keembay-ocs",
-                       .cra_priority           = 255,
-                       .cra_flags              = CRYPTO_ALG_ASYNC,
-                       .cra_blocksize          = SHA384_BLOCK_SIZE,
-                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
-                       .cra_alignmask          = 0,
-                       .cra_module             = THIS_MODULE,
-                       .cra_init               = kmb_ocs_hcu_sha_cra_init,
-               }
-       }
-},
-{
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .setkey         = kmb_ocs_hcu_setkey,
-       .halg = {
-               .digestsize     = SHA384_DIGEST_SIZE,
-               .statesize      = sizeof(struct ocs_hcu_rctx),
-               .base   = {
-                       .cra_name               = "hmac(sha384)",
-                       .cra_driver_name        = "hmac-sha384-keembay-ocs",
-                       .cra_priority           = 255,
-                       .cra_flags              = CRYPTO_ALG_ASYNC,
-                       .cra_blocksize          = SHA384_BLOCK_SIZE,
-                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
-                       .cra_alignmask          = 0,
-                       .cra_module             = THIS_MODULE,
-                       .cra_init               = kmb_ocs_hcu_hmac_cra_init,
-                       .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
-               }
-       }
-},
-{
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .halg = {
-               .digestsize     = SHA512_DIGEST_SIZE,
-               .statesize      = sizeof(struct ocs_hcu_rctx),
-               .base   = {
-                       .cra_name               = "sha512",
-                       .cra_driver_name        = "sha512-keembay-ocs",
-                       .cra_priority           = 255,
-                       .cra_flags              = CRYPTO_ALG_ASYNC,
-                       .cra_blocksize          = SHA512_BLOCK_SIZE,
-                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
-                       .cra_alignmask          = 0,
-                       .cra_module             = THIS_MODULE,
-                       .cra_init               = kmb_ocs_hcu_sha_cra_init,
-               }
-       }
-},
-{
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .setkey         = kmb_ocs_hcu_setkey,
-       .halg = {
-               .digestsize     = SHA512_DIGEST_SIZE,
-               .statesize      = sizeof(struct ocs_hcu_rctx),
-               .base   = {
-                       .cra_name               = "hmac(sha512)",
-                       .cra_driver_name        = "hmac-sha512-keembay-ocs",
-                       .cra_priority           = 255,
-                       .cra_flags              = CRYPTO_ALG_ASYNC,
-                       .cra_blocksize          = SHA512_BLOCK_SIZE,
-                       .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
-                       .cra_alignmask          = 0,
-                       .cra_module             = THIS_MODULE,
-                       .cra_init               = kmb_ocs_hcu_hmac_cra_init,
-                       .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
-               }
-       }
-},
-};
-
-/* Device tree driver match. */
-static const struct of_device_id kmb_ocs_hcu_of_match[] = {
-       {
-               .compatible = "intel,keembay-ocs-hcu",
-       },
-       {}
-};
-
-static int kmb_ocs_hcu_remove(struct platform_device *pdev)
-{
-       struct ocs_hcu_dev *hcu_dev;
-       int rc;
-
-       hcu_dev = platform_get_drvdata(pdev);
-       if (!hcu_dev)
-               return -ENODEV;
-
-       crypto_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
-
-       rc = crypto_engine_exit(hcu_dev->engine);
-
-       spin_lock_bh(&ocs_hcu.lock);
-       list_del(&hcu_dev->list);
-       spin_unlock_bh(&ocs_hcu.lock);
-
-       return rc;
-}
-
-static int kmb_ocs_hcu_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct ocs_hcu_dev *hcu_dev;
-       struct resource *hcu_mem;
-       int rc;
-
-       hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
-       if (!hcu_dev)
-               return -ENOMEM;
-
-       hcu_dev->dev = dev;
-
-       platform_set_drvdata(pdev, hcu_dev);
-       rc = dma_set_mask_and_coherent(&pdev->dev, OCS_HCU_DMA_BIT_MASK);
-       if (rc)
-               return rc;
-
-       /* Get the memory address and remap. */
-       hcu_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!hcu_mem) {
-               dev_err(dev, "Could not retrieve io mem resource.\n");
-               return -ENODEV;
-       }
-
-       hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
-       if (IS_ERR(hcu_dev->io_base))
-               return PTR_ERR(hcu_dev->io_base);
-
-       init_completion(&hcu_dev->irq_done);
-
-       /* Get and request IRQ. */
-       hcu_dev->irq = platform_get_irq(pdev, 0);
-       if (hcu_dev->irq < 0)
-               return hcu_dev->irq;
-
-       rc = devm_request_threaded_irq(&pdev->dev, hcu_dev->irq,
-                                      ocs_hcu_irq_handler, NULL, 0,
-                                      "keembay-ocs-hcu", hcu_dev);
-       if (rc < 0) {
-               dev_err(dev, "Could not request IRQ.\n");
-               return rc;
-       }
-
-       INIT_LIST_HEAD(&hcu_dev->list);
-
-       spin_lock_bh(&ocs_hcu.lock);
-       list_add_tail(&hcu_dev->list, &ocs_hcu.dev_list);
-       spin_unlock_bh(&ocs_hcu.lock);
-
-       /* Initialize crypto engine */
-       hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
-       if (!hcu_dev->engine) {
-               rc = -ENOMEM;
-               goto list_del;
-       }
-
-       rc = crypto_engine_start(hcu_dev->engine);
-       if (rc) {
-               dev_err(dev, "Could not start engine.\n");
-               goto cleanup;
-       }
-
-       /* Security infrastructure guarantees OCS clock is enabled. */
-
-       rc = crypto_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
-       if (rc) {
-               dev_err(dev, "Could not register algorithms.\n");
-               goto cleanup;
-       }
-
-       return 0;
-
-cleanup:
-       crypto_engine_exit(hcu_dev->engine);
-list_del:
-       spin_lock_bh(&ocs_hcu.lock);
-       list_del(&hcu_dev->list);
-       spin_unlock_bh(&ocs_hcu.lock);
-
-       return rc;
-}
-
-/* The OCS driver is a platform device. */
-static struct platform_driver kmb_ocs_hcu_driver = {
-       .probe = kmb_ocs_hcu_probe,
-       .remove = kmb_ocs_hcu_remove,
-       .driver = {
-                       .name = DRV_NAME,
-                       .of_match_table = kmb_ocs_hcu_of_match,
-               },
-};
-
-module_platform_driver(kmb_ocs_hcu_driver);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/keembay/ocs-aes.c b/drivers/crypto/keembay/ocs-aes.c
deleted file mode 100644 (file)
index be9f32f..0000000
+++ /dev/null
@@ -1,1489 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Keem Bay OCS AES Crypto Driver.
- *
- * Copyright (C) 2018-2020 Intel Corporation
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/swab.h>
-
-#include <asm/byteorder.h>
-#include <asm/errno.h>
-
-#include <crypto/aes.h>
-#include <crypto/gcm.h>
-
-#include "ocs-aes.h"
-
-#define AES_COMMAND_OFFSET                     0x0000
-#define AES_KEY_0_OFFSET                       0x0004
-#define AES_KEY_1_OFFSET                       0x0008
-#define AES_KEY_2_OFFSET                       0x000C
-#define AES_KEY_3_OFFSET                       0x0010
-#define AES_KEY_4_OFFSET                       0x0014
-#define AES_KEY_5_OFFSET                       0x0018
-#define AES_KEY_6_OFFSET                       0x001C
-#define AES_KEY_7_OFFSET                       0x0020
-#define AES_IV_0_OFFSET                                0x0024
-#define AES_IV_1_OFFSET                                0x0028
-#define AES_IV_2_OFFSET                                0x002C
-#define AES_IV_3_OFFSET                                0x0030
-#define AES_ACTIVE_OFFSET                      0x0034
-#define AES_STATUS_OFFSET                      0x0038
-#define AES_KEY_SIZE_OFFSET                    0x0044
-#define AES_IER_OFFSET                         0x0048
-#define AES_ISR_OFFSET                         0x005C
-#define AES_MULTIPURPOSE1_0_OFFSET             0x0200
-#define AES_MULTIPURPOSE1_1_OFFSET             0x0204
-#define AES_MULTIPURPOSE1_2_OFFSET             0x0208
-#define AES_MULTIPURPOSE1_3_OFFSET             0x020C
-#define AES_MULTIPURPOSE2_0_OFFSET             0x0220
-#define AES_MULTIPURPOSE2_1_OFFSET             0x0224
-#define AES_MULTIPURPOSE2_2_OFFSET             0x0228
-#define AES_MULTIPURPOSE2_3_OFFSET             0x022C
-#define AES_BYTE_ORDER_CFG_OFFSET              0x02C0
-#define AES_TLEN_OFFSET                                0x0300
-#define AES_T_MAC_0_OFFSET                     0x0304
-#define AES_T_MAC_1_OFFSET                     0x0308
-#define AES_T_MAC_2_OFFSET                     0x030C
-#define AES_T_MAC_3_OFFSET                     0x0310
-#define AES_PLEN_OFFSET                                0x0314
-#define AES_A_DMA_SRC_ADDR_OFFSET              0x0400
-#define AES_A_DMA_DST_ADDR_OFFSET              0x0404
-#define AES_A_DMA_SRC_SIZE_OFFSET              0x0408
-#define AES_A_DMA_DST_SIZE_OFFSET              0x040C
-#define AES_A_DMA_DMA_MODE_OFFSET              0x0410
-#define AES_A_DMA_NEXT_SRC_DESCR_OFFSET                0x0418
-#define AES_A_DMA_NEXT_DST_DESCR_OFFSET                0x041C
-#define AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET     0x0420
-#define AES_A_DMA_LOG_OFFSET                   0x0424
-#define AES_A_DMA_STATUS_OFFSET                        0x0428
-#define AES_A_DMA_PERF_CNTR_OFFSET             0x042C
-#define AES_A_DMA_MSI_ISR_OFFSET               0x0480
-#define AES_A_DMA_MSI_IER_OFFSET               0x0484
-#define AES_A_DMA_MSI_MASK_OFFSET              0x0488
-#define AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET   0x0600
-#define AES_A_DMA_OUTBUFFER_READ_FIFO_OFFSET   0x0700
-
-/*
- * AES_A_DMA_DMA_MODE register.
- * Default: 0x00000000.
- * bit[31]     ACTIVE
- *             This bit activates the DMA. When the DMA finishes, it resets
- *             this bit to zero.
- * bit[30:26]  Unused by this driver.
- * bit[25]     SRC_LINK_LIST_EN
- *             Source link list enable bit. When the linked list is terminated
- *             this bit is reset by the DMA.
- * bit[24]     DST_LINK_LIST_EN
- *             Destination link list enable bit. When the linked list is
- *             terminated this bit is reset by the DMA.
- * bit[23:0]   Unused by this driver.
- */
-#define AES_A_DMA_DMA_MODE_ACTIVE              BIT(31)
-#define AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN    BIT(25)
-#define AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN    BIT(24)
-
-/*
- * AES_ACTIVE register
- * default 0x00000000
- * bit[31:10]  Reserved
- * bit[9]      LAST_ADATA
- * bit[8]      LAST_GCX
- * bit[7:2]    Reserved
- * bit[1]      TERMINATION
- * bit[0]      TRIGGER
- */
-#define AES_ACTIVE_LAST_ADATA                  BIT(9)
-#define AES_ACTIVE_LAST_CCM_GCM                        BIT(8)
-#define AES_ACTIVE_TERMINATION                 BIT(1)
-#define AES_ACTIVE_TRIGGER                     BIT(0)
-
-#define AES_DISABLE_INT                                0x00000000
-#define AES_DMA_CPD_ERR_INT                    BIT(8)
-#define AES_DMA_OUTBUF_RD_ERR_INT              BIT(7)
-#define AES_DMA_OUTBUF_WR_ERR_INT              BIT(6)
-#define AES_DMA_INBUF_RD_ERR_INT               BIT(5)
-#define AES_DMA_INBUF_WR_ERR_INT               BIT(4)
-#define AES_DMA_BAD_COMP_INT                   BIT(3)
-#define AES_DMA_SAI_INT                                BIT(2)
-#define AES_DMA_SRC_DONE_INT                   BIT(0)
-#define AES_COMPLETE_INT                       BIT(1)
-
-#define AES_DMA_MSI_MASK_CLEAR                 BIT(0)
-
-#define AES_128_BIT_KEY                                0x00000000
-#define AES_256_BIT_KEY                                BIT(0)
-
-#define AES_DEACTIVATE_PERF_CNTR               0x00000000
-#define AES_ACTIVATE_PERF_CNTR                 BIT(0)
-
-#define AES_MAX_TAG_SIZE_U32                   4
-
-#define OCS_LL_DMA_FLAG_TERMINATE              BIT(31)
-
-/*
- * There is an inconsistency in the documentation: this field is documented
- * as an 11-bit value, but it is actually 10 bits wide.
- */
-#define AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK     0x3FF
-
-/*
- * During CCM decrypt, the OCS block needs to finish processing the
- * ciphertext before the tag is written. For 128-bit mode the required delay
- * is 28 OCS clock cycles; for 256-bit mode it is 36 OCS clock cycles. The
- * constant below uses the 256-bit (worst-case) value.
- */
-#define CCM_DECRYPT_DELAY_TAG_CLK_COUNT                36UL
-
-/*
- * During CCM decrypt there must be a delay of at least 42 OCS clock cycles
- * between setting the TRIGGER bit in AES_ACTIVE and setting the LAST_CCM_GCM
- * bit in the same register (as stated in the OCS databook).
- */
-#define CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT   42UL
-
-/* See RFC3610 section 2.2 */
-#define L_PRIME_MIN (1)
-#define L_PRIME_MAX (7)
-/*
- * CCM IV format from RFC 3610 section 2.3
- *
- *   Octet Number   Contents
- *   ------------   ---------
- *   0              Flags
- *   1 ... 15-L     Nonce N
- *   16-L ... 15    Counter i
- *
- * Flags = L' = L - 1
- */
-#define L_PRIME_IDX            0
-#define COUNTER_START(lprime)  (16 - ((lprime) + 1))
-#define COUNTER_LEN(lprime)    ((lprime) + 1)
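-
-/*
- * Editor's worked example: for L' = 3 (i.e. L = 4), the flags occupy octet
- * 0, the nonce occupies octets 1..11 and the counter octets 12..15, since
- * COUNTER_START(3) = 16 - 4 = 12 and COUNTER_LEN(3) = 4.
- */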
-
-enum aes_counter_mode {
-       AES_CTR_M_NO_INC = 0,
-       AES_CTR_M_32_INC = 1,
-       AES_CTR_M_64_INC = 2,
-       AES_CTR_M_128_INC = 3,
-};
-
-/**
- * struct ocs_dma_linked_list - OCS DMA linked list entry.
- * @src_addr:   Source address of the data.
- * @src_len:    Length of data to be fetched.
- * @next:      Address of the next dma_list entry to fetch.
- * @ll_flags:  Flags (freeze / terminate) for the DMA engine.
- */
-struct ocs_dma_linked_list {
-       u32 src_addr;
-       u32 src_len;
-       u32 next;
-       u32 ll_flags;
-} __packed;
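-
-/*
- * Editor's sketch (hypothetical values): building two chained OCS DMA
- * descriptors, the second terminated via OCS_LL_DMA_FLAG_TERMINATE; ll_dma
- * is the bus address at which ll[] itself is mapped.
- */
-static void example_build_ll(struct ocs_dma_linked_list ll[2], u32 buf0,
-                            u32 buf1, u32 ll_dma)
-{
-       ll[0].src_addr = buf0;
-       ll[0].src_len  = 64;
-       ll[0].next     = ll_dma + sizeof(ll[0]);        /* Points to ll[1]. */
-       ll[0].ll_flags = 0;
-
-       ll[1].src_addr = buf1;
-       ll[1].src_len  = 32;
-       ll[1].next     = 0;
-       ll[1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;     /* End of list. */
-}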
-
-/*
- * Set endianness of inputs and outputs
- * AES_BYTE_ORDER_CFG
- * default 0x00000000
- * bit [10] - KEY_HI_LO_SWAP
- * bit [9] - KEY_HI_SWAP_DWORDS_IN_OCTWORD
- * bit [8] - KEY_HI_SWAP_BYTES_IN_DWORD
- * bit [7] - KEY_LO_SWAP_DWORDS_IN_OCTWORD
- * bit [6] - KEY_LO_SWAP_BYTES_IN_DWORD
- * bit [5] - IV_SWAP_DWORDS_IN_OCTWORD
- * bit [4] - IV_SWAP_BYTES_IN_DWORD
- * bit [3] - DOUT_SWAP_DWORDS_IN_OCTWORD
- * bit [2] - DOUT_SWAP_BYTES_IN_DWORD
- * bit [1] - DIN_SWAP_DWORDS_IN_OCTWORD
- * bit [0] - DIN_SWAP_BYTES_IN_DWORD
- */
-static inline void aes_a_set_endianness(const struct ocs_aes_dev *aes_dev)
-{
-       iowrite32(0x7FF, aes_dev->base_reg + AES_BYTE_ORDER_CFG_OFFSET);
-}
-
-/* Trigger AES process start. */
-static inline void aes_a_op_trigger(const struct ocs_aes_dev *aes_dev)
-{
-       iowrite32(AES_ACTIVE_TRIGGER, aes_dev->base_reg + AES_ACTIVE_OFFSET);
-}
-
-/* Indicate the last batch of data. */
-static inline void aes_a_op_termination(const struct ocs_aes_dev *aes_dev)
-{
-       iowrite32(AES_ACTIVE_TERMINATION,
-                 aes_dev->base_reg + AES_ACTIVE_OFFSET);
-}
-
-/*
- * Set LAST_CCM_GCM in AES_ACTIVE register and clear all other bits.
- *
- * Called when DMA is programmed to fetch the last batch of data.
- * - For AES-CCM it is called for the last batch of Payload data and Ciphertext
- *   data.
- * - For AES-GCM, it is called for the last batch of Plaintext data and
- *   Ciphertext data.
- */
-static inline void aes_a_set_last_gcx(const struct ocs_aes_dev *aes_dev)
-{
-       iowrite32(AES_ACTIVE_LAST_CCM_GCM,
-                 aes_dev->base_reg + AES_ACTIVE_OFFSET);
-}
-
-/* Wait for LAST_CCM_GCM bit to be unset. */
-static inline void aes_a_wait_last_gcx(const struct ocs_aes_dev *aes_dev)
-{
-       u32 aes_active_reg;
-
-       do {
-               aes_active_reg = ioread32(aes_dev->base_reg +
-                                         AES_ACTIVE_OFFSET);
-       } while (aes_active_reg & AES_ACTIVE_LAST_CCM_GCM);
-}
-
-/* Wait until the input buffer occupancy (a 10-bit field) drains to zero. */
-static void aes_a_dma_wait_input_buffer_occupancy(const struct ocs_aes_dev *aes_dev)
-{
-       u32 reg;
-
-       do {
-               reg = ioread32(aes_dev->base_reg + AES_A_DMA_STATUS_OFFSET);
-       } while (reg & AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK);
-}
-
-/*
- * Set LAST_CCM_GCM and LAST_ADATA bits in AES_ACTIVE register (and clear all
- * other bits).
- *
- * Called when DMA is programmed to fetch the last batch of Associated Data
- * (CCM case) or Additional Authenticated Data (GCM case).
- */
-static inline void aes_a_set_last_gcx_and_adata(const struct ocs_aes_dev *aes_dev)
-{
-       iowrite32(AES_ACTIVE_LAST_ADATA | AES_ACTIVE_LAST_CCM_GCM,
-                 aes_dev->base_reg + AES_ACTIVE_OFFSET);
-}
-
-/* Set DMA src and dst transfer size to 0 */
-static inline void aes_a_dma_set_xfer_size_zero(const struct ocs_aes_dev *aes_dev)
-{
-       iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
-       iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
-}
-
-/* Activate DMA for zero-byte transfer case. */
-static inline void aes_a_dma_active(const struct ocs_aes_dev *aes_dev)
-{
-       iowrite32(AES_A_DMA_DMA_MODE_ACTIVE,
-                 aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
-}
-
-/* Activate DMA and enable src linked list */
-static inline void aes_a_dma_active_src_ll_en(const struct ocs_aes_dev *aes_dev)
-{
-       iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
-                 AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN,
-                 aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
-}
-
-/* Activate DMA and enable dst linked list */
-static inline void aes_a_dma_active_dst_ll_en(const struct ocs_aes_dev *aes_dev)
-{
-       iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
-                 AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
-                 aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
-}
-
-/* Activate DMA and enable src and dst linked lists */
-static inline void aes_a_dma_active_src_dst_ll_en(const struct ocs_aes_dev *aes_dev)
-{
-       iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
-                 AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN |
-                 AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
-                 aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
-}
-
-/* Reset PERF_CNTR to 0 and activate it */
-static inline void aes_a_dma_reset_and_activate_perf_cntr(const struct ocs_aes_dev *aes_dev)
-{
-       iowrite32(0x00000000, aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET);
-       iowrite32(AES_ACTIVATE_PERF_CNTR,
-                 aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
-}
-
-/* Wait until PERF_CNTR is > delay, then deactivate it */
-static inline void aes_a_dma_wait_and_deactivate_perf_cntr(const struct ocs_aes_dev *aes_dev,
-                                                          int delay)
-{
-       while (ioread32(aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET) < delay)
-               ;
-       iowrite32(AES_DEACTIVATE_PERF_CNTR,
-                 aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
-}
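-
-/*
- * Editor's note (assumed pairing, based on the delay constants above): the
- * CCM decrypt delays are enforced by bracketing the sensitive register
- * writes with the two helpers above, along the lines of:
- *
- *     aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
- *     ... program the tag / LAST_CCM_GCM write ...
- *     aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
- *                                             CCM_DECRYPT_DELAY_TAG_CLK_COUNT);
- */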
-
-/* Disable AES and DMA IRQ. */
-static void aes_irq_disable(struct ocs_aes_dev *aes_dev)
-{
-       u32 isr_val = 0;
-
-       /* Disable interrupts */
-       iowrite32(AES_DISABLE_INT,
-                 aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
-       iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
-
-       /* Clear any pending interrupt */
-       isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
-       if (isr_val)
-               iowrite32(isr_val,
-                         aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
-
-       isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
-       if (isr_val)
-               iowrite32(isr_val,
-                         aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
-
-       isr_val = ioread32(aes_dev->base_reg + AES_ISR_OFFSET);
-       if (isr_val)
-               iowrite32(isr_val, aes_dev->base_reg + AES_ISR_OFFSET);
-}
-
-/* Enable AES or DMA IRQ. IRQ is disabled once fired. */
-static void aes_irq_enable(struct ocs_aes_dev *aes_dev, u8 irq)
-{
-       if (irq == AES_COMPLETE_INT) {
-               /* Ensure DMA error interrupts are enabled */
-               iowrite32(AES_DMA_CPD_ERR_INT |
-                         AES_DMA_OUTBUF_RD_ERR_INT |
-                         AES_DMA_OUTBUF_WR_ERR_INT |
-                         AES_DMA_INBUF_RD_ERR_INT |
-                         AES_DMA_INBUF_WR_ERR_INT |
-                         AES_DMA_BAD_COMP_INT |
-                         AES_DMA_SAI_INT,
-                         aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
-               /*
-                * AES_IER
-                * default 0x00000000
-                * bits [31:3] - reserved
-                * bit [2] - EN_SKS_ERR
-                * bit [1] - EN_AES_COMPLETE
-                * bit [0] - reserved
-                */
-               iowrite32(AES_COMPLETE_INT, aes_dev->base_reg + AES_IER_OFFSET);
-               return;
-       }
-       if (irq == AES_DMA_SRC_DONE_INT) {
-               /* Ensure AES interrupts are disabled */
-               iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
-               /*
-                * DMA_MSI_IER
-                * default 0x00000000
-                * bits [31:9] - reserved
-                * bit [8] - CPD_ERR_INT_EN
-                * bit [7] - OUTBUF_RD_ERR_INT_EN
-                * bit [6] - OUTBUF_WR_ERR_INT_EN
-                * bit [5] - INBUF_RD_ERR_INT_EN
-                * bit [4] - INBUF_WR_ERR_INT_EN
-                * bit [3] - BAD_COMP_INT_EN
-                * bit [2] - SAI_INT_EN
-                * bit [1] - DST_DONE_INT_EN
-                * bit [0] - SRC_DONE_INT_EN
-                */
-               iowrite32(AES_DMA_CPD_ERR_INT |
-                         AES_DMA_OUTBUF_RD_ERR_INT |
-                         AES_DMA_OUTBUF_WR_ERR_INT |
-                         AES_DMA_INBUF_RD_ERR_INT |
-                         AES_DMA_INBUF_WR_ERR_INT |
-                         AES_DMA_BAD_COMP_INT |
-                         AES_DMA_SAI_INT |
-                         AES_DMA_SRC_DONE_INT,
-                         aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
-       }
-}
-
-/* Enable and wait for IRQ (either from OCS AES engine or DMA) */
-static int ocs_aes_irq_enable_and_wait(struct ocs_aes_dev *aes_dev, u8 irq)
-{
-       int rc;
-
-       reinit_completion(&aes_dev->irq_completion);
-       aes_irq_enable(aes_dev, irq);
-       rc = wait_for_completion_interruptible(&aes_dev->irq_completion);
-       if (rc)
-               return rc;
-
-       return aes_dev->dma_err_mask ? -EIO : 0;
-}
-
-/* Configure DMA to OCS, linked list mode */
-static inline void dma_to_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
-                                    dma_addr_t dma_list)
-{
-       iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
-       iowrite32(dma_list,
-                 aes_dev->base_reg + AES_A_DMA_NEXT_SRC_DESCR_OFFSET);
-}
-
-/* Configure DMA from OCS, linked list mode */
-static inline void dma_from_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
-                                      dma_addr_t dma_list)
-{
-       iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
-       iowrite32(dma_list,
-                 aes_dev->base_reg + AES_A_DMA_NEXT_DST_DESCR_OFFSET);
-}
-
-irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id)
-{
-       struct ocs_aes_dev *aes_dev = dev_id;
-       u32 aes_dma_isr;
-
-       /* Read DMA ISR status. */
-       aes_dma_isr = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
-
-       /* Disable and clear interrupts. */
-       aes_irq_disable(aes_dev);
-
-       /* Save DMA error status. */
-       aes_dev->dma_err_mask = aes_dma_isr &
-                               (AES_DMA_CPD_ERR_INT |
-                                AES_DMA_OUTBUF_RD_ERR_INT |
-                                AES_DMA_OUTBUF_WR_ERR_INT |
-                                AES_DMA_INBUF_RD_ERR_INT |
-                                AES_DMA_INBUF_WR_ERR_INT |
-                                AES_DMA_BAD_COMP_INT |
-                                AES_DMA_SAI_INT);
-
-       /* Signal IRQ completion. */
-       complete(&aes_dev->irq_completion);
-
-       return IRQ_HANDLED;
-}
-
-/**
- * ocs_aes_set_key() - Write key into OCS AES hardware.
- * @aes_dev:   The OCS AES device to write the key to.
- * @key_size:  The size of the key (in bytes).
- * @key:       The key to write.
- * @cipher:    The cipher the key is for.
- *
- * For AES @key_size must be either 16 or 32. For SM4 @key_size must be 16.
- *
- * Return:     0 on success, negative error code otherwise.
- */
-int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, u32 key_size, const u8 *key,
-                   enum ocs_cipher cipher)
-{
-       const u32 *key_u32;
-       u32 val;
-       int i;
-
-       /* OCS AES supports 128-bit and 256-bit keys only. */
-       if (cipher == OCS_AES && !(key_size == 32 || key_size == 16)) {
-               dev_err(aes_dev->dev,
-                       "%d-bit keys not supported by AES cipher\n",
-                       key_size * 8);
-               return -EINVAL;
-       }
-       /* OCS SM4 supports 128-bit keys only. */
-       if (cipher == OCS_SM4 && key_size != 16) {
-               dev_err(aes_dev->dev,
-                       "%d-bit keys not supported for SM4 cipher\n",
-                       key_size * 8);
-               return -EINVAL;
-       }
-
-       if (!key)
-               return -EINVAL;
-
-       key_u32 = (const u32 *)key;
-
-       /* Write key to AES_KEY[0-7] registers */
-       for (i = 0; i < (key_size / sizeof(u32)); i++) {
-               iowrite32(key_u32[i],
-                         aes_dev->base_reg + AES_KEY_0_OFFSET +
-                         (i * sizeof(u32)));
-       }
-       /*
-        * Write key size
-        * bits [31:1] - reserved
-        * bit [0] - AES_KEY_SIZE
-        *           0 - 128 bit key
-        *           1 - 256 bit key
-        */
-       val = (key_size == 16) ? AES_128_BIT_KEY : AES_256_BIT_KEY;
-       iowrite32(val, aes_dev->base_reg + AES_KEY_SIZE_OFFSET);
-
-       return 0;
-}
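-
-/*
- * Editor's sketch: a typical call programming a 256-bit AES key (key_size
- * is in bytes, so 32 selects AES-256).
- */
-static int example_program_aes256(struct ocs_aes_dev *aes_dev, const u8 *key)
-{
-       return ocs_aes_set_key(aes_dev, 32, key, OCS_AES);
-}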
-
-/* Write AES_COMMAND */
-static inline void set_ocs_aes_command(struct ocs_aes_dev *aes_dev,
-                                      enum ocs_cipher cipher,
-                                      enum ocs_mode mode,
-                                      enum ocs_instruction instruction)
-{
-       u32 val;
-
-       /*
-        * AES_COMMAND
-        * default 0x000000CC
-        * bit [14] - CIPHER_SELECT
-        *            0 - AES
-        *            1 - SM4
-        * bits [11:8] - OCS_AES_MODE
-        *               0000 - ECB
-        *               0001 - CBC
-        *               0010 - CTR
-        *               0110 - CCM
-        *               0111 - GCM
-        *               1001 - CTS
-        * bits [7:6] - AES_INSTRUCTION
-        *              00 - ENCRYPT
-        *              01 - DECRYPT
-        *              10 - EXPAND
-        *              11 - BYPASS
-        * bits [3:2] - CTR_M_BITS
-        *              00 - No increment
-        *              01 - Least significant 32 bits are incremented
-        *              10 - Least significant 64 bits are incremented
-        *              11 - Full 128 bits are incremented
-        */
-       val = (cipher << 14) | (mode << 8) | (instruction << 6) |
-             (AES_CTR_M_128_INC << 2);
-       iowrite32(val, aes_dev->base_reg + AES_COMMAND_OFFSET);
-}
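-
-/*
- * Editor's worked example: per the bit map above, AES-GCM encrypt encodes
- * as (0 << 14) | (0x7 << 8) | (0x0 << 6) | (0x3 << 2) = 0x0000070C,
- * assuming OCS_MODE_GCM == 0x7 and OCS_ENCRYPT == 0x0 in ocs-aes.h.
- */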
-
-static void ocs_aes_init(struct ocs_aes_dev *aes_dev,
-                        enum ocs_mode mode,
-                        enum ocs_cipher cipher,
-                        enum ocs_instruction instruction)
-{
-       /* Ensure interrupts are disabled and pending interrupts cleared. */
-       aes_irq_disable(aes_dev);
-
-       /* Set endianness recommended by data-sheet. */
-       aes_a_set_endianness(aes_dev);
-
-       /* Set AES_COMMAND register. */
-       set_ocs_aes_command(aes_dev, cipher, mode, instruction);
-}
-
-/*
- * Write the byte length of the last AES/SM4 block of Payload data (without
- * zero padding and without the length of the MAC) in register AES_PLEN.
- */
-static inline void ocs_aes_write_last_data_blk_len(struct ocs_aes_dev *aes_dev,
-                                                  u32 size)
-{
-       u32 val;
-
-       if (size == 0) {
-               val = 0;
-               goto exit;
-       }
-
-       val = size % AES_BLOCK_SIZE;
-       if (val == 0)
-               val = AES_BLOCK_SIZE;
-
-exit:
-       iowrite32(val, aes_dev->base_reg + AES_PLEN_OFFSET);
-}
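
The AES_PLEN computation above reduces to a three-way case split: zero input stays zero, a partial last block yields its remainder, and a block-aligned input yields a full block. A small user-space mirror of that logic (AES_BLOCK_SIZE is 16 bytes):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t last_blk_len(uint32_t size)
    {
            uint32_t val;

            if (size == 0)
                    return 0;
            val = size % 16; /* AES_BLOCK_SIZE */
            return val ? val : 16;
    }

    int main(void)
    {
            printf("%u %u %u\n", last_blk_len(0), last_blk_len(20),
                   last_blk_len(32)); /* prints: 0 4 16 */
            return 0;
    }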
-
-/*
- * Validate inputs according to mode.
- * If OK return 0; else return -EINVAL.
- */
-static int ocs_aes_validate_inputs(dma_addr_t src_dma_list, u32 src_size,
-                                  const u8 *iv, u32 iv_size,
-                                  dma_addr_t aad_dma_list, u32 aad_size,
-                                  const u8 *tag, u32 tag_size,
-                                  enum ocs_cipher cipher, enum ocs_mode mode,
-                                  enum ocs_instruction instruction,
-                                  dma_addr_t dst_dma_list)
-{
-       /* Ensure cipher, mode and instruction are valid. */
-       if (!(cipher == OCS_AES || cipher == OCS_SM4))
-               return -EINVAL;
-
-       if (mode != OCS_MODE_ECB && mode != OCS_MODE_CBC &&
-           mode != OCS_MODE_CTR && mode != OCS_MODE_CCM &&
-           mode != OCS_MODE_GCM && mode != OCS_MODE_CTS)
-               return -EINVAL;
-
-       if (instruction != OCS_ENCRYPT && instruction != OCS_DECRYPT &&
-           instruction != OCS_EXPAND  && instruction != OCS_BYPASS)
-               return -EINVAL;
-
-       /*
-        * When instruction is OCS_BYPASS, OCS simply copies data from source
-        * to destination using DMA.
-        *
-        * AES mode is irrelevant, but both the source and destination DMA
-        * linked lists must be defined.
-        */
-       if (instruction == OCS_BYPASS) {
-               if (src_dma_list == DMA_MAPPING_ERROR ||
-                   dst_dma_list == DMA_MAPPING_ERROR)
-                       return -EINVAL;
-
-               return 0;
-       }
-
-       /*
-        * For performance reasons, switch on the mode to limit unnecessary
-        * conditionals for each mode.
-        */
-       switch (mode) {
-       case OCS_MODE_ECB:
-               /* Ensure input length is multiple of block size */
-               if (src_size % AES_BLOCK_SIZE != 0)
-                       return -EINVAL;
-
-               /* Ensure source and destination linked lists are created */
-               if (src_dma_list == DMA_MAPPING_ERROR ||
-                   dst_dma_list == DMA_MAPPING_ERROR)
-                       return -EINVAL;
-
-               return 0;
-
-       case OCS_MODE_CBC:
-               /* Ensure input length is multiple of block size */
-               if (src_size % AES_BLOCK_SIZE != 0)
-                       return -EINVAL;
-
-               /* Ensure source and destination linked lists are created */
-               if (src_dma_list == DMA_MAPPING_ERROR ||
-                   dst_dma_list == DMA_MAPPING_ERROR)
-                       return -EINVAL;
-
-               /* Ensure IV is present and block size in length */
-               if (!iv || iv_size != AES_BLOCK_SIZE)
-                       return -EINVAL;
-
-               return 0;
-
-       case OCS_MODE_CTR:
-               /* Ensure input length of 1 byte or greater */
-               if (src_size == 0)
-                       return -EINVAL;
-
-               /* Ensure source and destination linked lists are created */
-               if (src_dma_list == DMA_MAPPING_ERROR ||
-                   dst_dma_list == DMA_MAPPING_ERROR)
-                       return -EINVAL;
-
-               /* Ensure IV is present and block size in length */
-               if (!iv || iv_size != AES_BLOCK_SIZE)
-                       return -EINVAL;
-
-               return 0;
-
-       case OCS_MODE_CTS:
-               /* Ensure input length >= block size */
-               if (src_size < AES_BLOCK_SIZE)
-                       return -EINVAL;
-
-               /* Ensure source and destination linked lists are created */
-               if (src_dma_list == DMA_MAPPING_ERROR ||
-                   dst_dma_list == DMA_MAPPING_ERROR)
-                       return -EINVAL;
-
-               /* Ensure IV is present and block size in length */
-               if (!iv || iv_size != AES_BLOCK_SIZE)
-                       return -EINVAL;
-
-               return 0;
-
-       case OCS_MODE_GCM:
-               /* Ensure IV is present and GCM_AES_IV_SIZE in length */
-               if (!iv || iv_size != GCM_AES_IV_SIZE)
-                       return -EINVAL;
-
-               /*
-                * If input data present ensure source and destination linked
-                * lists are created
-                */
-               if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
-                                dst_dma_list == DMA_MAPPING_ERROR))
-                       return -EINVAL;
-
-               /* If aad present ensure aad linked list is created */
-               if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
-                       return -EINVAL;
-
-               /* Ensure tag destination is set */
-               if (!tag)
-                       return -EINVAL;
-
-               /* Just ensure that tag_size doesn't cause overflows. */
-               if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
-                       return -EINVAL;
-
-               return 0;
-
-       case OCS_MODE_CCM:
-               /* Ensure IV is present and block size in length */
-               if (!iv || iv_size != AES_BLOCK_SIZE)
-                       return -EINVAL;
-
-               /* 2 <= L <= 8, so 1 <= L' <= 7 */
-               if (iv[L_PRIME_IDX] < L_PRIME_MIN ||
-                   iv[L_PRIME_IDX] > L_PRIME_MAX)
-                       return -EINVAL;
-
-               /* If aad present ensure aad linked list is created */
-               if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
-                       return -EINVAL;
-
-               /* Just ensure that tag_size doesn't cause overflows. */
-               if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
-                       return -EINVAL;
-
-               if (instruction == OCS_DECRYPT) {
-                       /*
-                        * If input data present ensure source and destination
-                        * linked lists are created
-                        */
-                       if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
-                                        dst_dma_list == DMA_MAPPING_ERROR))
-                               return -EINVAL;
-
-                       /* Ensure input tag is present */
-                       if (!tag)
-                               return -EINVAL;
-
-                       return 0;
-               }
-
-               /* Instruction == OCS_ENCRYPT */
-
-               /*
-                * Destination linked list always required (for tag even if no
-                * input data)
-                */
-               if (dst_dma_list == DMA_MAPPING_ERROR)
-                       return -EINVAL;
-
-               /* If input data present ensure src linked list is created */
-               if (src_size && src_dma_list == DMA_MAPPING_ERROR)
-                       return -EINVAL;
-
-               return 0;
-
-       default:
-               return -EINVAL;
-       }
-}
-
-/**
- * ocs_aes_op() - Perform AES/SM4 operation.
- * @aes_dev:           The OCS AES device to use.
- * @mode:              The mode to use (ECB, CBC, CTR, or CTS).
- * @cipher:            The cipher to use (AES or SM4).
- * @instruction:       The instruction to perform (encrypt or decrypt).
- * @dst_dma_list:      The OCS DMA list mapping output memory.
- * @src_dma_list:      The OCS DMA list mapping input payload data.
- * @src_size:          The amount of data mapped by @src_dma_list.
- * @iv:                        The IV vector.
- * @iv_size:           The size (in bytes) of @iv.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-int ocs_aes_op(struct ocs_aes_dev *aes_dev,
-              enum ocs_mode mode,
-              enum ocs_cipher cipher,
-              enum ocs_instruction instruction,
-              dma_addr_t dst_dma_list,
-              dma_addr_t src_dma_list,
-              u32 src_size,
-              u8 *iv,
-              u32 iv_size)
-{
-       u32 *iv32;
-       int rc;
-
-       rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv, iv_size, 0, 0,
-                                    NULL, 0, cipher, mode, instruction,
-                                    dst_dma_list);
-       if (rc)
-               return rc;
-       /*
-        * ocs_aes_validate_inputs() is a generic check, now ensure mode is not
-        * GCM or CCM.
-        */
-       if (mode == OCS_MODE_GCM || mode == OCS_MODE_CCM)
-               return -EINVAL;
-
-       /* Cast IV to u32 array. */
-       iv32 = (u32 *)iv;
-
-       ocs_aes_init(aes_dev, mode, cipher, instruction);
-
-       if (mode == OCS_MODE_CTS) {
-               /* Write the byte length of the last data block to engine. */
-               ocs_aes_write_last_data_blk_len(aes_dev, src_size);
-       }
-
-       /* ECB is the only mode that doesn't use IV. */
-       if (mode != OCS_MODE_ECB) {
-               iowrite32(iv32[0], aes_dev->base_reg + AES_IV_0_OFFSET);
-               iowrite32(iv32[1], aes_dev->base_reg + AES_IV_1_OFFSET);
-               iowrite32(iv32[2], aes_dev->base_reg + AES_IV_2_OFFSET);
-               iowrite32(iv32[3], aes_dev->base_reg + AES_IV_3_OFFSET);
-       }
-
-       /* Set AES_ACTIVE.TRIGGER to start the operation. */
-       aes_a_op_trigger(aes_dev);
-
-       /* Configure and activate input / output DMA. */
-       dma_to_ocs_aes_ll(aes_dev, src_dma_list);
-       dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
-       aes_a_dma_active_src_dst_ll_en(aes_dev);
-
-       if (mode == OCS_MODE_CTS) {
-               /*
-                * For CTS mode, instruct engine to activate ciphertext
-                * stealing if last block of data is incomplete.
-                */
-               aes_a_set_last_gcx(aes_dev);
-       } else {
-               /* For all other modes, just write the 'termination' bit. */
-               aes_a_op_termination(aes_dev);
-       }
-
-       /* Wait for engine to complete processing. */
-       rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
-       if (rc)
-               return rc;
-
-       if (mode == OCS_MODE_CTR) {
-               /* Read back IV for streaming mode */
-               iv32[0] = ioread32(aes_dev->base_reg + AES_IV_0_OFFSET);
-               iv32[1] = ioread32(aes_dev->base_reg + AES_IV_1_OFFSET);
-               iv32[2] = ioread32(aes_dev->base_reg + AES_IV_2_OFFSET);
-               iv32[3] = ioread32(aes_dev->base_reg + AES_IV_3_OFFSET);
-       }
-
-       return 0;
-}
-
-/* Compute and write J0 to engine registers. */
-static void ocs_aes_gcm_write_j0(const struct ocs_aes_dev *aes_dev,
-                                const u8 *iv)
-{
-       const u32 *j0 = (u32 *)iv;
-
-       /*
-        * IV must be 12 bytes; other sizes are not supported, as the Linux
-        * crypto API only expects/allows 12-byte IVs for GCM.
-        */
-       iowrite32(0x00000001, aes_dev->base_reg + AES_IV_0_OFFSET);
-       iowrite32(__swab32(j0[2]), aes_dev->base_reg + AES_IV_1_OFFSET);
-       iowrite32(__swab32(j0[1]), aes_dev->base_reg + AES_IV_2_OFFSET);
-       iowrite32(__swab32(j0[0]), aes_dev->base_reg + AES_IV_3_OFFSET);
-}
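
For a 96-bit IV, GCM defines J0 = IV || 0^31 || 1, which is exactly what the four register writes above implement: AES_IV_0 receives the trailing 0x00000001 and AES_IV_3..AES_IV_1 receive the byte-swapped IV words. A user-space sketch of the same word construction (it assumes a little-endian host, as the driver's u32 cast does):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t swab32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0xff00) |
                   ((x << 8) & 0xff0000) | (x << 24);
    }

    int main(void)
    {
            uint8_t iv[12];
            uint32_t j0[3];

            for (int i = 0; i < 12; i++)
                    iv[i] = i; /* example IV 000102...0b */
            memcpy(j0, iv, sizeof(iv));

            /* Reading AES_IV_3 down to AES_IV_0 yields J0 = IV || 0x00000001. */
            printf("AES_IV_3 = 0x%08x\n", swab32(j0[0])); /* 0x00010203 */
            printf("AES_IV_2 = 0x%08x\n", swab32(j0[1])); /* 0x04050607 */
            printf("AES_IV_1 = 0x%08x\n", swab32(j0[2])); /* 0x08090a0b */
            printf("AES_IV_0 = 0x%08x\n", 0x00000001u);
            return 0;
    }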
-
-/* Read GCM tag from engine registers. */
-static inline void ocs_aes_gcm_read_tag(struct ocs_aes_dev *aes_dev,
-                                       u8 *tag, u32 tag_size)
-{
-       u32 tag_u32[AES_MAX_TAG_SIZE_U32];
-
-       /*
-        * The Authentication Tag T is stored in Little Endian order in the
-        * registers with the most significant bytes stored from AES_T_MAC[3]
-        * downward.
-        */
-       tag_u32[0] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_3_OFFSET));
-       tag_u32[1] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_2_OFFSET));
-       tag_u32[2] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_1_OFFSET));
-       tag_u32[3] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_0_OFFSET));
-
-       memcpy(tag, tag_u32, tag_size);
-}
-
-/**
- * ocs_aes_gcm_op() - Perform GCM operation.
- * @aes_dev:           The OCS AES device to use.
- * @cipher:            The Cipher to use (AES or SM4).
- * @instruction:       The instruction to perform (encrypt or decrypt).
- * @dst_dma_list:      The OCS DMA list mapping output memory.
- * @src_dma_list:      The OCS DMA list mapping input payload data.
- * @src_size:          The amount of data mapped by @src_dma_list.
- * @iv:                        The input IV vector.
- * @aad_dma_list:      The OCS DMA list mapping input AAD data.
- * @aad_size:          The amount of data mapped by @aad_dma_list.
- * @out_tag:           Where to store computed tag.
- * @tag_size:          The size (in bytes) of @out_tag.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
-                  enum ocs_cipher cipher,
-                  enum ocs_instruction instruction,
-                  dma_addr_t dst_dma_list,
-                  dma_addr_t src_dma_list,
-                  u32 src_size,
-                  const u8 *iv,
-                  dma_addr_t aad_dma_list,
-                  u32 aad_size,
-                  u8 *out_tag,
-                  u32 tag_size)
-{
-       u64 bit_len;
-       u32 val;
-       int rc;
-
-       rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
-                                    GCM_AES_IV_SIZE, aad_dma_list,
-                                    aad_size, out_tag, tag_size, cipher,
-                                    OCS_MODE_GCM, instruction,
-                                    dst_dma_list);
-       if (rc)
-               return rc;
-
-       ocs_aes_init(aes_dev, OCS_MODE_GCM, cipher, instruction);
-
-       /* Compute and write J0 to OCS HW. */
-       ocs_aes_gcm_write_j0(aes_dev, iv);
-
-       /* Write out_tag byte length */
-       iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
-
-       /* Write the byte length of the last plaintext / ciphertext block. */
-       ocs_aes_write_last_data_blk_len(aes_dev, src_size);
-
-       /* Write ciphertext bit length */
-       bit_len = (u64)src_size * 8;
-       val = bit_len & 0xFFFFFFFF;
-       iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_0_OFFSET);
-       val = bit_len >> 32;
-       iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_1_OFFSET);
-
-       /* Write aad bit length */
-       bit_len = (u64)aad_size * 8;
-       val = bit_len & 0xFFFFFFFF;
-       iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_2_OFFSET);
-       val = bit_len >> 32;
-       iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_3_OFFSET);
-
-       /* Set AES_ACTIVE.TRIGGER to start the operation. */
-       aes_a_op_trigger(aes_dev);
-
-       /* Process AAD. */
-       if (aad_size) {
-               /* If aad present, configure DMA to feed it to the engine. */
-               dma_to_ocs_aes_ll(aes_dev, aad_dma_list);
-               aes_a_dma_active_src_ll_en(aes_dev);
-
-               /* Instructs engine to pad last block of aad, if needed. */
-               aes_a_set_last_gcx_and_adata(aes_dev);
-
-               /* Wait for DMA transfer to complete. */
-               rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
-               if (rc)
-                       return rc;
-       } else {
-               aes_a_set_last_gcx_and_adata(aes_dev);
-       }
-
-       /* Wait until adata (if present) has been processed. */
-       aes_a_wait_last_gcx(aes_dev);
-       aes_a_dma_wait_input_buffer_occupancy(aes_dev);
-
-       /* Now process payload. */
-       if (src_size) {
-               /* Configure and activate DMA for both input and output data. */
-               dma_to_ocs_aes_ll(aes_dev, src_dma_list);
-               dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
-               aes_a_dma_active_src_dst_ll_en(aes_dev);
-       } else {
-               aes_a_dma_set_xfer_size_zero(aes_dev);
-               aes_a_dma_active(aes_dev);
-       }
-
-       /* Instruct the AES/SM4 engine that payload processing is over. */
-       aes_a_set_last_gcx(aes_dev);
-
-       /* Wait for OCS AES engine to complete processing. */
-       rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
-       if (rc)
-               return rc;
-
-       ocs_aes_gcm_read_tag(aes_dev, out_tag, tag_size);
-
-       return 0;
-}
-
-/* Write encrypted tag to AES/SM4 engine. */
-static void ocs_aes_ccm_write_encrypted_tag(struct ocs_aes_dev *aes_dev,
-                                           const u8 *in_tag, u32 tag_size)
-{
-       int i;
-
-       /* Ensure DMA input buffer is empty */
-       aes_a_dma_wait_input_buffer_occupancy(aes_dev);
-
-       /*
-        * During CCM decrypt, the OCS block needs to finish processing the
-        * ciphertext before the tag is written; so a delay is needed after the
-        * DMA has completed writing the ciphertext.
-        */
-       aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
-       aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
-                                               CCM_DECRYPT_DELAY_TAG_CLK_COUNT);
-
-       /* Write encrypted tag to AES/SM4 engine. */
-       for (i = 0; i < tag_size; i++) {
-               iowrite8(in_tag[i], aes_dev->base_reg +
-                                   AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
-       }
-}
-
-/*
- * Write B0 CCM block to OCS AES HW.
- *
- * Note: B0 format is documented in NIST Special Publication 800-38C
- * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
- * (see Section A.2.1)
- */
-static int ocs_aes_ccm_write_b0(const struct ocs_aes_dev *aes_dev,
-                               const u8 *iv, u32 adata_size, u32 tag_size,
-                               u32 cryptlen)
-{
-       u8 b0[16]; /* CCM B0 block is 16 bytes long. */
-       int i, q;
-
-       /* Initialize B0 to 0. */
-       memset(b0, 0, sizeof(b0));
-
-       /*
-        * B0[0] is the 'Flags Octet' and has the following structure:
-        *   bit 7: Reserved
-        *   bit 6: Adata flag
-        *   bit 5-3: t value encoded as (t-2)/2
-        *   bit 2-0: q value encoded as q - 1
-        */
-       /* If there is AAD data, set the Adata flag. */
-       if (adata_size)
-               b0[0] |= BIT(6);
-       /*
-        * t denotes the octet length of T.
-        * t can only be an element of { 4, 6, 8, 10, 12, 14, 16} and is
-        * encoded as (t - 2) / 2
-        */
-       b0[0] |= (((tag_size - 2) / 2) & 0x7)  << 3;
-       /*
-        * q is the octet length of Q.
-        * q can only be an element of {2, 3, 4, 5, 6, 7, 8} and is encoded as
-        * q - 1 == iv[0] & 0x7;
-        */
-       b0[0] |= iv[0] & 0x7;
-       /*
-        * Copy the Nonce N from IV to B0; N is located in iv[1]..iv[15 - q]
-        * and must be copied to b0[1]..b0[15-q].
-        * q == (iv[0] & 0x7) + 1
-        */
-       q = (iv[0] & 0x7) + 1;
-       for (i = 1; i <= 15 - q; i++)
-               b0[i] = iv[i];
-       /*
-        * The rest of B0 must contain Q, i.e., the message length.
-        * Q is encoded in q octets, in big-endian order, so to write it, we
-        * start from the end of B0 and we move backward.
-        */
-       i = sizeof(b0) - 1;
-       while (q) {
-               b0[i] = cryptlen & 0xff;
-               cryptlen >>= 8;
-               i--;
-               q--;
-       }
-       /*
-        * If cryptlen is not zero at this point, it means that its original
-        * value was too big.
-        */
-       if (cryptlen)
-               return -EOVERFLOW;
-       /* Now write B0 to OCS AES input buffer. */
-       for (i = 0; i < sizeof(b0); i++)
-               iowrite8(b0[i], aes_dev->base_reg +
-                               AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
-       return 0;
-}
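
The flags octet built above can be checked by hand against SP 800-38C. A standalone sketch for one concrete parameter set (AAD present, 16-byte tag, 4-octet length field, so iv[0] = q - 1 = 3):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t iv0 = 3;       /* q - 1, i.e. q = 4 */
            uint8_t tag_size = 16; /* t */
            uint8_t flags = 0;

            flags |= 1 << 6;                            /* Adata present */
            flags |= (((tag_size - 2) / 2) & 0x7) << 3; /* (t - 2) / 2 */
            flags |= iv0 & 0x7;                         /* q - 1 */

            printf("B0[0] = 0x%02x\n", flags); /* prints 0x7b */
            return 0;
    }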
-
-/*
- * Write adata length to OCS AES HW.
- *
- * Note: adata len encoding is documented in NIST Special Publication 800-38C
- * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
- * (see Section A.2.2)
- */
-static void ocs_aes_ccm_write_adata_len(const struct ocs_aes_dev *aes_dev,
-                                       u64 adata_len)
-{
-       u8 enc_a[10]; /* Maximum encoded size: 10 octets. */
-       int i, len;
-
-       /*
-        * adata_len ('a') is encoded as follows:
-        * If 0 < a < 2^16 - 2^8    ==> 'a' encoded as [a]16, i.e., two octets
-        *                              (big endian).
-        * If 2^16 - 2^8 ≤ a < 2^32 ==> 'a' encoded as 0xff || 0xfe || [a]32,
-        *                              i.e., six octets (big endian).
-        * If 2^32 ≤ a < 2^64       ==> 'a' encoded as 0xff || 0xff || [a]64,
-        *                              i.e., ten octets (big endian).
-        */
-       if (adata_len < 65280) {
-               len = 2;
-               *(__be16 *)enc_a = cpu_to_be16(adata_len);
-       } else if (adata_len <= 0xFFFFFFFF) {
-               len = 6;
-               *(__be16 *)enc_a = cpu_to_be16(0xfffe);
-               *(__be32 *)&enc_a[2] = cpu_to_be32(adata_len);
-       } else { /* adata_len >= 2^32 */
-               len = 10;
-               *(__be16 *)enc_a = cpu_to_be16(0xffff);
-               *(__be64 *)&enc_a[2] = cpu_to_be64(adata_len);
-       }
-       for (i = 0; i < len; i++)
-               iowrite8(enc_a[i],
-                        aes_dev->base_reg +
-                        AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
-}
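
The three encoding branches above can be mirrored byte-for-byte in user space; for example, a = 100000 falls in the middle branch and encodes as ff fe 00 01 86 a0. A sketch using manual big-endian packing instead of the kernel's cpu_to_be helpers:

    #include <stdint.h>
    #include <stdio.h>

    static int encode_adata_len(uint64_t a, uint8_t *out)
    {
            if (a < 65280) { /* 2^16 - 2^8 */
                    out[0] = a >> 8;
                    out[1] = a & 0xff;
                    return 2;
            }
            if (a <= 0xFFFFFFFFull) {
                    out[0] = 0xff;
                    out[1] = 0xfe;
                    for (int i = 0; i < 4; i++)
                            out[2 + i] = (a >> (8 * (3 - i))) & 0xff;
                    return 6;
            }
            out[0] = 0xff;
            out[1] = 0xff;
            for (int i = 0; i < 8; i++)
                    out[2 + i] = (a >> (8 * (7 - i))) & 0xff;
            return 10;
    }

    int main(void)
    {
            uint8_t buf[10];
            int n = encode_adata_len(100000, buf);

            for (int i = 0; i < n; i++)
                    printf("%02x ", buf[i]); /* ff fe 00 01 86 a0 */
            printf("\n");
            return 0;
    }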
-
-static int ocs_aes_ccm_do_adata(struct ocs_aes_dev *aes_dev,
-                               dma_addr_t adata_dma_list, u32 adata_size)
-{
-       int rc;
-
-       if (!adata_size) {
-               /* Since there is no aad, the LAST_GCX bit can be set now. */
-               aes_a_set_last_gcx_and_adata(aes_dev);
-               goto exit;
-       }
-
-       /* Adata case. */
-
-       /*
-        * Form the encoding of the Associated data length and write it
-        * to the AES/SM4 input buffer.
-        */
-       ocs_aes_ccm_write_adata_len(aes_dev, adata_size);
-
-       /* Configure the AES/SM4 DMA to fetch the Associated Data */
-       dma_to_ocs_aes_ll(aes_dev, adata_dma_list);
-
-       /* Activate DMA to fetch Associated data. */
-       aes_a_dma_active_src_ll_en(aes_dev);
-
-       /* Set LAST_GCX and LAST_ADATA in AES ACTIVE register. */
-       aes_a_set_last_gcx_and_adata(aes_dev);
-
-       /* Wait for DMA transfer to complete. */
-       rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
-       if (rc)
-               return rc;
-
-exit:
-       /* Wait until adata (if present) has been processed. */
-       aes_a_wait_last_gcx(aes_dev);
-       aes_a_dma_wait_input_buffer_occupancy(aes_dev);
-
-       return 0;
-}
-
-static int ocs_aes_ccm_encrypt_do_payload(struct ocs_aes_dev *aes_dev,
-                                         dma_addr_t dst_dma_list,
-                                         dma_addr_t src_dma_list,
-                                         u32 src_size)
-{
-       if (src_size) {
-               /*
-                * Configure and activate DMA for both input and output
-                * data.
-                */
-               dma_to_ocs_aes_ll(aes_dev, src_dma_list);
-               dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
-               aes_a_dma_active_src_dst_ll_en(aes_dev);
-       } else {
-               /* Configure and activate DMA for output data only. */
-               dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
-               aes_a_dma_active_dst_ll_en(aes_dev);
-       }
-
-       /*
-        * Set the LAST GCX bit in AES_ACTIVE Register to instruct
-        * AES/SM4 engine to pad the last block of data.
-        */
-       aes_a_set_last_gcx(aes_dev);
-
-       /* We are done, wait for IRQ and return. */
-       return ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
-}
-
-static int ocs_aes_ccm_decrypt_do_payload(struct ocs_aes_dev *aes_dev,
-                                         dma_addr_t dst_dma_list,
-                                         dma_addr_t src_dma_list,
-                                         u32 src_size)
-{
-       if (!src_size) {
-               /* Let engine process 0-length input. */
-               aes_a_dma_set_xfer_size_zero(aes_dev);
-               aes_a_dma_active(aes_dev);
-               aes_a_set_last_gcx(aes_dev);
-
-               return 0;
-       }
-
-       /*
-        * Configure and activate DMA for both input and output
-        * data.
-        */
-       dma_to_ocs_aes_ll(aes_dev, src_dma_list);
-       dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
-       aes_a_dma_active_src_dst_ll_en(aes_dev);
-       /*
-        * Set the LAST GCX bit in AES_ACTIVE Register; this allows the
-        * AES/SM4 engine to differentiate between encrypted data and
-        * encrypted MAC.
-        */
-       aes_a_set_last_gcx(aes_dev);
-       /*
-        * Enable the DMA DONE interrupt; once the DMA transfer is over,
-        * the interrupt handler will process the MAC/tag.
-        */
-       return ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
-}
-
-/*
- * Compare Tag to Yr.
- *
- * Only used at the end of CCM decrypt. If tag == yr, message authentication
- * has succeeded.
- */
-static inline int ccm_compare_tag_to_yr(struct ocs_aes_dev *aes_dev,
-                                       u8 tag_size_bytes)
-{
-       u32 tag[AES_MAX_TAG_SIZE_U32];
-       u32 yr[AES_MAX_TAG_SIZE_U32];
-       u8 i;
-
-       /* Read Tag and Yr from AES registers. */
-       for (i = 0; i < AES_MAX_TAG_SIZE_U32; i++) {
-               tag[i] = ioread32(aes_dev->base_reg +
-                                 AES_T_MAC_0_OFFSET + (i * sizeof(u32)));
-               yr[i] = ioread32(aes_dev->base_reg +
-                                AES_MULTIPURPOSE2_0_OFFSET +
-                                (i * sizeof(u32)));
-       }
-
-       return memcmp(tag, yr, tag_size_bytes) ? -EBADMSG : 0;
-}
-
-/**
- * ocs_aes_ccm_op() - Perform CCM operation.
- * @aes_dev:           The OCS AES device to use.
- * @cipher:            The Cipher to use (AES or SM4).
- * @instruction:       The instruction to perform (encrypt or decrypt).
- * @dst_dma_list:      The OCS DMA list mapping output memory.
- * @src_dma_list:      The OCS DMA list mapping input payload data.
- * @src_size:          The amount of data mapped by @src_dma_list.
- * @iv:                        The input IV vector.
- * @adata_dma_list:    The OCS DMA list mapping input A-data.
- * @adata_size:                The amount of data mapped by @adata_dma_list.
- * @in_tag:            Input tag.
- * @tag_size:          The size (in bytes) of @in_tag.
- *
- * Note: for encrypt the tag is appended to the ciphertext (in the memory
- *      mapped by @dst_dma_list).
- *
- * Return: 0 on success, negative error code otherwise.
- */
-int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,
-                  enum ocs_cipher cipher,
-                  enum ocs_instruction instruction,
-                  dma_addr_t dst_dma_list,
-                  dma_addr_t src_dma_list,
-                  u32 src_size,
-                  u8 *iv,
-                  dma_addr_t adata_dma_list,
-                  u32 adata_size,
-                  u8 *in_tag,
-                  u32 tag_size)
-{
-       u32 *iv_32;
-       u8 lprime;
-       int rc;
-
-       rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
-                                    AES_BLOCK_SIZE, adata_dma_list, adata_size,
-                                    in_tag, tag_size, cipher, OCS_MODE_CCM,
-                                    instruction, dst_dma_list);
-       if (rc)
-               return rc;
-
-       ocs_aes_init(aes_dev, OCS_MODE_CCM, cipher, instruction);
-
-       /*
-        * Note: RFC 3610 and NIST 800-38C require a counter of zero to encrypt
-        * the auth tag, so ensure this is the case.
-        */
-       lprime = iv[L_PRIME_IDX];
-       memset(&iv[COUNTER_START(lprime)], 0, COUNTER_LEN(lprime));
-
-       /*
-        * Nonce is already converted to ctr0 before being passed into this
-        * function as iv.
-        */
-       iv_32 = (u32 *)iv;
-       iowrite32(__swab32(iv_32[0]),
-                 aes_dev->base_reg + AES_MULTIPURPOSE1_3_OFFSET);
-       iowrite32(__swab32(iv_32[1]),
-                 aes_dev->base_reg + AES_MULTIPURPOSE1_2_OFFSET);
-       iowrite32(__swab32(iv_32[2]),
-                 aes_dev->base_reg + AES_MULTIPURPOSE1_1_OFFSET);
-       iowrite32(__swab32(iv_32[3]),
-                 aes_dev->base_reg + AES_MULTIPURPOSE1_0_OFFSET);
-
-       /* Write MAC/tag length in register AES_TLEN */
-       iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
-       /*
-        * Write the byte length of the last AES/SM4 block of Payload data
-        * (without zero padding and without the length of the MAC) in register
-        * AES_PLEN.
-        */
-       ocs_aes_write_last_data_blk_len(aes_dev, src_size);
-
-       /* Set AES_ACTIVE.TRIGGER to start the operation. */
-       aes_a_op_trigger(aes_dev);
-
-       aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
-
-       /* Form block B0 and write it to the AES/SM4 input buffer. */
-       rc = ocs_aes_ccm_write_b0(aes_dev, iv, adata_size, tag_size, src_size);
-       if (rc)
-               return rc;
-       /*
-        * Ensure that at least CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT clock
-        * cycles have passed since the TRIGGER bit was set.
-        */
-       aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
-                                               CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT);
-
-       /* Process Adata. */
-       rc = ocs_aes_ccm_do_adata(aes_dev, adata_dma_list, adata_size);
-       if (rc)
-               return rc;
-
-       /* For the Encrypt case we just process the payload and return. */
-       if (instruction == OCS_ENCRYPT) {
-               return ocs_aes_ccm_encrypt_do_payload(aes_dev, dst_dma_list,
-                                                     src_dma_list, src_size);
-       }
-       /* For Decrypt we need to process the payload and then the tag. */
-       rc = ocs_aes_ccm_decrypt_do_payload(aes_dev, dst_dma_list,
-                                           src_dma_list, src_size);
-       if (rc)
-               return rc;
-
-       /* Process MAC/tag directly: feed tag to engine and wait for IRQ. */
-       ocs_aes_ccm_write_encrypted_tag(aes_dev, in_tag, tag_size);
-       rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
-       if (rc)
-               return rc;
-
-       return ccm_compare_tag_to_yr(aes_dev, tag_size);
-}
-
-/**
- * ocs_create_linked_list_from_sg() - Create OCS DMA linked list from SG list.
- * @aes_dev:     The OCS AES device the list will be created for.
- * @sg:                  The SG list the OCS DMA linked list will be created from. When
- *               passed to this function, @sg must have been already mapped
- *               with dma_map_sg().
- * @sg_dma_count: The number of DMA-mapped entries in @sg. This must be the
- *               value returned by dma_map_sg() when @sg was mapped.
- * @dll_desc:    The OCS DMA dma_list to use to store information about the
- *               created linked list.
- * @data_size:   The size of the data (from the SG list) to be mapped into the
- *               OCS DMA linked list.
- * @data_offset:  The offset (within the SG list) of the data to be mapped.
- *
- * Return:     0 on success, negative error code otherwise.
- */
-int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
-                                  struct scatterlist *sg,
-                                  int sg_dma_count,
-                                  struct ocs_dll_desc *dll_desc,
-                                  size_t data_size, size_t data_offset)
-{
-       struct ocs_dma_linked_list *ll = NULL;
-       struct scatterlist *sg_tmp;
-       unsigned int tmp;
-       int dma_nents;
-       int i;
-
-       if (!dll_desc || !sg || !aes_dev)
-               return -EINVAL;
-
-       /* Default values for when no dll_desc is created. */
-       dll_desc->vaddr = NULL;
-       dll_desc->dma_addr = DMA_MAPPING_ERROR;
-       dll_desc->size = 0;
-
-       if (data_size == 0)
-               return 0;
-
-       /* Loop over sg_list until we reach entry at specified offset. */
-       while (data_offset >= sg_dma_len(sg)) {
-               data_offset -= sg_dma_len(sg);
-               sg_dma_count--;
-               sg = sg_next(sg);
-               /* If we reach the end of the list, offset was invalid. */
-               if (!sg || sg_dma_count == 0)
-                       return -EINVAL;
-       }
-
-       /* Compute number of DMA-mapped SG entries to add into OCS DMA list. */
-       dma_nents = 0;
-       tmp = 0;
-       sg_tmp = sg;
-       while (tmp < data_offset + data_size) {
-               /* If we reach the end of the list, data_size was invalid. */
-               if (!sg_tmp)
-                       return -EINVAL;
-               tmp += sg_dma_len(sg_tmp);
-               dma_nents++;
-               sg_tmp = sg_next(sg_tmp);
-       }
-       if (dma_nents > sg_dma_count)
-               return -EINVAL;
-
-       /* Allocate the DMA list, one entry for each SG entry. */
-       dll_desc->size = sizeof(struct ocs_dma_linked_list) * dma_nents;
-       dll_desc->vaddr = dma_alloc_coherent(aes_dev->dev, dll_desc->size,
-                                            &dll_desc->dma_addr, GFP_KERNEL);
-       if (!dll_desc->vaddr)
-               return -ENOMEM;
-
-       /* Populate DMA linked list entries. */
-       ll = dll_desc->vaddr;
-       for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
-               ll[i].src_addr = sg_dma_address(sg) + data_offset;
-               ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
-                               (sg_dma_len(sg) - data_offset) : data_size;
-               data_offset = 0;
-               data_size -= ll[i].src_len;
-               /* Current element points to the DMA address of the next one. */
-               ll[i].next = dll_desc->dma_addr + (sizeof(*ll) * (i + 1));
-               ll[i].ll_flags = 0;
-       }
-       /* Terminate last element. */
-       ll[i - 1].next = 0;
-       ll[i - 1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
-
-       return 0;
-}
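
The per-entry offset/length arithmetic of the population loop above can be exercised without any DMA machinery. A user-space sketch with made-up segment lengths (it folds the skip loop and the population loop into one):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t seg_len[] = { 64, 64, 128 }; /* hypothetical sg_dma_len()s */
            uint32_t data_offset = 80, data_size = 100;

            for (int i = 0; i < 3 && data_size; i++) {
                    if (data_offset >= seg_len[i]) { /* skip whole segment */
                            data_offset -= seg_len[i];
                            continue;
                    }
                    uint32_t len = seg_len[i] - data_offset < data_size ?
                                   seg_len[i] - data_offset : data_size;
                    printf("entry: segment %d, offset %u, len %u\n",
                           i, data_offset, len);
                    data_offset = 0;
                    data_size -= len;
            }
            /* prints: entry: segment 1, offset 16, len 48
             *         entry: segment 2, offset 0, len 52 */
            return 0;
    }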
diff --git a/drivers/crypto/keembay/ocs-aes.h b/drivers/crypto/keembay/ocs-aes.h
deleted file mode 100644 (file)
index c035fc4..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel Keem Bay OCS AES Crypto Driver.
- *
- * Copyright (C) 2018-2020 Intel Corporation
- */
-
-#ifndef _CRYPTO_OCS_AES_H
-#define _CRYPTO_OCS_AES_H
-
-#include <linux/dma-mapping.h>
-
-enum ocs_cipher {
-       OCS_AES = 0,
-       OCS_SM4 = 1,
-};
-
-enum ocs_mode {
-       OCS_MODE_ECB = 0,
-       OCS_MODE_CBC = 1,
-       OCS_MODE_CTR = 2,
-       OCS_MODE_CCM = 6,
-       OCS_MODE_GCM = 7,
-       OCS_MODE_CTS = 9,
-};
-
-enum ocs_instruction {
-       OCS_ENCRYPT = 0,
-       OCS_DECRYPT = 1,
-       OCS_EXPAND  = 2,
-       OCS_BYPASS  = 3,
-};
-
-/**
- * struct ocs_aes_dev - AES device context.
- * @list:                      List head for insertion into the device list
- *                             held by the driver.
- * @dev:                       OCS AES device.
- * @irq:                       IRQ number.
- * @base_reg:                  IO base address of OCS AES.
- * @irq_completion:            Completion to indicate IRQ has been triggered.
- * @dma_err_mask:              Error reported by OCS DMA interrupts.
- * @engine:                    Crypto engine for the device.
- */
-struct ocs_aes_dev {
-       struct list_head list;
-       struct device *dev;
-       int irq;
-       void __iomem *base_reg;
-       struct completion irq_completion;
-       u32 dma_err_mask;
-       struct crypto_engine *engine;
-};
-
-/**
- * struct ocs_dll_desc - Descriptor of an OCS DMA Linked List.
- * @vaddr:     Virtual address of the linked list head.
- * @dma_addr:  DMA address of the linked list head.
- * @size:      Size (in bytes) of the linked list.
- */
-struct ocs_dll_desc {
-       void            *vaddr;
-       dma_addr_t      dma_addr;
-       size_t          size;
-};
-
-int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, const u32 key_size,
-                   const u8 *key, const enum ocs_cipher cipher);
-
-int ocs_aes_op(struct ocs_aes_dev *aes_dev,
-              enum ocs_mode mode,
-              enum ocs_cipher cipher,
-              enum ocs_instruction instruction,
-              dma_addr_t dst_dma_list,
-              dma_addr_t src_dma_list,
-              u32 src_size,
-              u8 *iv,
-              u32 iv_size);
-
-/**
- * ocs_aes_bypass_op() - Use OCS DMA to copy data.
- * @aes_dev:            The OCS AES device to use.
- * @dst_dma_list:      The OCS DMA list mapping the memory where input data
- *                     will be copied to.
- * @src_dma_list:      The OCS DMA list mapping input data.
- * @src_size:          The amount of data to copy.
- */
-static inline int ocs_aes_bypass_op(struct ocs_aes_dev *aes_dev,
-                                   dma_addr_t dst_dma_list,
-                                   dma_addr_t src_dma_list, u32 src_size)
-{
-       return ocs_aes_op(aes_dev, OCS_MODE_ECB, OCS_AES, OCS_BYPASS,
-                         dst_dma_list, src_dma_list, src_size, NULL, 0);
-}
-
-int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
-                  enum ocs_cipher cipher,
-                  enum ocs_instruction instruction,
-                  dma_addr_t dst_dma_list,
-                  dma_addr_t src_dma_list,
-                  u32 src_size,
-                  const u8 *iv,
-                  dma_addr_t aad_dma_list,
-                  u32 aad_size,
-                  u8 *out_tag,
-                  u32 tag_size);
-
-int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,
-                  enum ocs_cipher cipher,
-                  enum ocs_instruction instruction,
-                  dma_addr_t dst_dma_list,
-                  dma_addr_t src_dma_list,
-                  u32 src_size,
-                  u8 *iv,
-                  dma_addr_t adata_dma_list,
-                  u32 adata_size,
-                  u8 *in_tag,
-                  u32 tag_size);
-
-int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
-                                  struct scatterlist *sg,
-                                  int sg_dma_count,
-                                  struct ocs_dll_desc *dll_desc,
-                                  size_t data_size,
-                                  size_t data_offset);
-
-irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id);
-
-#endif
diff --git a/drivers/crypto/keembay/ocs-hcu.c b/drivers/crypto/keembay/ocs-hcu.c
deleted file mode 100644 (file)
index deb9bd4..0000000
+++ /dev/null
@@ -1,840 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Keem Bay OCS HCU Crypto Driver.
- *
- * Copyright (C) 2018-2020 Intel Corporation
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/iopoll.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-
-#include <crypto/sha2.h>
-
-#include "ocs-hcu.h"
-
-/* Registers. */
-#define OCS_HCU_MODE                   0x00
-#define OCS_HCU_CHAIN                  0x04
-#define OCS_HCU_OPERATION              0x08
-#define OCS_HCU_KEY_0                  0x0C
-#define OCS_HCU_ISR                    0x50
-#define OCS_HCU_IER                    0x54
-#define OCS_HCU_STATUS                 0x58
-#define OCS_HCU_MSG_LEN_LO             0x60
-#define OCS_HCU_MSG_LEN_HI             0x64
-#define OCS_HCU_KEY_BYTE_ORDER_CFG     0x80
-#define OCS_HCU_DMA_SRC_ADDR           0x400
-#define OCS_HCU_DMA_SRC_SIZE           0x408
-#define OCS_HCU_DMA_DST_SIZE           0x40C
-#define OCS_HCU_DMA_DMA_MODE           0x410
-#define OCS_HCU_DMA_NEXT_SRC_DESCR     0x418
-#define OCS_HCU_DMA_MSI_ISR            0x480
-#define OCS_HCU_DMA_MSI_IER            0x484
-#define OCS_HCU_DMA_MSI_MASK           0x488
-
-/* Register bit definitions. */
-#define HCU_MODE_ALGO_SHIFT            16
-#define HCU_MODE_HMAC_SHIFT            22
-
-#define HCU_STATUS_BUSY                        BIT(0)
-
-#define HCU_BYTE_ORDER_SWAP            BIT(0)
-
-#define HCU_IRQ_HASH_DONE              BIT(2)
-#define HCU_IRQ_HASH_ERR_MASK          (BIT(3) | BIT(1) | BIT(0))
-
-#define HCU_DMA_IRQ_SRC_DONE           BIT(0)
-#define HCU_DMA_IRQ_SAI_ERR            BIT(2)
-#define HCU_DMA_IRQ_BAD_COMP_ERR       BIT(3)
-#define HCU_DMA_IRQ_INBUF_RD_ERR       BIT(4)
-#define HCU_DMA_IRQ_INBUF_WD_ERR       BIT(5)
-#define HCU_DMA_IRQ_OUTBUF_WR_ERR      BIT(6)
-#define HCU_DMA_IRQ_OUTBUF_RD_ERR      BIT(7)
-#define HCU_DMA_IRQ_CRD_ERR            BIT(8)
-#define HCU_DMA_IRQ_ERR_MASK           (HCU_DMA_IRQ_SAI_ERR | \
-                                        HCU_DMA_IRQ_BAD_COMP_ERR | \
-                                        HCU_DMA_IRQ_INBUF_RD_ERR | \
-                                        HCU_DMA_IRQ_INBUF_WD_ERR | \
-                                        HCU_DMA_IRQ_OUTBUF_WR_ERR | \
-                                        HCU_DMA_IRQ_OUTBUF_RD_ERR | \
-                                        HCU_DMA_IRQ_CRD_ERR)
-
-#define HCU_DMA_SNOOP_MASK             (0x7 << 28)
-#define HCU_DMA_SRC_LL_EN              BIT(25)
-#define HCU_DMA_EN                     BIT(31)
-
-#define OCS_HCU_ENDIANNESS_VALUE       0x2A
-
-#define HCU_DMA_MSI_UNMASK             BIT(0)
-#define HCU_DMA_MSI_DISABLE            0
-#define HCU_IRQ_DISABLE                        0
-
-#define OCS_HCU_START                  BIT(0)
-#define OCS_HCU_TERMINATE              BIT(1)
-
-#define OCS_LL_DMA_FLAG_TERMINATE      BIT(31)
-
-#define OCS_HCU_HW_KEY_LEN_U32         (OCS_HCU_HW_KEY_LEN / sizeof(u32))
-
-#define HCU_DATA_WRITE_ENDIANNESS_OFFSET       26
-
-#define OCS_HCU_NUM_CHAINS_SHA256_224_SM3      (SHA256_DIGEST_SIZE / sizeof(u32))
-#define OCS_HCU_NUM_CHAINS_SHA384_512          (SHA512_DIGEST_SIZE / sizeof(u32))
-
-/*
- * While polling on a busy HCU, wait at most 200us between one check and the
- * next.
- */
-#define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US       200
-/* Wait on a busy HCU for maximum 1 second. */
-#define OCS_HCU_WAIT_BUSY_TIMEOUT_US           1000000
-
-/**
- * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
- * @src_addr:  Source address of the data.
- * @src_len:   Length of data to be fetched.
- * @nxt_desc:  Next descriptor to fetch.
- * @ll_flags:  Flags (Freeze & terminate) for the DMA engine.
- */
-struct ocs_hcu_dma_entry {
-       u32 src_addr;
-       u32 src_len;
-       u32 nxt_desc;
-       u32 ll_flags;
-};
-
-/**
- * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
- * @head:      The head of the list (points to the array backing the list).
- * @tail:      The current tail of the list; NULL if the list is empty.
- * @dma_addr:  The DMA address of @head (i.e., the DMA address of the backing
- *             array).
- * @max_nents: Maximum number of entries in the list (i.e., number of elements
- *             in the backing array).
- *
- * The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
- * backing the list is allocated with dma_alloc_coherent() and pointed to by
- * @head.
- */
-struct ocs_hcu_dma_list {
-       struct ocs_hcu_dma_entry        *head;
-       struct ocs_hcu_dma_entry        *tail;
-       dma_addr_t                      dma_addr;
-       size_t                          max_nents;
-};
-
-static inline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
-{
-       switch (algo) {
-       case OCS_HCU_ALGO_SHA224:
-       case OCS_HCU_ALGO_SHA256:
-       case OCS_HCU_ALGO_SM3:
-               return OCS_HCU_NUM_CHAINS_SHA256_224_SM3;
-       case OCS_HCU_ALGO_SHA384:
-       case OCS_HCU_ALGO_SHA512:
-               return OCS_HCU_NUM_CHAINS_SHA384_512;
-       default:
-               return 0;
-       }
-}
-
-static inline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
-{
-       switch (algo) {
-       case OCS_HCU_ALGO_SHA224:
-               return SHA224_DIGEST_SIZE;
-       case OCS_HCU_ALGO_SHA256:
-       case OCS_HCU_ALGO_SM3:
-               /* SM3 shares the same digest size. */
-               return SHA256_DIGEST_SIZE;
-       case OCS_HCU_ALGO_SHA384:
-               return SHA384_DIGEST_SIZE;
-       case OCS_HCU_ALGO_SHA512:
-               return SHA512_DIGEST_SIZE;
-       default:
-               return 0;
-       }
-}
-
-/**
- * ocs_hcu_wait_busy() - Wait for OCS HCU hardware to become usable.
- * @hcu_dev:   OCS HCU device to wait for.
- *
- * Return: 0 if the device is free, -ETIMEDOUT if the device is busy and the
- *        internal timeout has expired.
- */
-static int ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
-{
-       long val;
-
-       return readl_poll_timeout(hcu_dev->io_base + OCS_HCU_STATUS, val,
-                                 !(val & HCU_STATUS_BUSY),
-                                 OCS_HCU_WAIT_BUSY_RETRY_DELAY_US,
-                                 OCS_HCU_WAIT_BUSY_TIMEOUT_US);
-}
-
-static void ocs_hcu_done_irq_en(struct ocs_hcu_dev *hcu_dev)
-{
-       /* Clear any pending interrupts. */
-       writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_ISR);
-       hcu_dev->irq_err = false;
-       /* Enable error and HCU done interrupts. */
-       writel(HCU_IRQ_HASH_DONE | HCU_IRQ_HASH_ERR_MASK,
-              hcu_dev->io_base + OCS_HCU_IER);
-}
-
-static void ocs_hcu_dma_irq_en(struct ocs_hcu_dev *hcu_dev)
-{
-       /* Clear any pending interrupts. */
-       writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
-       hcu_dev->irq_err = false;
-       /* Only operating on DMA source completion and error interrupts. */
-       writel(HCU_DMA_IRQ_ERR_MASK | HCU_DMA_IRQ_SRC_DONE,
-              hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
-       /* Unmask */
-       writel(HCU_DMA_MSI_UNMASK, hcu_dev->io_base + OCS_HCU_DMA_MSI_MASK);
-}
-
-static void ocs_hcu_irq_dis(struct ocs_hcu_dev *hcu_dev)
-{
-       writel(HCU_IRQ_DISABLE, hcu_dev->io_base + OCS_HCU_IER);
-       writel(HCU_DMA_MSI_DISABLE, hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
-}
-
-static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
-{
-       int rc;
-
-       rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
-       if (rc)
-               goto exit;
-
-       if (hcu_dev->irq_err) {
-               /* Unset flag and return error. */
-               hcu_dev->irq_err = false;
-               rc = -EIO;
-               goto exit;
-       }
-
-exit:
-       ocs_hcu_irq_dis(hcu_dev);
-
-       return rc;
-}
-
-/**
- * ocs_hcu_get_intermediate_data() - Get intermediate data.
- * @hcu_dev:   The target HCU device.
- * @data:      Where to store the intermediate data.
- * @algo:      The algorithm being used.
- *
- * This function is used to save the current hashing process state in order to
- * continue it in the future.
- *
- * Note: once all data has been processed, the intermediate data actually
- * contains the hashing result. So this function is also used to retrieve the
- * final result of a hashing process.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
-                                        struct ocs_hcu_idata *data,
-                                        enum ocs_hcu_algo algo)
-{
-       const int n = ocs_hcu_num_chains(algo);
-       u32 *chain;
-       int rc;
-       int i;
-
-       /* Data not requested. */
-       if (!data)
-               return -EINVAL;
-
-       chain = (u32 *)data->digest;
-
-       /* Ensure that the OCS is no longer busy before reading the chains. */
-       rc = ocs_hcu_wait_busy(hcu_dev);
-       if (rc)
-               return rc;
-
-       /*
-        * This loop is safe because data->digest is an array of
-        * SHA512_DIGEST_SIZE bytes and the maximum value returned by
-        * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
-        * to SHA512_DIGEST_SIZE / sizeof(u32).
-        */
-       for (i = 0; i < n; i++)
-               chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
-
-       data->msg_len_lo = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
-       data->msg_len_hi = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
-
-       return 0;
-}
-
-/**
- * ocs_hcu_set_intermediate_data() - Set intermediate data.
- * @hcu_dev:   The target HCU device.
- * @data:      The intermediate data to be set.
- * @algo:      The algorithm being used.
- *
- * This function is used to continue a previous hashing process.
- */
-static void ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev,
-                                         const struct ocs_hcu_idata *data,
-                                         enum ocs_hcu_algo algo)
-{
-       const int n = ocs_hcu_num_chains(algo);
-       u32 *chain = (u32 *)data->digest;
-       int i;
-
-       /*
-        * This loop is safe because data->digest is an array of
-        * SHA512_DIGEST_SIZE bytes and the maximum value returned by
-        * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
-        * to SHA512_DIGEST_SIZE / sizeof(u32).
-        */
-       for (i = 0; i < n; i++)
-               writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);
-
-       writel(data->msg_len_lo, hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
-       writel(data->msg_len_hi, hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
-}
-
-static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
-                             enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
-{
-       u32 *chain;
-       int rc;
-       int i;
-
-       if (!dgst)
-               return -EINVAL;
-
-       /* Length of the output buffer must match the algo digest size. */
-       if (dgst_len != ocs_hcu_digest_size(algo))
-               return -EINVAL;
-
-       /* Ensure that the OCS is no longer busy before reading the chains. */
-       rc = ocs_hcu_wait_busy(hcu_dev);
-       if (rc)
-               return rc;
-
-       chain = (u32 *)dgst;
-       for (i = 0; i < dgst_len / sizeof(u32); i++)
-               chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
-
-       return 0;
-}
-
-/**
- * ocs_hcu_hw_cfg() - Configure the HCU hardware.
- * @hcu_dev:   The HCU device to configure.
- * @algo:      The algorithm to be used by the HCU device.
- * @use_hmac:  Whether or not HW HMAC should be used.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
-                         bool use_hmac)
-{
-       u32 cfg;
-       int rc;
-
-       if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
-           algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
-           algo != OCS_HCU_ALGO_SM3)
-               return -EINVAL;
-
-       rc = ocs_hcu_wait_busy(hcu_dev);
-       if (rc)
-               return rc;
-
-       /* Ensure interrupts are disabled. */
-       ocs_hcu_irq_dis(hcu_dev);
-
-       /* Configure endianness, hashing algorithm and HW HMAC (if needed) */
-       cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
-       cfg |= algo << HCU_MODE_ALGO_SHIFT;
-       if (use_hmac)
-               cfg |= BIT(HCU_MODE_HMAC_SHIFT);
-
-       writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);
-
-       return 0;
-}
-
-/**
- * ocs_hcu_clear_key() - Clear key stored in OCS HMAC KEY registers.
- * @hcu_dev:   The OCS HCU device whose key registers should be cleared.
- */
-static void ocs_hcu_clear_key(struct ocs_hcu_dev *hcu_dev)
-{
-       int reg_off;
-
-       /* Clear OCS_HCU_KEY_[0..15] */
-       for (reg_off = 0; reg_off < OCS_HCU_HW_KEY_LEN; reg_off += sizeof(u32))
-               writel(0, hcu_dev->io_base + OCS_HCU_KEY_0 + reg_off);
-}
-
-/**
- * ocs_hcu_write_key() - Write key to OCS HMAC KEY registers.
- * @hcu_dev:   The OCS HCU device the key should be written to.
- * @key:       The key to be written.
- * @len:       The size of the key to write; must not exceed OCS_HCU_HW_KEY_LEN.
- *
- * Return:     0 on success, negative error code otherwise.
- */
-static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key, size_t len)
-{
-       u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
-       int i;
-
-       if (len > OCS_HCU_HW_KEY_LEN)
-               return -EINVAL;
-
-       /* Copy key into temporary u32 array. */
-       memcpy(key_u32, key, len);
-
-       /*
-        * Hardware requires all the bytes of the HW Key vector to be
-        * written. So pad with zero until we reach OCS_HCU_HW_KEY_LEN.
-        */
-       memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);
-
-       /*
-        * OCS hardware expects the MSB of the key to be written at the highest
-        * address of the HCU Key vector; in other words, the key must be
-        * written in reverse order.
-        *
-        * Therefore, we first enable byte swapping for the HCU key vector;
-        * so that the bytes of each 32-bit word written to OCS_HCU_KEY_[0..15]
-        * will be swapped:
-        * 3 <---> 0, 2 <---> 1.
-        */
-       writel(HCU_BYTE_ORDER_SWAP,
-              hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);
-       /*
-        * And then we write the 32-bit words composing the key starting from
-        * the end of the key.
-        */
-       for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
-               writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
-                      hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));
-
-       memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);
-
-       return 0;
-}
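
The net effect of the two steps above (per-word byte swapping done by the hardware, words written in reverse order by software) is that the key is stored fully byte-reversed in the HCU key vector. A user-space demonstration with an 8-byte toy key (the real vector is OCS_HCU_HW_KEY_LEN bytes long; a little-endian host is assumed, matching the driver's u32 view of the key):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t swab32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0xff00) |
                   ((x << 8) & 0xff0000) | (x << 24);
    }

    int main(void)
    {
            uint8_t key[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
            uint32_t w[2], out[2];
            uint8_t bytes[8];

            memcpy(w, key, sizeof(key));
            out[0] = swab32(w[1]); /* highest word written first, bytes swapped */
            out[1] = swab32(w[0]);
            memcpy(bytes, out, sizeof(out));

            for (int i = 0; i < 8; i++)
                    printf("%d ", bytes[i]); /* prints: 8 7 6 5 4 3 2 1 */
            printf("\n");
            return 0;
    }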
-
-/**
- * ocs_hcu_ll_dma_start() - Start OCS HCU hashing via DMA
- * @hcu_dev:   The OCS HCU device to use.
- * @dma_list:  The OCS DMA list mapping the data to hash.
- * @finalize:  Whether or not this is the last hashing operation and therefore
- *             the final hash should be computed even if the data is not
- *             block-aligned.
- *
- * Return: 0 on success, negative error code otherwise.
- */
-static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
-                               const struct ocs_hcu_dma_list *dma_list,
-                               bool finalize)
-{
-       u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;
-       int rc;
-
-       if (!dma_list)
-               return -EINVAL;
-
-       /*
-        * For final requests we use HCU_DONE IRQ to be notified when all input
-        * data has been processed by the HCU; however, we cannot do so for
-        * non-final requests, because we don't get a HCU_DONE IRQ when we
-        * don't terminate the operation.
-        *
-        * Therefore, for non-final requests, we use the DMA IRQ, which
-        * triggers when the DMA has finished feeding all the input data to the
-        * HCU, but the HCU may still be processing it. This is fine, since we
-        * will wait for the HCU processing to be completed when we try to read
-        * intermediate results, in ocs_hcu_get_intermediate_data().
-        */
-       if (finalize)
-               ocs_hcu_done_irq_en(hcu_dev);
-       else
-               ocs_hcu_dma_irq_en(hcu_dev);
-
-       reinit_completion(&hcu_dev->irq_done);
-       writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
-       writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
-       writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);
-
-       writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
-
-       writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
-
-       if (finalize)
-               writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
-
-       rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
-       if (rc)
-               return rc;
-
-       return 0;
-}
-
-struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
-                                               int max_nents)
-{
-       struct ocs_hcu_dma_list *dma_list;
-
-       dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
-       if (!dma_list)
-               return NULL;
-
-       /* Total size of the DMA list to allocate. */
-       dma_list->head = dma_alloc_coherent(hcu_dev->dev,
-                                           sizeof(*dma_list->head) * max_nents,
-                                           &dma_list->dma_addr, GFP_KERNEL);
-       if (!dma_list->head) {
-               kfree(dma_list);
-               return NULL;
-       }
-       dma_list->max_nents = max_nents;
-       dma_list->tail = NULL;
-
-       return dma_list;
-}
-
-void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
-                          struct ocs_hcu_dma_list *dma_list)
-{
-       if (!dma_list)
-               return;
-
-       dma_free_coherent(hcu_dev->dev,
-                         sizeof(*dma_list->head) * dma_list->max_nents,
-                         dma_list->head, dma_list->dma_addr);
-
-       kfree(dma_list);
-}
-
-/* Add a new DMA entry at the end of the OCS DMA list. */
-int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
-                             struct ocs_hcu_dma_list *dma_list,
-                             dma_addr_t addr, u32 len)
-{
-       struct device *dev = hcu_dev->dev;
-       struct ocs_hcu_dma_entry *old_tail;
-       struct ocs_hcu_dma_entry *new_tail;
-
-       if (!len)
-               return 0;
-
-       if (!dma_list)
-               return -EINVAL;
-
-       if (addr & ~OCS_HCU_DMA_BIT_MASK) {
-               dev_err(dev,
-                       "Unexpected error: Invalid DMA address for OCS HCU\n");
-               return -EINVAL;
-       }
-
-       old_tail = dma_list->tail;
-       new_tail = old_tail ? old_tail + 1 : dma_list->head;
-
-       /* Check if list is full. */
-       if (new_tail - dma_list->head >= dma_list->max_nents)
-               return -ENOMEM;
-
-       /*
-        * If there was an old tail (i.e., this is not the first element we are
-        * adding), un-terminate the old tail and make it point to the new one.
-        */
-       if (old_tail) {
-               old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
-               /*
-                * The old tail's 'nxt_desc' must point to the DMA address of
-                * new tail.
-                */
-               old_tail->nxt_desc = dma_list->dma_addr +
-                                    sizeof(*dma_list->tail) * (new_tail -
-                                                               dma_list->head);
-       }
-
-       new_tail->src_addr = (u32)addr;
-       new_tail->src_len = (u32)len;
-       new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
-       new_tail->nxt_desc = 0;
-
-       /* Update list tail with new tail. */
-       dma_list->tail = new_tail;
-
-       return 0;
-}
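A hypothetical caller might chain the list helpers above roughly as follows;
'hcu_dev' and a context already set up with ocs_hcu_hash_init() are assumed,
error handling is abbreviated, and this is a sketch rather than code from
this patch.

	static int example_hash_one_buffer(struct ocs_hcu_dev *hcu_dev,
					   struct ocs_hcu_hash_ctx *ctx,
					   void *buf, size_t len)
	{
		struct ocs_hcu_dma_list *list;
		dma_addr_t addr;
		int rc;

		/* One entry is enough for a single contiguous buffer. */
		list = ocs_hcu_dma_list_alloc(hcu_dev, 1);
		if (!list)
			return -ENOMEM;

		addr = dma_map_single(hcu_dev->dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(hcu_dev->dev, addr)) {
			rc = -EIO;
			goto out_free;
		}

		rc = ocs_hcu_dma_list_add_tail(hcu_dev, list, addr, len);
		if (!rc)
			rc = ocs_hcu_hash_update(hcu_dev, ctx, list);

		dma_unmap_single(hcu_dev->dev, addr, len, DMA_TO_DEVICE);
	out_free:
		ocs_hcu_dma_list_free(hcu_dev, list);
		return rc;
	}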
-
-/**
- * ocs_hcu_hash_init() - Initialize hash operation context.
- * @ctx:       The context to initialize.
- * @algo:      The hashing algorithm to use.
- *
- * Return:     0 on success, negative error code otherwise.
- */
-int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
-{
-       if (!ctx)
-               return -EINVAL;
-
-       ctx->algo = algo;
-       ctx->idata.msg_len_lo = 0;
-       ctx->idata.msg_len_hi = 0;
-       /* No need to set idata.digest to 0. */
-
-       return 0;
-}
-
-/**
- * ocs_hcu_hash_update() - Perform a hashing iteration.
- * @hcu_dev:   The OCS HCU device to use.
- * @ctx:       The OCS HCU hashing context.
- * @dma_list:  The OCS DMA list mapping the input data to process.
- *
- * Return: 0 on success; negative error code otherwise.
- */
-int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
-                       struct ocs_hcu_hash_ctx *ctx,
-                       const struct ocs_hcu_dma_list *dma_list)
-{
-       int rc;
-
-       if (!hcu_dev || !ctx)
-               return -EINVAL;
-
-       /* Configure the hardware for the current request. */
-       rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
-       if (rc)
-               return rc;
-
-       /* If we already processed some data, idata needs to be set. */
-       if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
-               ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
-
-       /* Start linked-list DMA hashing. */
-       rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, false);
-       if (rc)
-               return rc;
-
-       /* Update idata and return. */
-       return ocs_hcu_get_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
-}
-
-/**
- * ocs_hcu_hash_finup() - Update and finalize hash computation.
- * @hcu_dev:   The OCS HCU device to use.
- * @ctx:       The OCS HCU hashing context.
- * @dma_list:  The OCS DMA list mapping the input data to process.
- * @dgst:      The buffer in which to store the computed digest.
- * @dgst_len:  The length of @dgst.
- *
- * Return: 0 on success; negative error code otherwise.
- */
-int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
-                      const struct ocs_hcu_hash_ctx *ctx,
-                      const struct ocs_hcu_dma_list *dma_list,
-                      u8 *dgst, size_t dgst_len)
-{
-       int rc;
-
-       if (!hcu_dev || !ctx)
-               return -EINVAL;
-
-       /* Configure the hardware for the current request. */
-       rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
-       if (rc)
-               return rc;
-
-       /* If we already processed some data, idata needs to be set. */
-       if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
-               ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
-
-       /* Start linked-list DMA hashing. */
-       rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
-       if (rc)
-               return rc;
-
-       /* Get digest and return. */
-       return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
-}
-
-/**
- * ocs_hcu_hash_final() - Finalize hash computation.
- * @hcu_dev:           The OCS HCU device to use.
- * @ctx:               The OCS HCU hashing context.
- * @dgst:              The buffer in which to store the computed digest.
- * @dgst_len:          The length of @dgst.
- *
- * Return: 0 on success; negative error code otherwise.
- */
-int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
-                      const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
-                      size_t dgst_len)
-{
-       int rc;
-
-       if (!hcu_dev || !ctx)
-               return -EINVAL;
-
-       /* Configure the hardware for the current request. */
-       rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
-       if (rc)
-               return rc;
-
-       /* If we already processed some data, idata needs to be set. */
-       if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
-               ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
-
-       /*
-        * Enable HCU interrupts, so that HCU_DONE will be triggered once the
-        * final hash is computed.
-        */
-       ocs_hcu_done_irq_en(hcu_dev);
-       reinit_completion(&hcu_dev->irq_done);
-       writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
-
-       rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
-       if (rc)
-               return rc;
-
-       /* Get digest and return. */
-       return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
-}
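Taken together, init/update/final support staged hashing: every update
round-trips the running state through ctx->idata, so the engine can be
time-shared between requests. A hypothetical sequence (the DMA lists and
device pointer are assumed to exist):

	struct ocs_hcu_hash_ctx ctx;
	u8 dgst[SHA256_DIGEST_SIZE];
	int rc;

	rc = ocs_hcu_hash_init(&ctx, OCS_HCU_ALGO_SHA256);
	if (!rc)
		rc = ocs_hcu_hash_update(hcu_dev, &ctx, dma_list_a); /* chunk 1 */
	if (!rc)
		rc = ocs_hcu_hash_update(hcu_dev, &ctx, dma_list_b); /* chunk 2 */
	if (!rc)
		rc = ocs_hcu_hash_final(hcu_dev, &ctx, dgst, sizeof(dgst));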
-
-/**
- * ocs_hcu_digest() - Compute hash digest.
- * @hcu_dev:           The OCS HCU device to use.
- * @algo:              The hash algorithm to use.
- * @data:              The input data to process.
- * @data_len:          The length of @data.
- * @dgst:              The buffer in which to store the computed digest.
- * @dgst_len:          The length of @dgst.
- *
- * Return: 0 on success; negative error code otherwise.
- */
-int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
-                  void *data, size_t data_len, u8 *dgst, size_t dgst_len)
-{
-       struct device *dev = hcu_dev->dev;
-       dma_addr_t dma_handle;
-       u32 reg;
-       int rc;
-
-       /* Configure the hardware for the current request. */
-       rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
-       if (rc)
-               return rc;
-
-       dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, dma_handle))
-               return -EIO;
-
-       reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;
-
-       ocs_hcu_done_irq_en(hcu_dev);
-
-       reinit_completion(&hcu_dev->irq_done);
-
-       writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
-       writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
-       writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
-       writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
-
-       writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
-
-       rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
-       if (rc)
-               return rc;
-
-       dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);
-
-       return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
-}
-
-/**
- * ocs_hcu_hmac() - Compute HMAC.
- * @hcu_dev:           The OCS HCU device to use.
- * @algo:              The hash algorithm to use with HMAC.
- * @key:               The key to use.
- * @key_len:           The length of @key.
- * @dma_list:          The OCS DMA list mapping the input data to process.
- * @dgst:              The buffer in which to store the computed HMAC.
- * @dgst_len:          The length of @dgst.
- *
- * Return: 0 on success; negative error code otherwise.
- */
-int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
-                const u8 *key, size_t key_len,
-                const struct ocs_hcu_dma_list *dma_list,
-                u8 *dgst, size_t dgst_len)
-{
-       int rc;
-
-       /* Ensure the key is non-NULL and non-empty. */
-       if (!key || key_len == 0)
-               return -EINVAL;
-
-       /* Configure the hardware for the current request. */
-       rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
-       if (rc)
-               return rc;
-
-       rc = ocs_hcu_write_key(hcu_dev, key, key_len);
-       if (rc)
-               return rc;
-
-       rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
-
-       /* Clear HW key before processing return code. */
-       ocs_hcu_clear_key(hcu_dev);
-
-       if (rc)
-               return rc;
-
-       return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
-}
-
-irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
-{
-       struct ocs_hcu_dev *hcu_dev = dev_id;
-       u32 hcu_irq;
-       u32 dma_irq;
-
-       /* Read and clear the HCU interrupt. */
-       hcu_irq = readl(hcu_dev->io_base + OCS_HCU_ISR);
-       writel(hcu_irq, hcu_dev->io_base + OCS_HCU_ISR);
-
-       /* Read and clear the HCU DMA interrupt. */
-       dma_irq = readl(hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
-       writel(dma_irq, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
-
-       /* Check for errors. */
-       if (hcu_irq & HCU_IRQ_HASH_ERR_MASK || dma_irq & HCU_DMA_IRQ_ERR_MASK) {
-               hcu_dev->irq_err = true;
-               goto complete;
-       }
-
-       /* Check for DONE IRQs. */
-       if (hcu_irq & HCU_IRQ_HASH_DONE || dma_irq & HCU_DMA_IRQ_SRC_DONE)
-               goto complete;
-
-       return IRQ_NONE;
-
-complete:
-       complete(&hcu_dev->irq_done);
-
-       return IRQ_HANDLED;
-}
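This handler pairs with a completion-based wait on the submitting side. The
actual ocs_hcu_wait_and_disable_irq() is defined earlier in this file and is
not shown in this hunk; purely as an assumed-shape sketch, the wait side
might look like:

	/* Hypothetical wait helper; the timeout value is invented. */
	static int example_wait_irq(struct ocs_hcu_dev *hcu_dev)
	{
		long rc;

		rc = wait_for_completion_interruptible_timeout(&hcu_dev->irq_done,
							       msecs_to_jiffies(1000));
		if (rc < 0)
			return rc;		/* interrupted */
		if (rc == 0)
			return -ETIMEDOUT;	/* no IRQ within the timeout */

		return hcu_dev->irq_err ? -EIO : 0;
	}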
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/keembay/ocs-hcu.h b/drivers/crypto/keembay/ocs-hcu.h
deleted file mode 100644 (file)
index fbbbb92..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel Keem Bay OCS HCU Crypto Driver.
- *
- * Copyright (C) 2018-2020 Intel Corporation
- */
-
-#include <linux/dma-mapping.h>
-
-#ifndef _CRYPTO_OCS_HCU_H
-#define _CRYPTO_OCS_HCU_H
-
-#define OCS_HCU_DMA_BIT_MASK           DMA_BIT_MASK(32)
-
-#define OCS_HCU_HW_KEY_LEN             64
-
-struct ocs_hcu_dma_list;
-
-enum ocs_hcu_algo {
-       OCS_HCU_ALGO_SHA256 = 2,
-       OCS_HCU_ALGO_SHA224 = 3,
-       OCS_HCU_ALGO_SHA384 = 4,
-       OCS_HCU_ALGO_SHA512 = 5,
-       OCS_HCU_ALGO_SM3    = 6,
-};
-
-/**
- * struct ocs_hcu_dev - OCS HCU device context.
- * @list:      List of device contexts.
- * @dev:       OCS HCU device.
- * @io_base:   Base address of OCS HCU registers.
- * @engine:    Crypto engine for the device.
- * @irq:       IRQ number.
- * @irq_done:  Completion for IRQ.
- * @irq_err:   Flag indicating an IRQ error has happened.
- */
-struct ocs_hcu_dev {
-       struct list_head list;
-       struct device *dev;
-       void __iomem *io_base;
-       struct crypto_engine *engine;
-       int irq;
-       struct completion irq_done;
-       bool irq_err;
-};
-
-/**
- * struct ocs_hcu_idata - Intermediate data generated by the HCU.
- * @msg_len_lo: Length of data the HCU has operated on in bits, low 32b.
- * @msg_len_hi: Length of data the HCU has operated on in bits, high 32b.
- * @digest: The digest read from the HCU. If the HCU is terminated, it will
- *         contain the actual hash digest. Otherwise it is the intermediate
- *         state.
- */
-struct ocs_hcu_idata {
-       u32 msg_len_lo;
-       u32 msg_len_hi;
-       u8  digest[SHA512_DIGEST_SIZE];
-};
-
-/**
- * struct ocs_hcu_hash_ctx - Context for OCS HCU hashing operation.
- * @algo:      The hashing algorithm being used.
- * @idata:     The current intermediate data.
- */
-struct ocs_hcu_hash_ctx {
-       enum ocs_hcu_algo       algo;
-       struct ocs_hcu_idata    idata;
-};
-
-irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id);
-
-struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
-                                               int max_nents);
-
-void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
-                          struct ocs_hcu_dma_list *dma_list);
-
-int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
-                             struct ocs_hcu_dma_list *dma_list,
-                             dma_addr_t addr, u32 len);
-
-int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo);
-
-int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
-                       struct ocs_hcu_hash_ctx *ctx,
-                       const struct ocs_hcu_dma_list *dma_list);
-
-int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
-                      const struct ocs_hcu_hash_ctx *ctx,
-                      const struct ocs_hcu_dma_list *dma_list,
-                      u8 *dgst, size_t dgst_len);
-
-int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
-                      const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
-                      size_t dgst_len);
-
-int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
-                  void *data, size_t data_len, u8 *dgst, size_t dgst_len);
-
-int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
-                const u8 *key, size_t key_len,
-                const struct ocs_hcu_dma_list *dma_list,
-                u8 *dgst, size_t dgst_len);
-
-#endif /* _CRYPTO_OCS_HCU_H */
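For one-shot requests the header also exposes ocs_hcu_digest(); a
hypothetical SHA-256 call (buffer names assumed) would be:

	u8 dgst[SHA256_DIGEST_SIZE];
	int rc;

	/* 'hcu_dev', 'buf' and 'len' are assumed to exist in the caller. */
	rc = ocs_hcu_digest(hcu_dev, OCS_HCU_ALGO_SHA256, buf, len,
			    dgst, sizeof(dgst));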
index 1c11946a4f0ba977648a6a2d3f397a07294f7803..f6b7bce0e65686e17356b6753c9a28095724f1cd 100644 (file)
@@ -1022,21 +1022,15 @@ static int mxs_dcp_probe(struct platform_device *pdev)
        sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
 
        /* DCP clock is optional, only used on some SOCs */
-       sdcp->dcp_clk = devm_clk_get(dev, "dcp");
-       if (IS_ERR(sdcp->dcp_clk)) {
-               if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
-                       return PTR_ERR(sdcp->dcp_clk);
-               sdcp->dcp_clk = NULL;
-       }
-       ret = clk_prepare_enable(sdcp->dcp_clk);
-       if (ret)
-               return ret;
+       sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
+       if (IS_ERR(sdcp->dcp_clk))
+               return PTR_ERR(sdcp->dcp_clk);
 
        /* Restart the DCP block. */
        ret = stmp_reset_block(sdcp->base);
        if (ret) {
                dev_err(dev, "Failed reset\n");
-               goto err_disable_unprepare_clk;
+               return ret;
        }
 
        /* Initialize control register. */
@@ -1076,7 +1070,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
        if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
                dev_err(dev, "Error starting SHA thread!\n");
                ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
-               goto err_disable_unprepare_clk;
+               return ret;
        }
 
        sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
@@ -1134,9 +1128,6 @@ err_destroy_aes_thread:
 err_destroy_sha_thread:
        kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
 
-err_disable_unprepare_clk:
-       clk_disable_unprepare(sdcp->dcp_clk);
-
        return ret;
 }
 
@@ -1156,8 +1147,6 @@ static int mxs_dcp_remove(struct platform_device *pdev)
        kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
        kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
 
-       clk_disable_unprepare(sdcp->dcp_clk);
-
        platform_set_drvdata(pdev, NULL);
 
        global_sdcp = NULL;
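The hunks above collapse the open-coded optional-clock handling into
devm_clk_get_optional_enabled(), which returns NULL when the clock is absent,
enables it when present, and undoes the prepare/enable automatically on
driver detach; that is why the error-path and remove-path
clk_disable_unprepare() calls can go away. A generic sketch of the pattern
(driver context invented, "dcp" standing in for any optional clock name):

	static int example_get_optional_clk(struct device *dev, struct clk **out)
	{
		struct clk *clk;

		clk = devm_clk_get_optional_enabled(dev, "dcp");
		if (IS_ERR(clk))
			return PTR_ERR(clk);	/* a real error, not mere absence */

		*out = clk;	/* may be NULL; clk_*() helpers accept NULL */
		return 0;
	}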
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
deleted file mode 100644 (file)
index 1220cc8..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config CRYPTO_DEV_QAT
-       tristate
-       select CRYPTO_AEAD
-       select CRYPTO_AUTHENC
-       select CRYPTO_SKCIPHER
-       select CRYPTO_AKCIPHER
-       select CRYPTO_DH
-       select CRYPTO_HMAC
-       select CRYPTO_RSA
-       select CRYPTO_SHA1
-       select CRYPTO_SHA256
-       select CRYPTO_SHA512
-       select CRYPTO_LIB_AES
-       select FW_LOADER
-       select CRC8
-
-config CRYPTO_DEV_QAT_DH895xCC
-       tristate "Support for Intel(R) DH895xCC"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
-         for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_dh895xcc.
-
-config CRYPTO_DEV_QAT_C3XXX
-       tristate "Support for Intel(R) C3XXX"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
-         for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_c3xxx.
-
-config CRYPTO_DEV_QAT_C62X
-       tristate "Support for Intel(R) C62X"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) C62x with Intel(R) QuickAssist Technology
-         for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_c62x.
-
-config CRYPTO_DEV_QAT_4XXX
-       tristate "Support for Intel(R) QAT_4XXX"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) QuickAssist Technology QAT_4xxx
-         for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_4xxx.
-
-config CRYPTO_DEV_QAT_DH895xCCVF
-       tristate "Support for Intel(R) DH895xCC Virtual Function"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select PCI_IOV
-       select CRYPTO_DEV_QAT
-
-       help
-         Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
-         Virtual Function for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_dh895xccvf.
-
-config CRYPTO_DEV_QAT_C3XXXVF
-       tristate "Support for Intel(R) C3XXX Virtual Function"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select PCI_IOV
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
-         Virtual Function for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_c3xxxvf.
-
-config CRYPTO_DEV_QAT_C62XVF
-       tristate "Support for Intel(R) C62X Virtual Function"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select PCI_IOV
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) C62x with Intel(R) QuickAssist Technology
-         Virtual Function for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_c62xvf.
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
deleted file mode 100644 (file)
index 258c8a6..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
-obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
-obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
-obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
-obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
-obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
-obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
-obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/qat/qat_4xxx/Makefile b/drivers/crypto/qat/qat_4xxx/Makefile
deleted file mode 100644 (file)
index ff9c8b5..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
-qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
deleted file mode 100644 (file)
index 834a705..0000000
+++ /dev/null
@@ -1,371 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2020 - 2021 Intel Corporation */
-#include <linux/iopoll.h>
-#include <adf_accel_devices.h>
-#include <adf_cfg.h>
-#include <adf_common_drv.h>
-#include <adf_gen4_dc.h>
-#include <adf_gen4_hw_data.h>
-#include <adf_gen4_pfvf.h>
-#include <adf_gen4_pm.h>
-#include "adf_4xxx_hw_data.h"
-#include "icp_qat_hw.h"
-
-struct adf_fw_config {
-       u32 ae_mask;
-       char *obj_name;
-};
-
-static struct adf_fw_config adf_4xxx_fw_cy_config[] = {
-       {0xF0, ADF_4XXX_SYM_OBJ},
-       {0xF, ADF_4XXX_ASYM_OBJ},
-       {0x100, ADF_4XXX_ADMIN_OBJ},
-};
-
-static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
-       {0xF0, ADF_4XXX_DC_OBJ},
-       {0xF, ADF_4XXX_DC_OBJ},
-       {0x100, ADF_4XXX_ADMIN_OBJ},
-};
-
-/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
-       0x5555555, 0x5555555, 0x5555555, 0x5555555,
-       0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
-       0x0
-};
-
-static struct adf_hw_device_class adf_4xxx_class = {
-       .name = ADF_4XXX_DEVICE_NAME,
-       .type = DEV_4XXX,
-       .instances = 0,
-};
-
-enum dev_services {
-       SVC_CY = 0,
-       SVC_DC,
-};
-
-static const char *const dev_cfg_services[] = {
-       [SVC_CY] = ADF_CFG_CY,
-       [SVC_DC] = ADF_CFG_DC,
-};
-
-static int get_service_enabled(struct adf_accel_dev *accel_dev)
-{
-       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
-       int ret;
-
-       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
-                                     ADF_SERVICES_ENABLED, services);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev),
-                       ADF_SERVICES_ENABLED " param not found\n");
-               return ret;
-       }
-
-       ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
-                          services);
-       if (ret < 0)
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
-                       services);
-
-       return ret;
-}
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       return ADF_4XXX_ACCELERATORS_MASK;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       u32 me_disable = self->fuses;
-
-       return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
-}
-
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
-       return ADF_4XXX_MAX_ACCELERATORS;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
-       if (!self || !self->ae_mask)
-               return 0;
-
-       return hweight32(self->ae_mask);
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_4XXX_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_4XXX_ETR_BAR;
-}
-
-static u32 get_sram_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_4XXX_SRAM_BAR;
-}
-
-/*
- * The vector routing table is used to select the MSI-X entry to use for each
- * interrupt source.
- * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
- * The final entry corresponds to VF2PF or error interrupts.
- * This vector table could be used to configure one MSI-X entry to be shared
- * between multiple interrupt sources.
- *
- * The default routing is set to have a one to one correspondence between the
- * interrupt source and the MSI-X entry used.
- */
-static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *csr;
-       int i;
-
-       csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-       for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
-               ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
-}
-
-static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
-       u32 capabilities_cy, capabilities_dc;
-       u32 fusectl1;
-
-       /* Read accelerator capabilities mask */
-       pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
-
-       capabilities_cy = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
-                         ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
-                         ICP_ACCEL_CAPABILITIES_CIPHER |
-                         ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
-                         ICP_ACCEL_CAPABILITIES_SHA3 |
-                         ICP_ACCEL_CAPABILITIES_SHA3_EXT |
-                         ICP_ACCEL_CAPABILITIES_HKDF |
-                         ICP_ACCEL_CAPABILITIES_ECEDMONT |
-                         ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
-                         ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
-                         ICP_ACCEL_CAPABILITIES_AES_V2;
-
-       /* A set bit in fusectl1 means the feature is OFF in this SKU */
-       if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_HKDF;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
-       }
-
-       capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
-                         ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
-                         ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
-                         ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
-
-       if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
-               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
-               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
-               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
-               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
-       }
-
-       switch (get_service_enabled(accel_dev)) {
-       case SVC_CY:
-               return capabilities_cy;
-       case SVC_DC:
-               return capabilities_dc;
-       }
-
-       return 0;
-}
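A worked reading of the fuse convention above, with an invented fuse value:

	/* Invented example: only the PKE (asymmetric) slice fused off. */
	u32 fusectl1 = ICP_ACCEL_4XXX_MASK_PKE_SLICE;

	/*
	 * get_accel_cap() would then clear CRYPTO_ASYMMETRIC and ECEDMONT
	 * from capabilities_cy and leave capabilities_dc untouched.
	 */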
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       return DEV_SKU_1;
-}
-
-static const u32 *adf_get_arbiter_mapping(void)
-{
-       return thrd_to_arb_map;
-}
-
-static void get_arb_info(struct arb_info *arb_info)
-{
-       arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
-       arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
-       arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
-}
-
-static void get_admin_info(struct admin_info *admin_csrs_info)
-{
-       admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
-       admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
-       admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
-}
-
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
-       struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
-       void __iomem *csr = misc_bar->virt_addr;
-
-       /* Enable all in errsou3 except VFLR notification on host */
-       ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
-}
-
-static void adf_enable_ints(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *addr;
-
-       addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-
-       /* Enable bundle interrupts */
-       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
-       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);
-
-       /* Enable misc interrupts */
-       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
-}
-
-static int adf_init_device(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *addr;
-       u32 status;
-       u32 csr;
-       int ret;
-
-       addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-
-       /* Temporarily mask PM interrupt */
-       csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
-       csr |= ADF_GEN4_PM_SOU;
-       ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
-
-       /* Set DRV_ACTIVE bit to power up the device */
-       ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
-
-       /* Poll status register to make sure the device is powered up */
-       ret = read_poll_timeout(ADF_CSR_RD, status,
-                               status & ADF_GEN4_PM_INIT_STATE,
-                               ADF_GEN4_PM_POLL_DELAY_US,
-                               ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
-                               ADF_GEN4_PM_STATUS);
-       if (ret)
-               dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
-
-       return ret;
-}
-
-static u32 uof_get_num_objs(void)
-{
-       BUILD_BUG_ON_MSG(ARRAY_SIZE(adf_4xxx_fw_cy_config) !=
-                        ARRAY_SIZE(adf_4xxx_fw_dc_config),
-                        "Size mismatch between adf_4xxx_fw_*_config arrays");
-
-       return ARRAY_SIZE(adf_4xxx_fw_cy_config);
-}
-
-static char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
-{
-       switch (get_service_enabled(accel_dev)) {
-       case SVC_CY:
-               return adf_4xxx_fw_cy_config[obj_num].obj_name;
-       case SVC_DC:
-               return adf_4xxx_fw_dc_config[obj_num].obj_name;
-       }
-
-       return NULL;
-}
-
-static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
-{
-       switch (get_service_enabled(accel_dev)) {
-       case SVC_CY:
-               return adf_4xxx_fw_cy_config[obj_num].ae_mask;
-       case SVC_DC:
-               return adf_4xxx_fw_dc_config[obj_num].ae_mask;
-       }
-
-       return 0;
-}
-
-void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &adf_4xxx_class;
-       hw_data->instance_id = adf_4xxx_class.instances++;
-       hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
-       hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
-       hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
-       hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
-       hw_data->num_logical_accel = 1;
-       hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_isr_resource_alloc;
-       hw_data->free_irq = adf_isr_resource_free;
-       hw_data->enable_error_correction = adf_enable_error_correction;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_num_accels = get_num_accels;
-       hw_data->get_num_aes = get_num_aes;
-       hw_data->get_sram_bar_id = get_sram_bar_id;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_arb_info = get_arb_info;
-       hw_data->get_admin_info = get_admin_info;
-       hw_data->get_accel_cap = get_accel_cap;
-       hw_data->get_sku = get_sku;
-       hw_data->fw_name = ADF_4XXX_FW;
-       hw_data->fw_mmp_name = ADF_4XXX_MMP;
-       hw_data->init_admin_comms = adf_init_admin_comms;
-       hw_data->exit_admin_comms = adf_exit_admin_comms;
-       hw_data->send_admin_init = adf_send_admin_init;
-       hw_data->init_arb = adf_init_arb;
-       hw_data->exit_arb = adf_exit_arb;
-       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
-       hw_data->enable_ints = adf_enable_ints;
-       hw_data->init_device = adf_init_device;
-       hw_data->reset_device = adf_reset_flr;
-       hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
-       hw_data->uof_get_num_objs = uof_get_num_objs;
-       hw_data->uof_get_name = uof_get_name;
-       hw_data->uof_get_ae_mask = uof_get_ae_mask;
-       hw_data->set_msix_rttable = set_msix_default_rttable;
-       hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
-       hw_data->disable_iov = adf_disable_sriov;
-       hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
-       hw_data->enable_pm = adf_gen4_enable_pm;
-       hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
-       hw_data->dev_config = adf_gen4_dev_config;
-
-       adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen4_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-}
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
deleted file mode 100644 (file)
index e98428b..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_4XXX_HW_DATA_H_
-#define ADF_4XXX_HW_DATA_H_
-
-#include <adf_accel_devices.h>
-
-/* PCIe configuration space */
-#define ADF_4XXX_SRAM_BAR              0
-#define ADF_4XXX_PMISC_BAR             1
-#define ADF_4XXX_ETR_BAR               2
-#define ADF_4XXX_RX_RINGS_OFFSET       1
-#define ADF_4XXX_TX_RINGS_MASK         0x1
-#define ADF_4XXX_MAX_ACCELERATORS      1
-#define ADF_4XXX_MAX_ACCELENGINES      9
-#define ADF_4XXX_BAR_MASK              (BIT(0) | BIT(2) | BIT(4))
-
-/* Physical function fuses */
-#define ADF_4XXX_FUSECTL0_OFFSET       (0x2C8)
-#define ADF_4XXX_FUSECTL1_OFFSET       (0x2CC)
-#define ADF_4XXX_FUSECTL2_OFFSET       (0x2D0)
-#define ADF_4XXX_FUSECTL3_OFFSET       (0x2D4)
-#define ADF_4XXX_FUSECTL4_OFFSET       (0x2D8)
-#define ADF_4XXX_FUSECTL5_OFFSET       (0x2DC)
-
-#define ADF_4XXX_ACCELERATORS_MASK     (0x1)
-#define ADF_4XXX_ACCELENGINES_MASK     (0x1FF)
-#define ADF_4XXX_ADMIN_AE_MASK         (0x100)
-
-#define ADF_4XXX_ETR_MAX_BANKS         64
-
-/* MSIX interrupt */
-#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET      (0x41A040)
-#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET      (0x41A044)
-#define ADF_4XXX_SMIAPF_MASK_OFFSET            (0x41A084)
-#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i)                (0x409000 + ((i) * 0x04))
-
-/* Bank and ring configuration */
-#define ADF_4XXX_NUM_RINGS_PER_BANK    2
-#define ADF_4XXX_NUM_BANKS_PER_VF      4
-
-/* Arbiter configuration */
-#define ADF_4XXX_ARB_CONFIG                    (BIT(31) | BIT(6) | BIT(0))
-#define ADF_4XXX_ARB_OFFSET                    (0x0)
-#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET      (0x400)
-
-/* Admin Interface Reg Offset */
-#define ADF_4XXX_ADMINMSGUR_OFFSET     (0x500574)
-#define ADF_4XXX_ADMINMSGLR_OFFSET     (0x500578)
-#define ADF_4XXX_MAILBOX_BASE_OFFSET   (0x600970)
-
-/* Firmware Binaries */
-#define ADF_4XXX_FW            "qat_4xxx.bin"
-#define ADF_4XXX_MMP           "qat_4xxx_mmp.bin"
-#define ADF_4XXX_SYM_OBJ       "qat_4xxx_sym.bin"
-#define ADF_4XXX_DC_OBJ                "qat_4xxx_dc.bin"
-#define ADF_4XXX_ASYM_OBJ      "qat_4xxx_asym.bin"
-#define ADF_4XXX_ADMIN_OBJ     "qat_4xxx_admin.bin"
-
-/* qat_4xxx fuse bits are different from old GENs, redefine them */
-enum icp_qat_4xxx_slice_mask {
-       ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
-       ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1),
-       ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2),
-       ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
-       ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
-       ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
-       ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6),
-};
-
-void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
-int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
-
-#endif
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
deleted file mode 100644 (file)
index b3a4c7b..0000000
+++ /dev/null
@@ -1,474 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2020 Intel Corporation */
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <adf_accel_devices.h>
-#include <adf_cfg.h>
-#include <adf_common_drv.h>
-
-#include "adf_4xxx_hw_data.h"
-#include "qat_compression.h"
-#include "qat_crypto.h"
-#include "adf_transport_access_macros.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
-       { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-enum configs {
-       DEV_CFG_CY = 0,
-       DEV_CFG_DC,
-};
-
-static const char * const services_operations[] = {
-       ADF_CFG_CY,
-       ADF_CFG_DC,
-};
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       if (accel_dev->hw_device) {
-               adf_clean_hw_data_4xxx(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       adf_devmgr_rm_dev(accel_dev, NULL);
-}
-
-static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
-{
-       const char *config;
-       int ret;
-
-       config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
-
-       ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
-       if (ret)
-               return ret;
-
-       /* Default configuration is crypto only for even devices
-        * and compression for odd devices.
-        */
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
-                                         ADF_SERVICES_ENABLED, config,
-                                         ADF_STR);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
-{
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       int banks = GET_MAX_BANKS(accel_dev);
-       int cpus = num_online_cpus();
-       unsigned long bank, val;
-       int instances;
-       int ret;
-       int i;
-
-       if (adf_hw_dev_has_crypto(accel_dev))
-               instances = min(cpus, banks / 2);
-       else
-               instances = 0;
-
-       for (i = 0; i < instances; i++) {
-               val = i;
-               bank = i * 2;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &bank, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               bank += 1;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &bank, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
-                        i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
-               val = 128;
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 512;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 0;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 0;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 1;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 1;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = ADF_COALESCING_DEF_TIME;
-               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-       }
-
-       val = i;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
-                                         &val, ADF_DEC);
-       if (ret)
-               goto err;
-
-       val = 0;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
-                                         &val, ADF_DEC);
-       if (ret)
-               goto err;
-
-       return 0;
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
-       return ret;
-}
-
-static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
-{
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       int banks = GET_MAX_BANKS(accel_dev);
-       int cpus = num_online_cpus();
-       unsigned long val;
-       int instances;
-       int ret;
-       int i;
-
-       if (adf_hw_dev_has_compression(accel_dev))
-               instances = min(cpus, banks);
-       else
-               instances = 0;
-
-       for (i = 0; i < instances; i++) {
-               val = i;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 512;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 0;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 1;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = ADF_COALESCING_DEF_TIME;
-               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-       }
-
-       val = i;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
-                                         &val, ADF_DEC);
-       if (ret)
-               goto err;
-
-       val = 0;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
-                                         &val, ADF_DEC);
-       if (ret)
-               goto err;
-
-       return 0;
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
-       return ret;
-}
-
-int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
-{
-       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
-       int ret;
-
-       ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
-       if (ret)
-               goto err;
-
-       ret = adf_cfg_section_add(accel_dev, "Accelerator0");
-       if (ret)
-               goto err;
-
-       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
-                                     ADF_SERVICES_ENABLED, services);
-       if (ret)
-               goto err;
-
-       ret = sysfs_match_string(services_operations, services);
-       if (ret < 0)
-               goto err;
-
-       switch (ret) {
-       case DEV_CFG_CY:
-               ret = adf_crypto_dev_config(accel_dev);
-               break;
-       case DEV_CFG_DC:
-               ret = adf_comp_dev_config(accel_dev);
-               break;
-       }
-
-       if (ret)
-               goto err;
-
-       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-
-       return ret;
-
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
-       return ret;
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       struct adf_bar *bar;
-       int ret;
-
-       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
-               /*
-                * If the accelerator is connected to a node with no memory
-                * there is no point in using the accelerator since the remote
-                * memory transaction will be very slow.
-                */
-               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
-               return -EINVAL;
-       }
-
-       accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
-       if (!accel_dev)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /*
-        * Add accel device to accel table
-        * This should be called before adf_cleanup_accel is called
-        */
-       if (adf_devmgr_add_dev(accel_dev, NULL)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               return -EFAULT;
-       }
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and initialise device hardware meta-data structure */
-       hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_4xxx(accel_dev->hw_device);
-
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
-       pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);
-
-       /* Get Accelerators and Accelerators Engines masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-       /* If the device has no acceleration engines then ignore it */
-       if (!hw_data->accel_mask || !hw_data->ae_mask ||
-           (~hw_data->ae_mask & 0x01)) {
-               dev_err(&pdev->dev, "No acceleration units found.\n");
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* Enable PCI device */
-       ret = pcim_enable_device(pdev);
-       if (ret) {
-               dev_err(&pdev->dev, "Can't enable PCI device.\n");
-               goto out_err;
-       }
-
-       /* Set DMA identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration.\n");
-               goto out_err;
-       }
-
-       ret = adf_cfg_dev_init(accel_dev);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to initialize configuration.\n");
-               goto out_err;
-       }
-
-       /* Get accelerator capabilities mask */
-       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
-       if (!hw_data->accel_capabilities_mask) {
-               dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
-               ret = -EINVAL;
-               goto out_err;
-       }
-
-       /* Find and map all the device's BARS */
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;
-
-       ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to map pci regions.\n");
-               goto out_err;
-       }
-
-       i = 0;
-       for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
-               bar = &accel_pci_dev->pci_bars[i++];
-               bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
-       }
-
-       pci_set_master(pdev);
-
-       adf_enable_aer(accel_dev);
-
-       if (pci_save_state(pdev)) {
-               dev_err(&pdev->dev, "Failed to save pci state.\n");
-               ret = -ENOMEM;
-               goto out_err_disable_aer;
-       }
-
-       ret = adf_sysfs_init(accel_dev);
-       if (ret)
-               goto out_err_disable_aer;
-
-       ret = hw_data->dev_config(accel_dev);
-       if (ret)
-               goto out_err_disable_aer;
-
-       ret = adf_dev_init(accel_dev);
-       if (ret)
-               goto out_err_dev_shutdown;
-
-       ret = adf_dev_start(accel_dev);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
-       adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
-       adf_disable_aer(accel_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-       adf_disable_aer(accel_dev);
-       adf_cleanup_accel(accel_dev);
-}
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_4XXX_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-       .sriov_configure = adf_sriov_configure,
-       .err_handler = &adf_err_handler,
-};
-
-module_pci_driver(adf_driver);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_FIRMWARE(ADF_4XXX_FW);
-MODULE_FIRMWARE(ADF_4XXX_MMP);
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_SOFTDEP("pre: crypto-intel_qat");
diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/qat/qat_c3xxx/Makefile
deleted file mode 100644 (file)
index 92ef416..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
-qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
deleted file mode 100644 (file)
index c55c51a..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include "adf_c3xxx_hw_data.h"
-#include "icp_qat_hw.h"
-
-/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
-       0x12222AAA, 0x11222AAA, 0x12222AAA,
-       0x11222AAA, 0x12222AAA, 0x11222AAA
-};
-
-static struct adf_hw_device_class c3xxx_class = {
-       .name = ADF_C3XXX_DEVICE_NAME,
-       .type = DEV_C3XXX,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       u32 straps = self->straps;
-       u32 fuses = self->fuses;
-       u32 accel;
-
-       accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET;
-       accel &= ADF_C3XXX_ACCELERATORS_MASK;
-
-       return accel;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       u32 straps = self->straps;
-       u32 fuses = self->fuses;
-       unsigned long disabled;
-       u32 ae_disable;
-       int accel;
-
-       /* If an accel is disabled, then disable the corresponding two AEs */
-       disabled = ~get_accel_mask(self) & ADF_C3XXX_ACCELERATORS_MASK;
-       ae_disable = BIT(1) | BIT(0);
-       for_each_set_bit(accel, &disabled, ADF_C3XXX_MAX_ACCELERATORS)
-               straps |= ae_disable << (accel << 1);
-
-       return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
-}
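A worked example of the shift arithmetic above, with invented values:

	/* Suppose accelerator 2 is disabled. */
	int accel = 2;
	u32 straps = 0;

	/* (accel << 1) == 4, so BIT(1) | BIT(0) lands on bits 5 and 4. */
	straps |= (BIT(1) | BIT(0)) << (accel << 1);	/* straps == 0x30 */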
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXX_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXX_ETR_BAR;
-}
-
-static u32 get_sram_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXX_SRAM_BAR;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       int aes = self->get_num_aes(self);
-
-       if (aes == 6)
-               return DEV_SKU_4;
-
-       return DEV_SKU_UNKNOWN;
-}
-
-static const u32 *adf_get_arbiter_mapping(void)
-{
-       return thrd_to_arb_map;
-}
-
-static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
-{
-       adf_gen2_cfg_iov_thds(accel_dev, enable,
-                             ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS,
-                             ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS);
-}
-
-void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &c3xxx_class;
-       hw_data->instance_id = c3xxx_class.instances++;
-       hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_isr_resource_alloc;
-       hw_data->free_irq = adf_isr_resource_free;
-       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_accel_cap = adf_gen2_get_accel_cap;
-       hw_data->get_num_accels = adf_gen2_get_num_accels;
-       hw_data->get_num_aes = adf_gen2_get_num_aes;
-       hw_data->get_sram_bar_id = get_sram_bar_id;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_admin_info = adf_gen2_get_admin_info;
-       hw_data->get_arb_info = adf_gen2_get_arb_info;
-       hw_data->get_sku = get_sku;
-       hw_data->fw_name = ADF_C3XXX_FW;
-       hw_data->fw_mmp_name = ADF_C3XXX_MMP;
-       hw_data->init_admin_comms = adf_init_admin_comms;
-       hw_data->exit_admin_comms = adf_exit_admin_comms;
-       hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->send_admin_init = adf_send_admin_init;
-       hw_data->init_arb = adf_init_arb;
-       hw_data->exit_arb = adf_exit_arb;
-       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
-       hw_data->enable_ints = adf_gen2_enable_ints;
-       hw_data->reset_device = adf_reset_flr;
-       hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
-       hw_data->disable_iov = adf_disable_sriov;
-       hw_data->dev_config = adf_gen2_dev_config;
-
-       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-}
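
The fuse/strap decode in get_accel_mask() and get_ae_mask() above is
easiest to follow with concrete values. A standalone userspace sketch
(constants mirrored from adf_c3xxx_hw_data.h; the fuse/strap inputs are
invented for illustration):

    #include <stdio.h>

    #define REG_OFFSET 16      /* ADF_C3XXX_ACCELERATORS_REG_OFFSET */
    #define ACCEL_MASK 0x7u    /* ADF_C3XXX_ACCELERATORS_MASK */
    #define AE_MASK    0x3Fu   /* ADF_C3XXX_ACCELENGINES_MASK */

    int main(void)
    {
            unsigned int fuses = 0, straps = 1u << 17; /* accel 1 strapped off */
            unsigned int accel = (~(fuses | straps) >> REG_OFFSET) & ACCEL_MASK;
            unsigned int disabled = ~accel & ACCEL_MASK;
            unsigned int ae_straps = straps;
            int a;

            /* each disabled accelerator disables its pair of AEs */
            for (a = 0; a < 3; a++)
                    if (disabled & (1u << a))
                            ae_straps |= 0x3u << (a << 1);

            /* prints: accel mask 0x5, ae mask 0x33 */
            printf("accel mask 0x%x, ae mask 0x%x\n",
                   accel, ~(fuses | ae_straps) & AE_MASK);
            return 0;
    }

With accelerator 1 strapped off, AEs 2 and 3 drop out of the engine mask,
matching the two-AEs-per-accelerator comment in get_ae_mask().
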
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
deleted file mode 100644 (file)
index 336a06f..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_C3XXX_HW_DATA_H_
-#define ADF_C3XXX_HW_DATA_H_
-
-/* PCIe configuration space */
-#define ADF_C3XXX_PMISC_BAR 0
-#define ADF_C3XXX_ETR_BAR 1
-#define ADF_C3XXX_SRAM_BAR 0
-#define ADF_C3XXX_MAX_ACCELERATORS 3
-#define ADF_C3XXX_MAX_ACCELENGINES 6
-#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
-#define ADF_C3XXX_ACCELERATORS_MASK 0x7
-#define ADF_C3XXX_ACCELENGINES_MASK 0x3F
-#define ADF_C3XXX_ETR_MAX_BANKS 16
-#define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC
-
-/* AE to function mapping */
-#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
-#define ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS 6
-
-/* Firmware Binary */
-#define ADF_C3XXX_FW "qat_c3xxx.bin"
-#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
-
-void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
-#endif
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
deleted file mode 100644 (file)
index 1f4fbf4..0000000
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_c3xxx_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_C3XXX_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-       .sriov_configure = adf_sriov_configure,
-       .err_handler = &adf_err_handler,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
-                       adf_clean_hw_data_c3xxx(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       adf_devmgr_rm_dev(accel_dev, NULL);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
-               /* If the accelerator is connected to a node with no memory
-                * there is no point in using it, since remote memory
-                * transactions will be very slow. */
-               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
-               return -EINVAL;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /* Add accel device to accel table.
-        * This must be done before adf_cleanup_accel() is called. */
-       if (adf_devmgr_add_dev(accel_dev, NULL)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_c3xxx(accel_dev->hw_device);
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
-       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
-                             &hw_data->fuses);
-       pci_read_config_dword(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET,
-                             &hw_data->straps);
-
-       /* Get accelerator and accelerator engine masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-       /* If the device has no acceleration engines then ignore it. */
-       if (!hw_data->accel_mask || !hw_data->ae_mask ||
-           ((~hw_data->ae_mask) & 0x01)) {
-               dev_err(&pdev->dev, "No acceleration units found");
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Get accelerator capabilities mask */
-       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
-
-       /* Find and map all the device's BARs */
-       i = 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-
-       adf_enable_aer(accel_dev);
-
-       if (pci_save_state(pdev)) {
-               dev_err(&pdev->dev, "Failed to save pci state\n");
-               ret = -ENOMEM;
-               goto out_err_disable_aer;
-       }
-
-       ret = hw_data->dev_config(accel_dev);
-       if (ret)
-               goto out_err_disable_aer;
-
-       ret = adf_dev_init(accel_dev);
-       if (ret)
-               goto out_err_dev_shutdown;
-
-       ret = adf_dev_start(accel_dev);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
-       adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
-       adf_disable_aer(accel_dev);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-       adf_disable_aer(accel_dev);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_FIRMWARE(ADF_C3XXX_FW);
-MODULE_FIRMWARE(ADF_C3XXX_MMP);
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
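
adf_probe() above is a textbook instance of the kernel's goto-unwind
error handling: each later failure jumps to a label that releases only
what was already acquired, in reverse order, ending at the common exit.
The shape in miniature (acquire_*/release_* are illustrative stubs, not
driver functions):

    static int setup(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    return ret;

            ret = acquire_b();
            if (ret)
                    goto err_a;

            ret = acquire_c();
            if (ret)
                    goto err_b;

            return 0;

    err_b:
            release_b();
    err_a:
            release_a();
            return ret;
    }

One wrinkle in the real function: every error path falls through to
out_err, because adf_devmgr_add_dev() must be undone by
adf_cleanup_accel() even when a later step is what failed.
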
diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/qat/qat_c3xxxvf/Makefile
deleted file mode 100644 (file)
index b6d7682..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
-qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
deleted file mode 100644 (file)
index 84d9486..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include <adf_pfvf_vf_msg.h>
-#include "adf_c3xxxvf_hw_data.h"
-
-static struct adf_hw_device_class c3xxxiov_class = {
-       .name = ADF_C3XXXVF_DEVICE_NAME,
-       .type = DEV_C3XXXVF,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_ACCELERATORS_MASK;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_ACCELENGINES_MASK;
-}
-
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_MAX_ACCELERATORS;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_MAX_ACCELENGINES;
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_ETR_BAR;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       return DEV_SKU_VF;
-}
-
-static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
-{
-       return 0;
-}
-
-static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
-{
-}
-
-void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &c3xxxiov_class;
-       hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
-       hw_data->free_irq = adf_vf_isr_resource_free;
-       hw_data->enable_error_correction = adf_vf_void_noop;
-       hw_data->init_admin_comms = adf_vf_int_noop;
-       hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_notify_init;
-       hw_data->init_arb = adf_vf_int_noop;
-       hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_num_accels = get_num_accels;
-       hw_data->get_num_aes = get_num_aes;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_sku = get_sku;
-       hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->dev_class->instances++;
-       hw_data->dev_config = adf_gen2_dev_config;
-       adf_devmgr_update_class_index(hw_data);
-       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-       adf_devmgr_update_class_index(hw_data);
-}
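
The adf_vf_int_noop()/adf_vf_void_noop() stubs above exist so that
PF-only responsibilities (admin comms, arbitration, error correction)
stay populated in the VF's callback table, letting shared init code
invoke every hook unconditionally. The pattern reduced to a sketch
(illustrative types, not the driver's):

    struct ops {
            int  (*init_admin)(void *dev);
            void (*exit_admin)(void *dev);
    };

    static int  int_noop(void *dev)  { return 0; }
    static void void_noop(void *dev) { }

    /* callers never need a "has this hook?" NULL check */
    static const struct ops vf_ops = {
            .init_admin = int_noop,
            .exit_admin = void_noop,
    };
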
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
deleted file mode 100644 (file)
index 6b4bf18..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#ifndef ADF_C3XXXVF_HW_DATA_H_
-#define ADF_C3XXXVF_HW_DATA_H_
-
-#define ADF_C3XXXIOV_PMISC_BAR 1
-#define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1
-#define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1
-#define ADF_C3XXXIOV_MAX_ACCELERATORS 1
-#define ADF_C3XXXIOV_MAX_ACCELENGINES 1
-#define ADF_C3XXXIOV_RX_RINGS_OFFSET 8
-#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF
-#define ADF_C3XXXIOV_ETR_BAR 0
-#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
-
-void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
-#endif
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
deleted file mode 100644 (file)
index cf4ef83..0000000
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_c3xxxvf_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_C3XXXVF_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       struct adf_accel_dev *pf;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
-                       adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
-       adf_devmgr_rm_dev(accel_dev, pf);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_dev *pf;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       accel_dev->is_vf = true;
-       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /* Add accel device to accel table */
-       if (adf_devmgr_add_dev(accel_dev, pf)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_c3xxxiov(accel_dev->hw_device);
-
-       /* Get accelerator and accelerator engine masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Find and map all the device's BARs */
-       i = 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-       /* Completion for VF2PF request/response message exchange */
-       init_completion(&accel_dev->vf.msg_received);
-
-       ret = adf_dev_init(accel_dev);
-       if (ret)
-               goto out_err_dev_shutdown;
-
-       ret = adf_dev_start(accel_dev);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
-       adf_dev_shutdown(accel_dev);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_flush_vf_wq(accel_dev);
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-       adf_clean_vf_map(true);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
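
The init_completion(&accel_dev->vf.msg_received) call in the VF probe
above wires up the VF2PF handshake: the sender posts a message to the PF
and sleeps on the completion, and the PFVF interrupt path completes it
when the response arrives. In outline (the timeout value here is
illustrative, not the driver's):

    /* sender: request posted, now wait for the PF's reply */
    if (!wait_for_completion_timeout(&accel_dev->vf.msg_received,
                                     msecs_to_jiffies(100)))
            return -ETIMEDOUT;

    /* interrupt handler: reply parsed, wake the sender */
    complete(&accel_dev->vf.msg_received);

The matching adf_flush_vf_wq() call in adf_remove() drains the VF's PFVF
workqueue before teardown so no handler runs against a dying device.
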
diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/qat/qat_c62x/Makefile
deleted file mode 100644 (file)
index d581f7c..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
-qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
deleted file mode 100644 (file)
index b7aa19d..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include "adf_c62x_hw_data.h"
-#include "icp_qat_hw.h"
-
-/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
-       0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
-       0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
-};
-
-static struct adf_hw_device_class c62x_class = {
-       .name = ADF_C62X_DEVICE_NAME,
-       .type = DEV_C62X,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       u32 straps = self->straps;
-       u32 fuses = self->fuses;
-       u32 accel;
-
-       accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET;
-       accel &= ADF_C62X_ACCELERATORS_MASK;
-
-       return accel;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       u32 straps = self->straps;
-       u32 fuses = self->fuses;
-       unsigned long disabled;
-       u32 ae_disable;
-       int accel;
-
-       /* If an accel is disabled, then disable the corresponding two AEs */
-       disabled = ~get_accel_mask(self) & ADF_C62X_ACCELERATORS_MASK;
-       ae_disable = BIT(1) | BIT(0);
-       for_each_set_bit(accel, &disabled, ADF_C62X_MAX_ACCELERATORS)
-               straps |= ae_disable << (accel << 1);
-
-       return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C62X_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C62X_ETR_BAR;
-}
-
-static u32 get_sram_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C62X_SRAM_BAR;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       int aes = self->get_num_aes(self);
-
-       if (aes == 8)
-               return DEV_SKU_2;
-       else if (aes == 10)
-               return DEV_SKU_4;
-
-       return DEV_SKU_UNKNOWN;
-}
-
-static const u32 *adf_get_arbiter_mapping(void)
-{
-       return thrd_to_arb_map;
-}
-
-static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
-{
-       adf_gen2_cfg_iov_thds(accel_dev, enable,
-                             ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS,
-                             ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS);
-}
-
-void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &c62x_class;
-       hw_data->instance_id = c62x_class.instances++;
-       hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_isr_resource_alloc;
-       hw_data->free_irq = adf_isr_resource_free;
-       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_accel_cap = adf_gen2_get_accel_cap;
-       hw_data->get_num_accels = adf_gen2_get_num_accels;
-       hw_data->get_num_aes = adf_gen2_get_num_aes;
-       hw_data->get_sram_bar_id = get_sram_bar_id;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_admin_info = adf_gen2_get_admin_info;
-       hw_data->get_arb_info = adf_gen2_get_arb_info;
-       hw_data->get_sku = get_sku;
-       hw_data->fw_name = ADF_C62X_FW;
-       hw_data->fw_mmp_name = ADF_C62X_MMP;
-       hw_data->init_admin_comms = adf_init_admin_comms;
-       hw_data->exit_admin_comms = adf_exit_admin_comms;
-       hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->send_admin_init = adf_send_admin_init;
-       hw_data->init_arb = adf_init_arb;
-       hw_data->exit_arb = adf_exit_arb;
-       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
-       hw_data->enable_ints = adf_gen2_enable_ints;
-       hw_data->reset_device = adf_reset_flr;
-       hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
-       hw_data->disable_iov = adf_disable_sriov;
-       hw_data->dev_config = adf_gen2_dev_config;
-
-       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-}
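
adf_init_hw_data_c62x() above, like its c3xxx counterpart, does nothing
but fill a large adf_hw_device_data table of constants and function
pointers; the generation-agnostic QAT core then drives every device
through that table, C's idiom for virtual dispatch. Stripped to its
essentials (a sketch with invented names):

    struct hw_data {
            unsigned int num_engines;
            unsigned int (*get_misc_bar_id)(struct hw_data *self);
    };

    static unsigned int c62x_misc_bar(struct hw_data *self)
    {
            return 1;   /* ADF_C62X_PMISC_BAR */
    }

    void init_c62x(struct hw_data *hw)
    {
            hw->num_engines = 10;
            hw->get_misc_bar_id = c62x_misc_bar;
    }

    /* generic core: no per-generation knowledge required */
    unsigned int misc_bar(struct hw_data *hw)
    {
            return hw->get_misc_bar_id(hw);
    }
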
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
deleted file mode 100644 (file)
index 008c0a3..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_C62X_HW_DATA_H_
-#define ADF_C62X_HW_DATA_H_
-
-/* PCIe configuration space */
-#define ADF_C62X_SRAM_BAR 0
-#define ADF_C62X_PMISC_BAR 1
-#define ADF_C62X_ETR_BAR 2
-#define ADF_C62X_MAX_ACCELERATORS 5
-#define ADF_C62X_MAX_ACCELENGINES 10
-#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
-#define ADF_C62X_ACCELERATORS_MASK 0x1F
-#define ADF_C62X_ACCELENGINES_MASK 0x3FF
-#define ADF_C62X_ETR_MAX_BANKS 16
-#define ADF_C62X_SOFTSTRAP_CSR_OFFSET 0x2EC
-
-/* AE to function mapping */
-#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
-#define ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS 10
-
-/* Firmware Binary */
-#define ADF_C62X_FW "qat_c62x.bin"
-#define ADF_C62X_MMP "qat_c62x_mmp.bin"
-
-void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data);
-#endif
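
Set side by side with adf_c3xxx_hw_data.h above, the BAR layouts differ:
c3xxx shares BAR 0 between SRAM and PMISC with ETR in BAR 1, while c62x
exposes SRAM (0), PMISC (1) and ETR (2) separately. The get_*_bar_id()
callbacks exist precisely so common code can index pci_bars[] without
hard-coding either layout, along the lines of (an illustrative accessor,
not a function from the driver):

    static void __iomem *misc_csr_base(struct adf_accel_dev *accel_dev)
    {
            struct adf_hw_device_data *hw = accel_dev->hw_device;
            u32 bar = hw->get_misc_bar_id(hw);

            return accel_dev->accel_pci_dev.pci_bars[bar].virt_addr;
    }
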
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
deleted file mode 100644 (file)
index 4ccaf29..0000000
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_c62x_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_C62X_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-       .sriov_configure = adf_sriov_configure,
-       .err_handler = &adf_err_handler,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_C62X:
-                       adf_clean_hw_data_c62x(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       adf_devmgr_rm_dev(accel_dev, NULL);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_C62X:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
-               /* If the accelerator is connected to a node with no memory
-                * there is no point in using it, since remote memory
-                * transactions will be very slow. */
-               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
-               return -EINVAL;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /* Add accel device to accel table.
-        * This must be done before adf_cleanup_accel() is called. */
-       if (adf_devmgr_add_dev(accel_dev, NULL)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_c62x(accel_dev->hw_device);
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
-       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
-                             &hw_data->fuses);
-       pci_read_config_dword(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET,
-                             &hw_data->straps);
-
-       /* Get accelerator and accelerator engine masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-       /* If the device has no acceleration engines then ignore it. */
-       if (!hw_data->accel_mask || !hw_data->ae_mask ||
-           ((~hw_data->ae_mask) & 0x01)) {
-               dev_err(&pdev->dev, "No acceleration units found");
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Get accelerator capabilities mask */
-       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
-
-       /* Find and map all the device's BARs */
-       i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-
-       adf_enable_aer(accel_dev);
-
-       if (pci_save_state(pdev)) {
-               dev_err(&pdev->dev, "Failed to save pci state\n");
-               ret = -ENOMEM;
-               goto out_err_disable_aer;
-       }
-
-       ret = hw_data->dev_config(accel_dev);
-       if (ret)
-               goto out_err_disable_aer;
-
-       ret = adf_dev_init(accel_dev);
-       if (ret)
-               goto out_err_dev_shutdown;
-
-       ret = adf_dev_start(accel_dev);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
-       adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
-       adf_disable_aer(accel_dev);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-       adf_disable_aer(accel_dev);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_FIRMWARE(ADF_C62X_FW);
-MODULE_FIRMWARE(ADF_C62X_MMP);
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
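
One c62x-specific wrinkle in the probe above: the BAR-mapping loop
starts at index 1 instead of 0 when the FUSECTL bit is set, i.e. when
the SRAM BAR has been fused off, so PMISC and ETR still land in their
fixed pci_bars[] slots:

    /* with the fuse set, pci_bars[0] (SRAM) is left unmapped and
     * pci_bars[1]/pci_bars[2] keep matching ADF_C62X_PMISC_BAR and
     * ADF_C62X_ETR_BAR from the header above */
    i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;

The c3xxx and VF probes start at index 0 unconditionally.
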
diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/qat/qat_c62xvf/Makefile
deleted file mode 100644 (file)
index 446c3d6..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
-qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
deleted file mode 100644 (file)
index 751d7aa..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include <adf_pfvf_vf_msg.h>
-#include "adf_c62xvf_hw_data.h"
-
-static struct adf_hw_device_class c62xiov_class = {
-       .name = ADF_C62XVF_DEVICE_NAME,
-       .type = DEV_C62XVF,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_ACCELERATORS_MASK;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_ACCELENGINES_MASK;
-}
-
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_MAX_ACCELERATORS;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_MAX_ACCELENGINES;
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_ETR_BAR;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       return DEV_SKU_VF;
-}
-
-static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
-{
-       return 0;
-}
-
-static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
-{
-}
-
-void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &c62xiov_class;
-       hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
-       hw_data->free_irq = adf_vf_isr_resource_free;
-       hw_data->enable_error_correction = adf_vf_void_noop;
-       hw_data->init_admin_comms = adf_vf_int_noop;
-       hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_notify_init;
-       hw_data->init_arb = adf_vf_int_noop;
-       hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_num_accels = get_num_accels;
-       hw_data->get_num_aes = get_num_aes;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_sku = get_sku;
-       hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->dev_class->instances++;
-       hw_data->dev_config = adf_gen2_dev_config;
-       adf_devmgr_update_class_index(hw_data);
-       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-       adf_devmgr_update_class_index(hw_data);
-}
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
deleted file mode 100644 (file)
index a1a62c0..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#ifndef ADF_C62XVF_HW_DATA_H_
-#define ADF_C62XVF_HW_DATA_H_
-
-#define ADF_C62XIOV_PMISC_BAR 1
-#define ADF_C62XIOV_ACCELERATORS_MASK 0x1
-#define ADF_C62XIOV_ACCELENGINES_MASK 0x1
-#define ADF_C62XIOV_MAX_ACCELERATORS 1
-#define ADF_C62XIOV_MAX_ACCELENGINES 1
-#define ADF_C62XIOV_RX_RINGS_OFFSET 8
-#define ADF_C62XIOV_TX_RINGS_MASK 0xFF
-#define ADF_C62XIOV_ETR_BAR 0
-#define ADF_C62XIOV_ETR_MAX_BANKS 1
-
-void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
-#endif
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
deleted file mode 100644 (file)
index 0e642c9..0000000
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_c62xvf_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X_VF), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_C62XVF_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       struct adf_accel_dev *pf;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
-                       adf_clean_hw_data_c62xiov(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
-       adf_devmgr_rm_dev(accel_dev, pf);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_dev *pf;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       accel_dev->is_vf = true;
-       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /* Add accel device to accel table */
-       if (adf_devmgr_add_dev(accel_dev, pf)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_c62xiov(accel_dev->hw_device);
-
-       /* Get accelerator and accelerator engine masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Find and map all the device's BARs */
-       i = 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-       /* Completion for VF2PF request/response message exchange */
-       init_completion(&accel_dev->vf.msg_received);
-
-       ret = adf_dev_init(accel_dev);
-       if (ret)
-               goto out_err_dev_shutdown;
-
-       ret = adf_dev_start(accel_dev);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
-       adf_dev_shutdown(accel_dev);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_flush_vf_wq(accel_dev);
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-       adf_clean_vf_map(true);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
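
Each of the PF and VF probes above requests a 48-bit DMA mask and fails
the probe outright if it cannot be satisfied. For contrast, the more
common PCI-driver fallback idiom looks like this (a sketch of the
alternative, not what these drivers do):

    ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (ret)
            ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    if (ret) {
            dev_err(&pdev->dev, "No usable DMA configuration\n");
            return ret;
    }
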
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
deleted file mode 100644 (file)
index 1fb8d50..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
-intel_qat-objs := adf_cfg.o \
-       adf_isr.o \
-       adf_ctl_drv.o \
-       adf_dev_mgr.o \
-       adf_init.o \
-       adf_accel_engine.o \
-       adf_aer.o \
-       adf_transport.o \
-       adf_admin.o \
-       adf_hw_arbiter.o \
-       adf_sysfs.o \
-       adf_gen2_hw_data.o \
-       adf_gen2_config.o \
-       adf_gen4_hw_data.o \
-       adf_gen4_pm.o \
-       adf_gen2_dc.o \
-       adf_gen4_dc.o \
-       qat_crypto.o \
-       qat_compression.o \
-       qat_comp_algs.o \
-       qat_algs.o \
-       qat_asym_algs.o \
-       qat_algs_send.o \
-       qat_uclo.o \
-       qat_hal.o \
-       qat_bl.o
-
-intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
-intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
-                              adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
-                              adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
-                              adf_gen2_pfvf.o adf_gen4_pfvf.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
deleted file mode 100644 (file)
index 284f5aa..0000000
+++ /dev/null
@@ -1,316 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_ACCEL_DEVICES_H_
-#define ADF_ACCEL_DEVICES_H_
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/ratelimit.h>
-#include "adf_cfg_common.h"
-#include "adf_pfvf_msg.h"
-
-#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
-#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
-#define ADF_C62X_DEVICE_NAME "c6xx"
-#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
-#define ADF_C3XXX_DEVICE_NAME "c3xxx"
-#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
-#define ADF_4XXX_DEVICE_NAME "4xxx"
-#define ADF_4XXX_PCI_DEVICE_ID 0x4940
-#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
-#define ADF_401XX_PCI_DEVICE_ID 0x4942
-#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
-#define ADF_DEVICE_FUSECTL_OFFSET 0x40
-#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
-#define ADF_DEVICE_FUSECTL_MASK 0x80000000
-#define ADF_PCI_MAX_BARS 3
-#define ADF_DEVICE_NAME_LENGTH 32
-#define ADF_ETR_MAX_RINGS_PER_BANK 16
-#define ADF_MAX_MSIX_VECTOR_NAME 16
-#define ADF_DEVICE_NAME_PREFIX "qat_"
-
-enum adf_accel_capabilities {
-       ADF_ACCEL_CAPABILITIES_NULL = 0,
-       ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
-       ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
-       ADF_ACCEL_CAPABILITIES_CIPHER = 4,
-       ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
-       ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
-       ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
-       ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
-};
-
-struct adf_bar {
-       resource_size_t base_addr;
-       void __iomem *virt_addr;
-       resource_size_t size;
-};
-
-struct adf_irq {
-       bool enabled;
-       char name[ADF_MAX_MSIX_VECTOR_NAME];
-};
-
-struct adf_accel_msix {
-       struct adf_irq *irqs;
-       u32 num_entries;
-};
-
-struct adf_accel_pci {
-       struct pci_dev *pci_dev;
-       struct adf_accel_msix msix_entries;
-       struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
-       u8 revid;
-       u8 sku;
-};
-
-enum dev_state {
-       DEV_DOWN = 0,
-       DEV_UP
-};
-
-enum dev_sku_info {
-       DEV_SKU_1 = 0,
-       DEV_SKU_2,
-       DEV_SKU_3,
-       DEV_SKU_4,
-       DEV_SKU_VF,
-       DEV_SKU_UNKNOWN,
-};
-
-static inline const char *get_sku_info(enum dev_sku_info info)
-{
-       switch (info) {
-       case DEV_SKU_1:
-               return "SKU1";
-       case DEV_SKU_2:
-               return "SKU2";
-       case DEV_SKU_3:
-               return "SKU3";
-       case DEV_SKU_4:
-               return "SKU4";
-       case DEV_SKU_VF:
-               return "SKUVF";
-       case DEV_SKU_UNKNOWN:
-       default:
-               break;
-       }
-       return "Unknown SKU";
-}
-
-struct adf_hw_device_class {
-       const char *name;
-       const enum adf_device_type type;
-       u32 instances;
-};
-
-struct arb_info {
-       u32 arb_cfg;
-       u32 arb_offset;
-       u32 wt2sam_offset;
-};
-
-struct admin_info {
-       u32 admin_msg_ur;
-       u32 admin_msg_lr;
-       u32 mailbox_offset;
-};
-
-struct adf_hw_csr_ops {
-       u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
-       u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
-                                 u32 ring);
-       void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
-                                   u32 ring, u32 value);
-       u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
-                                 u32 ring);
-       void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
-                                   u32 ring, u32 value);
-       u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
-       void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
-                                     u32 ring, u32 value);
-       void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
-                                   u32 ring, dma_addr_t addr);
-       void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
-                                  u32 value);
-       void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
-       void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
-                                    u32 value);
-       void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
-                                     u32 value);
-       void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
-                                          u32 bank, u32 value);
-       void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
-                                         u32 value);
-};
-
-struct adf_cfg_device_data;
-struct adf_accel_dev;
-struct adf_etr_data;
-struct adf_etr_ring_data;
-
-struct adf_pfvf_ops {
-       int (*enable_comms)(struct adf_accel_dev *accel_dev);
-       u32 (*get_pf2vf_offset)(u32 i);
-       u32 (*get_vf2pf_offset)(u32 i);
-       void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
-       void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
-       u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
-       int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                       u32 pfvf_offset, struct mutex *csr_lock);
-       struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
-                                       u32 pfvf_offset, u8 compat_ver);
-};
-
-struct adf_dc_ops {
-       void (*build_deflate_ctx)(void *ctx);
-};
-
-struct adf_hw_device_data {
-       struct adf_hw_device_class *dev_class;
-       u32 (*get_accel_mask)(struct adf_hw_device_data *self);
-       u32 (*get_ae_mask)(struct adf_hw_device_data *self);
-       u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
-       u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
-       u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
-       u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
-       u32 (*get_num_aes)(struct adf_hw_device_data *self);
-       u32 (*get_num_accels)(struct adf_hw_device_data *self);
-       void (*get_arb_info)(struct arb_info *arb_csrs_info);
-       void (*get_admin_info)(struct admin_info *admin_csrs_info);
-       enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
-       int (*alloc_irq)(struct adf_accel_dev *accel_dev);
-       void (*free_irq)(struct adf_accel_dev *accel_dev);
-       void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
-       int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
-       void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
-       int (*send_admin_init)(struct adf_accel_dev *accel_dev);
-       int (*init_arb)(struct adf_accel_dev *accel_dev);
-       void (*exit_arb)(struct adf_accel_dev *accel_dev);
-       const u32 *(*get_arb_mapping)(void);
-       int (*init_device)(struct adf_accel_dev *accel_dev);
-       int (*enable_pm)(struct adf_accel_dev *accel_dev);
-       bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
-       void (*disable_iov)(struct adf_accel_dev *accel_dev);
-       void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
-                                     bool enable);
-       void (*enable_ints)(struct adf_accel_dev *accel_dev);
-       void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
-       int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
-       void (*reset_device)(struct adf_accel_dev *accel_dev);
-       void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
-       char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
-       u32 (*uof_get_num_objs)(void);
-       u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
-       int (*dev_config)(struct adf_accel_dev *accel_dev);
-       struct adf_pfvf_ops pfvf_ops;
-       struct adf_hw_csr_ops csr_ops;
-       struct adf_dc_ops dc_ops;
-       const char *fw_name;
-       const char *fw_mmp_name;
-       u32 fuses;
-       u32 straps;
-       u32 accel_capabilities_mask;
-       u32 extended_dc_capabilities;
-       u32 clock_frequency;
-       u32 instance_id;
-       u16 accel_mask;
-       u32 ae_mask;
-       u32 admin_ae_mask;
-       u16 tx_rings_mask;
-       u16 ring_to_svc_map;
-       u8 tx_rx_gap;
-       u8 num_banks;
-       u16 num_banks_per_vf;
-       u8 num_rings_per_bank;
-       u8 num_accel;
-       u8 num_logical_accel;
-       u8 num_engines;
-};
-
-/* CSR write macro */
-#define ADF_CSR_WR(csr_base, csr_offset, val) \
-       __raw_writel(val, csr_base + csr_offset)
-
-/* CSR read macro */
-#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
-
-#define ADF_CFG_NUM_SERVICES   4
-#define ADF_SRV_TYPE_BIT_LEN   3
-#define ADF_SRV_TYPE_MASK      0x7
-
-#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
-#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
-#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
-#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
-#define GET_NUM_RINGS_PER_BANK(accel_dev) \
-       GET_HW_DATA(accel_dev)->num_rings_per_bank
-#define GET_SRV_TYPE(accel_dev, idx) \
-       (((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
-       & ADF_SRV_TYPE_MASK)
-#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
-#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
-#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
-#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
-#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
-
-struct adf_admin_comms;
-struct icp_qat_fw_loader_handle;
-struct adf_fw_loader_data {
-       struct icp_qat_fw_loader_handle *fw_loader;
-       const struct firmware *uof_fw;
-       const struct firmware *mmp_fw;
-};
-
-struct adf_accel_vf_info {
-       struct adf_accel_dev *accel_dev;
-       struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
-       struct ratelimit_state vf2pf_ratelimit;
-       u32 vf_nr;
-       bool init;
-       u8 vf_compat_ver;
-};
-
-struct adf_dc_data {
-       u8 *ovf_buff;
-       size_t ovf_buff_sz;
-       dma_addr_t ovf_buff_p;
-};
-
-struct adf_accel_dev {
-       struct adf_etr_data *transport;
-       struct adf_hw_device_data *hw_device;
-       struct adf_cfg_device_data *cfg;
-       struct adf_fw_loader_data *fw_loader;
-       struct adf_admin_comms *admin;
-       struct adf_dc_data *dc_data;
-       struct list_head crypto_list;
-       struct list_head compression_list;
-       unsigned long status;
-       atomic_t ref_count;
-       struct dentry *debugfs_dir;
-       struct list_head list;
-       struct module *owner;
-       struct adf_accel_pci accel_pci_dev;
-       union {
-               struct {
-                       /* protects VF2PF interrupts access */
-                       spinlock_t vf2pf_ints_lock;
-                       /* vf_info is non-zero when SR-IOV is initialized */
-                       struct adf_accel_vf_info *vf_info;
-               } pf;
-               struct {
-                       bool irq_enabled;
-                       char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
-                       struct tasklet_struct pf2vf_bh_tasklet;
-                       struct mutex vf2pf_lock; /* protect CSR access */
-                       struct completion msg_received;
-                       struct pfvf_message response; /* temp field holding pf2vf response */
-                       u8 pf_compat_ver;
-               } vf;
-       };
-       bool is_vf;
-       u32 accel_id;
-};
-#endif
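
The GET_* accessors and the adf_hw_csr_ops table above are the idiom the rest of the driver uses to reach device registers. A minimal, hedged usage sketch — the bank, ring, and value below are made up for illustration:

	static void example_ring_head_reset(struct adf_accel_dev *accel_dev)
	{
		struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
		struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
		u32 bar_id = hw_data->get_etr_bar_id(hw_data);
		void __iomem *csr = GET_BARS(accel_dev)[bar_id].virt_addr;

		/* Zero the head pointer of ring 0 in bank 0 */
		csr_ops->write_csr_ring_head(csr, 0, 0, 0);
	}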
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
deleted file mode 100644 (file)
index 4ce2b66..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/firmware.h>
-#include <linux/pci.h>
-#include "adf_cfg.h"
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "icp_qat_uclo.h"
-
-static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
-                                u32 fw_size)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       struct icp_qat_fw_loader_handle *loader;
-       char *obj_name;
-       u32 num_objs;
-       u32 ae_mask;
-       int i;
-
-       loader = loader_data->fw_loader;
-       num_objs = hw_device->uof_get_num_objs();
-
-       for (i = 0; i < num_objs; i++) {
-               obj_name = hw_device->uof_get_name(accel_dev, i);
-               ae_mask = hw_device->uof_get_ae_mask(accel_dev, i);
-               if (!obj_name || !ae_mask) {
-                       dev_err(&GET_DEV(accel_dev), "Invalid UOF image\n");
-                       goto out_err;
-               }
-
-               if (qat_uclo_set_cfg_ae_mask(loader, ae_mask)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Invalid mask for UOF image\n");
-                       goto out_err;
-               }
-               if (qat_uclo_map_obj(loader, fw_addr, fw_size, obj_name)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to map UOF firmware\n");
-                       goto out_err;
-               }
-               if (qat_uclo_wr_all_uimage(loader)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to load UOF firmware\n");
-                       goto out_err;
-               }
-               qat_uclo_del_obj(loader);
-       }
-
-       return 0;
-
-out_err:
-       adf_ae_fw_release(accel_dev);
-       return -EFAULT;
-}
-
-int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       void *fw_addr, *mmp_addr;
-       u32 fw_size, mmp_size;
-
-       if (!hw_device->fw_name)
-               return 0;
-
-       if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
-                            &accel_dev->accel_pci_dev.pci_dev->dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
-                       hw_device->fw_mmp_name);
-               return -EFAULT;
-       }
-       if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
-                            &accel_dev->accel_pci_dev.pci_dev->dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
-                       hw_device->fw_name);
-               goto out_err;
-       }
-
-       fw_size = loader_data->uof_fw->size;
-       fw_addr = (void *)loader_data->uof_fw->data;
-       mmp_size = loader_data->mmp_fw->size;
-       mmp_addr = (void *)loader_data->mmp_fw->data;
-
-       if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n");
-               goto out_err;
-       }
-
-       if (hw_device->uof_get_num_objs)
-               return adf_ae_fw_load_images(accel_dev, fw_addr, fw_size);
-
-       if (qat_uclo_map_obj(loader_data->fw_loader, fw_addr, fw_size, NULL)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to map FW\n");
-               goto out_err;
-       }
-       if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
-               goto out_err;
-       }
-       return 0;
-
-out_err:
-       adf_ae_fw_release(accel_dev);
-       return -EFAULT;
-}
-
-void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-
-       if (!hw_device->fw_name)
-               return;
-
-       qat_uclo_del_obj(loader_data->fw_loader);
-       qat_hal_deinit(loader_data->fw_loader);
-       release_firmware(loader_data->uof_fw);
-       release_firmware(loader_data->mmp_fw);
-       loader_data->uof_fw = NULL;
-       loader_data->mmp_fw = NULL;
-       loader_data->fw_loader = NULL;
-}
-
-int adf_ae_start(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u32 ae_ctr;
-
-       if (!hw_data->fw_name)
-               return 0;
-
-       ae_ctr = qat_hal_start(loader_data->fw_loader);
-       dev_info(&GET_DEV(accel_dev),
-                "qat_dev%d started %d acceleration engines\n",
-                accel_dev->accel_id, ae_ctr);
-       return 0;
-}
-
-int adf_ae_stop(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
-
-       if (!hw_data->fw_name)
-               return 0;
-
-       for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
-               if (hw_data->ae_mask & (1 << ae)) {
-                       qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
-                       ae_ctr++;
-               }
-       }
-       dev_info(&GET_DEV(accel_dev),
-                "qat_dev%d stopped %d acceleration engines\n",
-                accel_dev->accel_id, ae_ctr);
-       return 0;
-}
-
-static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-
-       qat_hal_reset(loader_data->fw_loader);
-       if (qat_hal_clr_reset(loader_data->fw_loader))
-               return -EFAULT;
-
-       return 0;
-}
-
-int adf_ae_init(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-
-       if (!hw_device->fw_name)
-               return 0;
-
-       loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
-       if (!loader_data)
-               return -ENOMEM;
-
-       accel_dev->fw_loader = loader_data;
-       if (qat_hal_init(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n");
-               kfree(loader_data);
-               return -EFAULT;
-       }
-       if (adf_ae_reset(accel_dev, 0)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n");
-               qat_hal_deinit(loader_data->fw_loader);
-               kfree(loader_data);
-               return -EFAULT;
-       }
-       return 0;
-}
-
-int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-
-       if (!hw_device->fw_name)
-               return 0;
-
-       qat_hal_deinit(loader_data->fw_loader);
-       kfree(accel_dev->fw_loader);
-       accel_dev->fw_loader = NULL;
-       return 0;
-}
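
Taken together, the helpers in this file imply a fixed bring-up order. A hedged sketch of that sequence — in the real driver the init code is the caller, and error handling is fuller:

	static int example_ae_bringup(struct adf_accel_dev *accel_dev)
	{
		int ret;

		ret = adf_ae_init(accel_dev);		/* allocate loader, reset AEs */
		if (ret)
			return ret;

		ret = adf_ae_fw_load(accel_dev);	/* request + map UOF/MMP firmware */
		if (ret)
			return ret;			/* fw_load releases on failure */

		return adf_ae_start(accel_dev);		/* start the acceleration engines */
	}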
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
deleted file mode 100644 (file)
index 3b6184c..0000000
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/iopoll.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "icp_qat_fw_init_admin.h"
-
-#define ADF_ADMIN_MAILBOX_STRIDE 0x1000
-#define ADF_ADMINMSG_LEN 32
-#define ADF_CONST_TABLE_SIZE 1024
-#define ADF_ADMIN_POLL_DELAY_US 20
-#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
-
-static const u8 const_tab[1024] __aligned(1024) = {
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
-0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
-0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
-0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
-0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
-0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
-0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
-0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
-0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
-0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
-0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
-0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
-0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
-0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
-0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
-0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
-0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
-0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
-0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
-0x7e, 0x21, 0x79, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x01, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x15, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x02, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x14, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x02,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25,
-0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x01, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x01,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x2B, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
-struct adf_admin_comms {
-       dma_addr_t phy_addr;
-       dma_addr_t const_tbl_addr;
-       void *virt_addr;
-       void *virt_tbl_addr;
-       void __iomem *mailbox_addr;
-       struct mutex lock;      /* protects adf_admin_comms struct */
-};
-
-static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
-                                 void *in, void *out)
-{
-       int ret;
-       u32 status;
-       struct adf_admin_comms *admin = accel_dev->admin;
-       int offset = ae * ADF_ADMINMSG_LEN * 2;
-       void __iomem *mailbox = admin->mailbox_addr;
-       int mb_offset = ae * ADF_ADMIN_MAILBOX_STRIDE;
-       struct icp_qat_fw_init_admin_req *request = in;
-
-       mutex_lock(&admin->lock);
-
-       if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
-               mutex_unlock(&admin->lock);
-               return -EAGAIN;
-       }
-
-       memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
-       ADF_CSR_WR(mailbox, mb_offset, 1);
-
-       ret = read_poll_timeout(ADF_CSR_RD, status, status == 0,
-                               ADF_ADMIN_POLL_DELAY_US,
-                               ADF_ADMIN_POLL_TIMEOUT_US, true,
-                               mailbox, mb_offset);
-       if (ret < 0) {
-               /* Response timeout */
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to send admin msg %d to accelerator %d\n",
-                       request->cmd_id, ae);
-       } else {
-               /* Response received from the admin message; make the
-                * response data available in the "out" parameter.
-                */
-               memcpy(out, admin->virt_addr + offset +
-                      ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
-       }
-
-       mutex_unlock(&admin->lock);
-       return ret;
-}
-
-static int adf_send_admin(struct adf_accel_dev *accel_dev,
-                         struct icp_qat_fw_init_admin_req *req,
-                         struct icp_qat_fw_init_admin_resp *resp,
-                         const unsigned long ae_mask)
-{
-       u32 ae;
-
-       for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER)
-               if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) ||
-                   resp->status)
-                       return -EFAULT;
-
-       return 0;
-}
-
-static int adf_init_ae(struct adf_accel_dev *accel_dev)
-{
-       struct icp_qat_fw_init_admin_req req;
-       struct icp_qat_fw_init_admin_resp resp;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       u32 ae_mask = hw_device->ae_mask;
-
-       memset(&req, 0, sizeof(req));
-       memset(&resp, 0, sizeof(resp));
-       req.cmd_id = ICP_QAT_FW_INIT_AE;
-
-       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
-}
-
-static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
-{
-       struct icp_qat_fw_init_admin_req req;
-       struct icp_qat_fw_init_admin_resp resp;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       u32 ae_mask = hw_device->admin_ae_mask ?: hw_device->ae_mask;
-
-       memset(&req, 0, sizeof(req));
-       memset(&resp, 0, sizeof(resp));
-       req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
-
-       req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
-       req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
-
-       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
-}
-
-static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
-                                  u32 *capabilities)
-{
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       struct icp_qat_fw_init_admin_resp resp;
-       struct icp_qat_fw_init_admin_req req;
-       unsigned long ae_mask;
-       unsigned long ae;
-       int ret;
-
-       /* Target only service acceleration engines, excluding admin AEs */
-       ae_mask = hw_device->ae_mask & ~hw_device->admin_ae_mask;
-
-       memset(&req, 0, sizeof(req));
-       memset(&resp, 0, sizeof(resp));
-       req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;
-
-       *capabilities = 0;
-       for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
-               ret = adf_send_admin(accel_dev, &req, &resp, 1ULL << ae);
-               if (ret)
-                       return ret;
-
-               *capabilities |= resp.extended_features;
-       }
-
-       return 0;
-}
-
-/**
- * adf_send_admin_init() - Function sends init message to FW
- * @accel_dev: Pointer to acceleration device.
- *
- * Function sends the admin init message to the FW.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_admin_init(struct adf_accel_dev *accel_dev)
-{
-       u32 dc_capabilities = 0;
-       int ret;
-
-       ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
-               return ret;
-       }
-       accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
-
-       ret = adf_set_fw_constants(accel_dev);
-       if (ret)
-               return ret;
-
-       return adf_init_ae(accel_dev);
-}
-EXPORT_SYMBOL_GPL(adf_send_admin_init);
-
-/**
- * adf_init_admin_pm() - Function sends PM init message to FW
- * @accel_dev: Pointer to acceleration device.
- * @idle_delay: QAT HW idle time before power gating is initiated.
- *             000 - 64us
- *             001 - 128us
- *             010 - 256us
- *             011 - 512us
- *             100 - 1ms
- *             101 - 2ms
- *             110 - 4ms
- *             111 - 8ms
- *
- * Function sends the admin init message for the PM state configuration
- * to the FW.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct icp_qat_fw_init_admin_resp resp = {0};
-       struct icp_qat_fw_init_admin_req req = {0};
-       u32 ae_mask = hw_data->admin_ae_mask;
-
-       if (!accel_dev->admin) {
-               dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
-               return -EFAULT;
-       }
-
-       req.cmd_id = ICP_QAT_FW_PM_STATE_CONFIG;
-       req.idle_filter = idle_delay;
-
-       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
-}
-EXPORT_SYMBOL_GPL(adf_init_admin_pm);
-
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
-{
-       struct adf_admin_comms *admin;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       struct admin_info admin_csrs_info;
-       u32 mailbox_offset, adminmsg_u, adminmsg_l;
-       void __iomem *mailbox;
-       u64 reg_val;
-
-       admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
-                            dev_to_node(&GET_DEV(accel_dev)));
-       if (!admin)
-               return -ENOMEM;
-       admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                             &admin->phy_addr, GFP_KERNEL);
-       if (!admin->virt_addr) {
-               dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
-               kfree(admin);
-               return -ENOMEM;
-       }
-
-       admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
-                                                 PAGE_SIZE,
-                                                 &admin->const_tbl_addr,
-                                                 GFP_KERNEL);
-       if (!admin->virt_tbl_addr) {
-               dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
-               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                 admin->virt_addr, admin->phy_addr);
-               kfree(admin);
-               return -ENOMEM;
-       }
-
-       memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));
-       hw_data->get_admin_info(&admin_csrs_info);
-
-       mailbox_offset = admin_csrs_info.mailbox_offset;
-       mailbox = pmisc_addr + mailbox_offset;
-       adminmsg_u = admin_csrs_info.admin_msg_ur;
-       adminmsg_l = admin_csrs_info.admin_msg_lr;
-
-       reg_val = (u64)admin->phy_addr;
-       ADF_CSR_WR(pmisc_addr, adminmsg_u, upper_32_bits(reg_val));
-       ADF_CSR_WR(pmisc_addr, adminmsg_l, lower_32_bits(reg_val));
-
-       mutex_init(&admin->lock);
-       admin->mailbox_addr = mailbox;
-       accel_dev->admin = admin;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_init_admin_comms);
-
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
-{
-       struct adf_admin_comms *admin = accel_dev->admin;
-
-       if (!admin)
-               return;
-
-       if (admin->virt_addr)
-               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                 admin->virt_addr, admin->phy_addr);
-       if (admin->virt_tbl_addr)
-               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                 admin->virt_tbl_addr, admin->const_tbl_addr);
-
-       mutex_destroy(&admin->lock);
-       kfree(admin);
-       accel_dev->admin = NULL;
-}
-EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
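
For callers, the exported entry points wrap the mailbox protocol above. A hedged example of configuring power management — the macro name below is invented for illustration; per the kernel-doc, the 3-bit encoding 011 selects a 512us idle delay:

	#define EXAMPLE_PM_IDLE_DELAY_512US	0x3

	static int example_pm_setup(struct adf_accel_dev *accel_dev)
	{
		/* Sends ICP_QAT_FW_PM_STATE_CONFIG to the admin AEs */
		return adf_init_admin_pm(accel_dev, EXAMPLE_PM_IDLE_DELAY_512US);
	}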
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
deleted file mode 100644 (file)
index fe9bb2f..0000000
+++ /dev/null
@@ -1,222 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/aer.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-
-static struct workqueue_struct *device_reset_wq;
-
-static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
-                                          pci_channel_state_t state)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       dev_info(&pdev->dev, "Acceleration driver hardware error detected.\n");
-       if (!accel_dev) {
-               dev_err(&pdev->dev, "Can't find acceleration device\n");
-               return PCI_ERS_RESULT_DISCONNECT;
-       }
-
-       if (state == pci_channel_io_perm_failure) {
-               dev_err(&pdev->dev, "Can't recover from device error\n");
-               return PCI_ERS_RESULT_DISCONNECT;
-       }
-
-       return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/* reset dev data */
-struct adf_reset_dev_data {
-       int mode;
-       struct adf_accel_dev *accel_dev;
-       struct completion compl;
-       struct work_struct reset_work;
-};
-
-void adf_reset_sbr(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-       struct pci_dev *parent = pdev->bus->self;
-       u16 bridge_ctl = 0;
-
-       if (!parent)
-               parent = pdev;
-
-       if (!pci_wait_for_pending_transaction(pdev))
-               dev_info(&GET_DEV(accel_dev),
-                        "Transaction still in progress. Proceeding\n");
-
-       dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
-
-       pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
-       bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
-       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
-       msleep(100);
-       bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
-       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
-       msleep(100);
-}
-EXPORT_SYMBOL_GPL(adf_reset_sbr);
-
-void adf_reset_flr(struct adf_accel_dev *accel_dev)
-{
-       pcie_flr(accel_to_pci_dev(accel_dev));
-}
-EXPORT_SYMBOL_GPL(adf_reset_flr);
-
-void adf_dev_restore(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
-       if (hw_device->reset_device) {
-               dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
-                        accel_dev->accel_id);
-               hw_device->reset_device(accel_dev);
-               pci_restore_state(pdev);
-               pci_save_state(pdev);
-       }
-}
-
-static void adf_device_reset_worker(struct work_struct *work)
-{
-       struct adf_reset_dev_data *reset_data =
-                 container_of(work, struct adf_reset_dev_data, reset_work);
-       struct adf_accel_dev *accel_dev = reset_data->accel_dev;
-
-       adf_dev_restarting_notify(accel_dev);
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-       if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
-               /* The device hung and we can't restart it, so stop here */
-               dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
-               kfree(reset_data);
-               WARN(1, "QAT: device restart failed. Device is unusable\n");
-               return;
-       }
-       adf_dev_restarted_notify(accel_dev);
-       clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
-
-       /* The dev is back alive. Notify the caller if in sync mode */
-       if (reset_data->mode == ADF_DEV_RESET_SYNC)
-               complete(&reset_data->compl);
-       else
-               kfree(reset_data);
-}
-
-static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
-                                     enum adf_dev_reset_mode mode)
-{
-       struct adf_reset_dev_data *reset_data;
-
-       if (!adf_dev_started(accel_dev) ||
-           test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
-               return 0;
-
-       set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
-       reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
-       if (!reset_data)
-               return -ENOMEM;
-       reset_data->accel_dev = accel_dev;
-       init_completion(&reset_data->compl);
-       reset_data->mode = mode;
-       INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
-       queue_work(device_reset_wq, &reset_data->reset_work);
-
-       /* If in sync mode wait for the result */
-       if (mode == ADF_DEV_RESET_SYNC) {
-               int ret = 0;
-               /* Maximum device reset time is 10 seconds */
-               unsigned long wait_jiffies = msecs_to_jiffies(10000);
-               unsigned long timeout = wait_for_completion_timeout(
-                                  &reset_data->compl, wait_jiffies);
-               if (!timeout) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Reset device timeout expired\n");
-                       ret = -EFAULT;
-               }
-               kfree(reset_data);
-               return ret;
-       }
-       return 0;
-}
-
-static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Can't find acceleration device\n");
-               return PCI_ERS_RESULT_DISCONNECT;
-       }
-       if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
-               return PCI_ERS_RESULT_DISCONNECT;
-
-       return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void adf_resume(struct pci_dev *pdev)
-{
-       dev_info(&pdev->dev, "Acceleration driver reset completed\n");
-       dev_info(&pdev->dev, "Device is up and running\n");
-}
-
-const struct pci_error_handlers adf_err_handler = {
-       .error_detected = adf_error_detected,
-       .slot_reset = adf_slot_reset,
-       .resume = adf_resume,
-};
-EXPORT_SYMBOL_GPL(adf_err_handler);
-
-/**
- * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function enables PCI Advanced Error Reporting for the
- * QAT acceleration device accel_dev.
- * To be used by QAT device-specific drivers.
- */
-void adf_enable_aer(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
-       pci_enable_pcie_error_reporting(pdev);
-}
-EXPORT_SYMBOL_GPL(adf_enable_aer);
-
-/**
- * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function disables PCI Advanced Error Reporting for the
- * QAT acceleration device accel_dev.
- * To be used by QAT device-specific drivers.
- *
- * Return: void
- */
-void adf_disable_aer(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
-       pci_disable_pcie_error_reporting(pdev);
-}
-EXPORT_SYMBOL_GPL(adf_disable_aer);
-
-int adf_init_aer(void)
-{
-       device_reset_wq = alloc_workqueue("qat_device_reset_wq",
-                                         WQ_MEM_RECLAIM, 0);
-       return !device_reset_wq ? -EFAULT : 0;
-}
-
-void adf_exit_aer(void)
-{
-       if (device_reset_wq)
-               destroy_workqueue(device_reset_wq);
-       device_reset_wq = NULL;
-}
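
A hedged sketch of how a device driver consumes this file: point its pci_driver at the shared error handlers exported above and enable AER once the device is probed (the wrapper function name is illustrative):

	static void example_enable_recovery(struct adf_accel_dev *accel_dev,
					    struct pci_driver *drv)
	{
		drv->err_handler = &adf_err_handler;
		adf_enable_aer(accel_dev);
	}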
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
deleted file mode 100644 (file)
index 1931e5b..0000000
+++ /dev/null
@@ -1,339 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/seq_file.h>
-#include "adf_accel_devices.h"
-#include "adf_cfg.h"
-#include "adf_common_drv.h"
-
-static DEFINE_MUTEX(qat_cfg_read_lock);
-
-static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
-{
-       struct adf_cfg_device_data *dev_cfg = sfile->private;
-
-       mutex_lock(&qat_cfg_read_lock);
-       return seq_list_start(&dev_cfg->sec_list, *pos);
-}
-
-static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
-{
-       struct list_head *list;
-       struct adf_cfg_section *sec =
-                               list_entry(v, struct adf_cfg_section, list);
-
-       seq_printf(sfile, "[%s]\n", sec->name);
-       list_for_each(list, &sec->param_head) {
-               struct adf_cfg_key_val *ptr =
-                       list_entry(list, struct adf_cfg_key_val, list);
-               seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
-       }
-       return 0;
-}
-
-static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
-{
-       struct adf_cfg_device_data *dev_cfg = sfile->private;
-
-       return seq_list_next(v, &dev_cfg->sec_list, pos);
-}
-
-static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
-{
-       mutex_unlock(&qat_cfg_read_lock);
-}
-
-static const struct seq_operations qat_dev_cfg_sops = {
-       .start = qat_dev_cfg_start,
-       .next = qat_dev_cfg_next,
-       .stop = qat_dev_cfg_stop,
-       .show = qat_dev_cfg_show
-};
-
-DEFINE_SEQ_ATTRIBUTE(qat_dev_cfg);
-
-/**
- * adf_cfg_dev_add() - Create an acceleration device configuration table.
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function creates a configuration table for the given acceleration device.
- * The table stores device-specific config values.
- * To be used by QAT device-specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
-{
-       struct adf_cfg_device_data *dev_cfg_data;
-
-       dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
-       if (!dev_cfg_data)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&dev_cfg_data->sec_list);
-       init_rwsem(&dev_cfg_data->lock);
-       accel_dev->cfg = dev_cfg_data;
-
-       /* accel_dev->debugfs_dir should always be non-NULL here */
-       dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
-                                                 accel_dev->debugfs_dir,
-                                                 dev_cfg_data,
-                                                 &qat_dev_cfg_fops);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
-
-static void adf_cfg_section_del_all(struct list_head *head);
-
-void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
-{
-       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
-
-       down_write(&dev_cfg_data->lock);
-       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
-       up_write(&dev_cfg_data->lock);
-       clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-}
-
-/**
- * adf_cfg_dev_remove() - Clears acceleration device configuration table.
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function removes the configuration table from the given acceleration device
- * and frees all allocated memory.
- * To be used by QAT device-specific drivers.
- *
- * Return: void
- */
-void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
-{
-       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
-
-       if (!dev_cfg_data)
-               return;
-
-       down_write(&dev_cfg_data->lock);
-       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
-       up_write(&dev_cfg_data->lock);
-       debugfs_remove(dev_cfg_data->debug);
-       kfree(dev_cfg_data);
-       accel_dev->cfg = NULL;
-}
-EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
-
-static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
-                              struct adf_cfg_section *sec)
-{
-       list_add_tail(&new->list, &sec->param_head);
-}
-
-static void adf_cfg_keyval_remove(const char *key, struct adf_cfg_section *sec)
-{
-       struct list_head *head = &sec->param_head;
-       struct list_head *list_ptr, *tmp;
-
-       list_for_each_prev_safe(list_ptr, tmp, head) {
-               struct adf_cfg_key_val *ptr =
-                       list_entry(list_ptr, struct adf_cfg_key_val, list);
-
-               if (strncmp(ptr->key, key, sizeof(ptr->key)))
-                       continue;
-
-               list_del(list_ptr);
-               kfree(ptr);
-               break;
-       }
-}
-
-static void adf_cfg_keyval_del_all(struct list_head *head)
-{
-       struct list_head *list_ptr, *tmp;
-
-       list_for_each_prev_safe(list_ptr, tmp, head) {
-               struct adf_cfg_key_val *ptr =
-                       list_entry(list_ptr, struct adf_cfg_key_val, list);
-               list_del(list_ptr);
-               kfree(ptr);
-       }
-}
-
-static void adf_cfg_section_del_all(struct list_head *head)
-{
-       struct adf_cfg_section *ptr;
-       struct list_head *list, *tmp;
-
-       list_for_each_prev_safe(list, tmp, head) {
-               ptr = list_entry(list, struct adf_cfg_section, list);
-               adf_cfg_keyval_del_all(&ptr->param_head);
-               list_del(list);
-               kfree(ptr);
-       }
-}
-
-static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
-                                                     const char *key)
-{
-       struct list_head *list;
-
-       list_for_each(list, &s->param_head) {
-               struct adf_cfg_key_val *ptr =
-                       list_entry(list, struct adf_cfg_key_val, list);
-               if (!strcmp(ptr->key, key))
-                       return ptr;
-       }
-       return NULL;
-}
-
-static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
-                                               const char *sec_name)
-{
-       struct adf_cfg_device_data *cfg = accel_dev->cfg;
-       struct list_head *list;
-
-       list_for_each(list, &cfg->sec_list) {
-               struct adf_cfg_section *ptr =
-                       list_entry(list, struct adf_cfg_section, list);
-               if (!strcmp(ptr->name, sec_name))
-                       return ptr;
-       }
-       return NULL;
-}
-
-static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
-                              const char *sec_name,
-                              const char *key_name,
-                              char *val)
-{
-       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
-       struct adf_cfg_key_val *keyval = NULL;
-
-       if (sec)
-               keyval = adf_cfg_key_value_find(sec, key_name);
-       if (keyval) {
-               memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
-               return 0;
-       }
-       return -ENODATA;
-}
-
-/**
- * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
- * @accel_dev:  Pointer to acceleration device.
- * @section_name: Name of the section where the param will be added
- * @key: The key string
- * @val: Value for the given @key
- * @type: Type - string, int or address
- *
- * Function adds a configuration key-value entry in the appropriate section
- * of the given acceleration device. If the key already exists, the value
- * is updated.
- * To be used by QAT device-specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
-                               const char *section_name,
-                               const char *key, const void *val,
-                               enum adf_cfg_val_type type)
-{
-       struct adf_cfg_device_data *cfg = accel_dev->cfg;
-       struct adf_cfg_key_val *key_val;
-       struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
-                                                          section_name);
-       char temp_val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-
-       if (!section)
-               return -EFAULT;
-
-       key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
-       if (!key_val)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&key_val->list);
-       strscpy(key_val->key, key, sizeof(key_val->key));
-
-       if (type == ADF_DEC) {
-               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
-                        "%ld", (*((long *)val)));
-       } else if (type == ADF_STR) {
-               strscpy(key_val->val, (char *)val, sizeof(key_val->val));
-       } else if (type == ADF_HEX) {
-               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
-                        "0x%lx", (unsigned long)val);
-       } else {
-               dev_err(&GET_DEV(accel_dev), "Unknown type given.\n");
-               kfree(key_val);
-               return -EINVAL;
-       }
-       key_val->type = type;
-
-       /* Add the key-value pair according to the following policy:
-        * 1. if the key doesn't exist, add it;
-        * 2. if the key already exists with a different value then update it
-        *    to the new value (the key is deleted and the newly created
-        *    key_val containing the new value is added to the database);
-        * 3. if the key exists with the same value, then return without doing
-        *    anything (the newly created key_val is freed).
-        */
-       if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) {
-               if (strncmp(temp_val, key_val->val, sizeof(temp_val))) {
-                       adf_cfg_keyval_remove(key, section);
-               } else {
-                       kfree(key_val);
-                       return 0;
-               }
-       }
-
-       down_write(&cfg->lock);
-       adf_cfg_keyval_add(key_val, section);
-       up_write(&cfg->lock);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
-
-/**
- * adf_cfg_section_add() - Add config section entry to config table.
- * @accel_dev:  Pointer to acceleration device.
- * @name: Name of the section
- *
- * Function adds a configuration section where key-value entries
- * will be stored.
- * To be used by QAT device-specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
-{
-       struct adf_cfg_device_data *cfg = accel_dev->cfg;
-       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
-
-       if (sec)
-               return 0;
-
-       sec = kzalloc(sizeof(*sec), GFP_KERNEL);
-       if (!sec)
-               return -ENOMEM;
-
-       strscpy(sec->name, name, sizeof(sec->name));
-       INIT_LIST_HEAD(&sec->param_head);
-       down_write(&cfg->lock);
-       list_add_tail(&sec->list, &cfg->sec_list);
-       up_write(&cfg->lock);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_cfg_section_add);
-
-int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
-                           const char *section, const char *name,
-                           char *value)
-{
-       struct adf_cfg_device_data *cfg = accel_dev->cfg;
-       int ret;
-
-       down_read(&cfg->lock);
-       ret = adf_cfg_key_val_get(accel_dev, section, name, value);
-       up_read(&cfg->lock);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_cfg_get_param_value);
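
A hedged usage sketch of the exported config API — the section and key names come from adf_cfg_strings.h further below; the value is illustrative:

	static int example_configure(struct adf_accel_dev *accel_dev)
	{
		unsigned long num_cy = 2;
		int ret;

		ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
		if (ret)
			return ret;

		/* ADF_DEC stores the long pointed to by val as decimal text */
		return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						   ADF_NUM_CY, &num_cy, ADF_DEC);
	}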
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h
deleted file mode 100644 (file)
index 376cde6..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_CFG_H_
-#define ADF_CFG_H_
-
-#include <linux/list.h>
-#include <linux/rwsem.h>
-#include <linux/debugfs.h>
-#include "adf_accel_devices.h"
-#include "adf_cfg_common.h"
-#include "adf_cfg_strings.h"
-
-struct adf_cfg_key_val {
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-       enum adf_cfg_val_type type;
-       struct list_head list;
-};
-
-struct adf_cfg_section {
-       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
-       struct list_head list;
-       struct list_head param_head;
-};
-
-struct adf_cfg_device_data {
-       struct list_head sec_list;
-       struct dentry *debug;
-       struct rw_semaphore lock;
-};
-
-int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
-void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
-int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
-void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
-int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
-                               const char *section_name,
-                               const char *key, const void *val,
-                               enum adf_cfg_val_type type);
-int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
-                           const char *section, const char *name, char *value);
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
deleted file mode 100644 (file)
index 6e5de1d..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_CFG_COMMON_H_
-#define ADF_CFG_COMMON_H_
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define ADF_CFG_MAX_STR_LEN 64
-#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
-#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
-#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
-#define ADF_CFG_BASE_DEC 10
-#define ADF_CFG_BASE_HEX 16
-#define ADF_CFG_ALL_DEVICES 0xFE
-#define ADF_CFG_NO_DEVICE 0xFF
-#define ADF_CFG_AFFINITY_WHATEVER 0xFF
-#define MAX_DEVICE_NAME_SIZE 32
-#define ADF_MAX_DEVICES (32 * 32)
-#define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES)
-
-#define ADF_CFG_SERV_RING_PAIR_0_SHIFT 0
-#define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3
-#define ADF_CFG_SERV_RING_PAIR_2_SHIFT 6
-#define ADF_CFG_SERV_RING_PAIR_3_SHIFT 9
-enum adf_cfg_service_type {
-       UNUSED = 0,
-       CRYPTO,
-       COMP,
-       SYM,
-       ASYM,
-       USED
-};
-
-enum adf_cfg_val_type {
-       ADF_DEC,
-       ADF_HEX,
-       ADF_STR
-};
-
-enum adf_device_type {
-       DEV_UNKNOWN = 0,
-       DEV_DH895XCC,
-       DEV_DH895XCCVF,
-       DEV_C62X,
-       DEV_C62XVF,
-       DEV_C3XXX,
-       DEV_C3XXXVF,
-       DEV_4XXX,
-};
-
-struct adf_dev_status_info {
-       enum adf_device_type type;
-       __u32 accel_id;
-       __u32 instance_id;
-       __u8 num_ae;
-       __u8 num_accel;
-       __u8 num_logical_accel;
-       __u8 banks_per_accel;
-       __u8 state;
-       __u8 bus;
-       __u8 dev;
-       __u8 fun;
-       char name[MAX_DEVICE_NAME_SIZE];
-};
-
-#define ADF_CTL_IOC_MAGIC 'a'
-#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
-               struct adf_user_cfg_ctl_data)
-#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
-               struct adf_user_cfg_ctl_data)
-#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
-               struct adf_user_cfg_ctl_data)
-#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, __u32)
-#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, __s32)
-#endif
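
All five ioctls above are declared _IOW, even though IOCTL_STATUS_ACCEL_DEV and IOCTL_GET_NUM_DEVICES are serviced with copy_to_user() in the control driver further down this diff. A hedged userspace sketch of the device-count query; the /dev path is an assumption about the local udev naming for the "qat_adf_ctl" char device:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "adf_cfg_common.h"   /* assumed exported to userspace */

    int main(void)
    {
            __s32 num = 0;
            int fd = open("/dev/qat_adf_ctl", O_RDWR);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, IOCTL_GET_NUM_DEVICES, &num) == 0)
                    printf("%d QAT device(s)\n", num);
            close(fd);
            return 0;
    }
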
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
deleted file mode 100644 (file)
index 5d8c3bd..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_CFG_STRINGS_H_
-#define ADF_CFG_STRINGS_H_
-
-#define ADF_GENERAL_SEC "GENERAL"
-#define ADF_KERNEL_SEC "KERNEL"
-#define ADF_ACCEL_SEC "Accelerator"
-#define ADF_NUM_CY "NumberCyInstances"
-#define ADF_NUM_DC "NumberDcInstances"
-#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
-#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
-#define ADF_RING_DC_SIZE "NumConcurrentRequests"
-#define ADF_RING_ASYM_TX "RingAsymTx"
-#define ADF_RING_SYM_TX "RingSymTx"
-#define ADF_RING_ASYM_RX "RingAsymRx"
-#define ADF_RING_SYM_RX "RingSymRx"
-#define ADF_RING_DC_TX "RingTx"
-#define ADF_RING_DC_RX "RingRx"
-#define ADF_ETRMGR_BANK "Bank"
-#define ADF_RING_SYM_BANK_NUM "BankSymNumber"
-#define ADF_RING_ASYM_BANK_NUM "BankAsymNumber"
-#define ADF_RING_DC_BANK_NUM "BankDcNumber"
-#define ADF_CY "Cy"
-#define ADF_DC "Dc"
-#define ADF_CFG_DC "dc"
-#define ADF_CFG_CY "sym;asym"
-#define ADF_SERVICES_ENABLED "ServicesEnabled"
-#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
-#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
-       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
-#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
-#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
-       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCE_TIMER
-#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
-#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
-       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_MSG_ENABLED
-#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
-#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
-       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
-#define ADF_ACCEL_STR "Accelerator%d"
-#endif
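
The *_FORMAT macros rely on C adjacent-string-literal concatenation, so the bank number is spliced into the middle of the key at snprintf() time. For example, mirroring the use in adf_gen2_config.c later in this diff:

    char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];

    /* ADF_ETRMGR_COALESCE_TIMER_FORMAT expands to
     * "Bank" "%d" "InterruptCoalescingTimerNs"; for bank 0 this yields
     * the key "Bank0InterruptCoalescingTimerNs". */
    snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, 0);
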
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
deleted file mode 100644 (file)
index 421f4fb..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_CFG_USER_H_
-#define ADF_CFG_USER_H_
-
-#include "adf_cfg_common.h"
-#include "adf_cfg_strings.h"
-
-struct adf_user_cfg_key_val {
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-       union {
-               struct adf_user_cfg_key_val *next;
-               __u64 padding3;
-       };
-       enum adf_cfg_val_type type;
-} __packed;
-
-struct adf_user_cfg_section {
-       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
-       union {
-               struct adf_user_cfg_key_val *params;
-               __u64 padding1;
-       };
-       union {
-               struct adf_user_cfg_section *next;
-               __u64 padding3;
-       };
-} __packed;
-
-struct adf_user_cfg_ctl_data {
-       union {
-               struct adf_user_cfg_section *config_section;
-               __u64 padding;
-       };
-       __u8 device_id;
-} __packed;
-#endif
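
Each structure keeps its userspace pointer inside a union padded to __u64, so the __packed layout is identical for 32-bit and 64-bit callers and the kernel can walk the chain with copy_from_user() either way. A hedged sketch of building a one-section, one-key configuration (key and value strings taken from adf_cfg_strings.h):

    #include "adf_cfg_user.h"   /* assumed exported to userspace */

    struct adf_user_cfg_key_val kv = {
            .key  = ADF_SERVICES_ENABLED,       /* "ServicesEnabled" */
            .val  = ADF_CFG_CY,                 /* "sym;asym" */
            .type = ADF_STR,
            /* .next left NULL: last parameter in this section */
    };
    struct adf_user_cfg_section sec = {
            .name   = ADF_KERNEL_SEC,           /* "KERNEL" */
            .params = &kv,
            /* .next left NULL: last section */
    };
    struct adf_user_cfg_ctl_data cfg = {
            .config_section = &sec,
            .device_id      = 0,
    };
    /* then: ioctl(fd, IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS, &cfg); */
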
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
deleted file mode 100644 (file)
index 7189265..0000000
+++ /dev/null
@@ -1,253 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2021 Intel Corporation */
-#ifndef ADF_DRV_H
-#define ADF_DRV_H
-
-#include <linux/list.h>
-#include <linux/pci.h>
-#include "adf_accel_devices.h"
-#include "icp_qat_fw_loader_handle.h"
-#include "icp_qat_hal.h"
-
-#define ADF_MAJOR_VERSION      0
-#define ADF_MINOR_VERSION      6
-#define ADF_BUILD_VERSION      0
-#define ADF_DRV_VERSION                __stringify(ADF_MAJOR_VERSION) "." \
-                               __stringify(ADF_MINOR_VERSION) "." \
-                               __stringify(ADF_BUILD_VERSION)
-
-#define ADF_STATUS_RESTARTING 0
-#define ADF_STATUS_STARTING 1
-#define ADF_STATUS_CONFIGURED 2
-#define ADF_STATUS_STARTED 3
-#define ADF_STATUS_AE_INITIALISED 4
-#define ADF_STATUS_AE_UCODE_LOADED 5
-#define ADF_STATUS_AE_STARTED 6
-#define ADF_STATUS_PF_RUNNING 7
-#define ADF_STATUS_IRQ_ALLOCATED 8
-
-enum adf_dev_reset_mode {
-       ADF_DEV_RESET_ASYNC = 0,
-       ADF_DEV_RESET_SYNC
-};
-
-enum adf_event {
-       ADF_EVENT_INIT = 0,
-       ADF_EVENT_START,
-       ADF_EVENT_STOP,
-       ADF_EVENT_SHUTDOWN,
-       ADF_EVENT_RESTARTING,
-       ADF_EVENT_RESTARTED,
-};
-
-struct service_hndl {
-       int (*event_hld)(struct adf_accel_dev *accel_dev,
-                        enum adf_event event);
-       unsigned long init_status[ADF_DEVS_ARRAY_SIZE];
-       unsigned long start_status[ADF_DEVS_ARRAY_SIZE];
-       char *name;
-       struct list_head list;
-};
-
-int adf_service_register(struct service_hndl *service);
-int adf_service_unregister(struct service_hndl *service);
-
-int adf_dev_init(struct adf_accel_dev *accel_dev);
-int adf_dev_start(struct adf_accel_dev *accel_dev);
-void adf_dev_stop(struct adf_accel_dev *accel_dev);
-void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
-int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev);
-
-void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
-void adf_clean_vf_map(bool);
-
-int adf_ctl_dev_register(void);
-void adf_ctl_dev_unregister(void);
-int adf_processes_dev_register(void);
-void adf_processes_dev_unregister(void);
-
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
-                      struct adf_accel_dev *pf);
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
-                      struct adf_accel_dev *pf);
-struct list_head *adf_devmgr_get_head(void);
-struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id);
-struct adf_accel_dev *adf_devmgr_get_first(void);
-struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
-int adf_devmgr_verify_id(u32 id);
-void adf_devmgr_get_num_dev(u32 *num);
-int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
-int adf_dev_started(struct adf_accel_dev *accel_dev);
-int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
-int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
-int adf_ae_init(struct adf_accel_dev *accel_dev);
-int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
-int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
-void adf_ae_fw_release(struct adf_accel_dev *accel_dev);
-int adf_ae_start(struct adf_accel_dev *accel_dev);
-int adf_ae_stop(struct adf_accel_dev *accel_dev);
-
-extern const struct pci_error_handlers adf_err_handler;
-void adf_enable_aer(struct adf_accel_dev *accel_dev);
-void adf_disable_aer(struct adf_accel_dev *accel_dev);
-void adf_reset_sbr(struct adf_accel_dev *accel_dev);
-void adf_reset_flr(struct adf_accel_dev *accel_dev);
-void adf_dev_restore(struct adf_accel_dev *accel_dev);
-int adf_init_aer(void);
-void adf_exit_aer(void);
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
-int adf_send_admin_init(struct adf_accel_dev *accel_dev);
-int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
-int adf_init_arb(struct adf_accel_dev *accel_dev);
-void adf_exit_arb(struct adf_accel_dev *accel_dev);
-void adf_update_ring_arb(struct adf_etr_ring_data *ring);
-
-int adf_dev_get(struct adf_accel_dev *accel_dev);
-void adf_dev_put(struct adf_accel_dev *accel_dev);
-int adf_dev_in_use(struct adf_accel_dev *accel_dev);
-int adf_init_etr_data(struct adf_accel_dev *accel_dev);
-void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
-int qat_crypto_register(void);
-int qat_crypto_unregister(void);
-int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev);
-struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
-void qat_crypto_put_instance(struct qat_crypto_instance *inst);
-void qat_alg_callback(void *resp);
-void qat_alg_asym_callback(void *resp);
-int qat_algs_register(void);
-void qat_algs_unregister(void);
-int qat_asym_algs_register(void);
-void qat_asym_algs_unregister(void);
-
-struct qat_compression_instance *qat_compression_get_instance_node(int node);
-void qat_compression_put_instance(struct qat_compression_instance *inst);
-int qat_compression_register(void);
-int qat_compression_unregister(void);
-int qat_comp_algs_register(void);
-void qat_comp_algs_unregister(void);
-void qat_comp_alg_callback(void *resp);
-
-int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
-void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
-int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
-void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
-
-int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev);
-
-int adf_sysfs_init(struct adf_accel_dev *accel_dev);
-
-int qat_hal_init(struct adf_accel_dev *accel_dev);
-void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
-int qat_hal_start(struct icp_qat_fw_loader_handle *handle);
-void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
-                 unsigned int ctx_mask);
-void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
-int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
-void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
-                         unsigned char ae, unsigned int ctx_mask);
-int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
-                           unsigned int ae);
-int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
-                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
-                          unsigned char mode);
-int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
-                           unsigned char ae, unsigned char mode);
-int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
-                          unsigned char ae, unsigned char mode);
-void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
-                   unsigned char ae, unsigned int ctx_mask, unsigned int upc);
-void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
-                      unsigned char ae, unsigned int uaddr,
-                      unsigned int words_num, u64 *uword);
-void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
-                    unsigned int uword_addr, unsigned int words_num,
-                    unsigned int *data);
-int qat_hal_get_ins_num(void);
-int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
-                       unsigned char ae,
-                       struct icp_qat_uof_batch_init *lm_init_header);
-int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
-                    unsigned char ae, unsigned long ctx_mask,
-                    enum icp_qat_uof_regtype reg_type,
-                    unsigned short reg_num, unsigned int regdata);
-int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
-                        unsigned char ae, unsigned long ctx_mask,
-                        enum icp_qat_uof_regtype reg_type,
-                        unsigned short reg_num, unsigned int regdata);
-int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
-                        unsigned char ae, unsigned long ctx_mask,
-                        enum icp_qat_uof_regtype reg_type,
-                        unsigned short reg_num, unsigned int regdata);
-int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
-                   unsigned char ae, unsigned long ctx_mask,
-                   unsigned short reg_num, unsigned int regdata);
-int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
-                 unsigned char ae, unsigned short lm_addr, unsigned int value);
-void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
-                               unsigned char ae, unsigned char mode);
-int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
-void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle);
-int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr,
-                      int mem_size);
-int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
-                    void *addr_ptr, u32 mem_size, char *obj_name);
-int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
-                            unsigned int cfg_ae_mask);
-int adf_init_misc_wq(void);
-void adf_exit_misc_wq(void);
-bool adf_misc_wq_queue_work(struct work_struct *work);
-#if defined(CONFIG_PCI_IOV)
-int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
-void adf_disable_sriov(struct adf_accel_dev *accel_dev);
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
-void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
-bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
-bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr);
-int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev);
-void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
-void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
-void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
-int adf_init_pf_wq(void);
-void adf_exit_pf_wq(void);
-int adf_init_vf_wq(void);
-void adf_exit_vf_wq(void);
-void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
-#else
-#define adf_sriov_configure NULL
-
-static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
-{
-}
-
-static inline int adf_init_pf_wq(void)
-{
-       return 0;
-}
-
-static inline void adf_exit_pf_wq(void)
-{
-}
-
-static inline int adf_init_vf_wq(void)
-{
-       return 0;
-}
-
-static inline void adf_exit_vf_wq(void)
-{
-}
-
-#endif
-
-static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_bar *pmisc;
-
-       pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
-
-       return pmisc->virt_addr;
-}
-
-#endif
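
struct service_hndl above is the driver's event fan-out: subsystems register a handler and the init/teardown paths replay ADF_EVENT_* transitions to every registrant per device. A minimal sketch of a registrant, with hypothetical names:

    static int example_event_handler(struct adf_accel_dev *accel_dev,
                                     enum adf_event event)
    {
            switch (event) {
            case ADF_EVENT_START:
                    /* bring per-device state up */
                    return 0;
            case ADF_EVENT_STOP:
                    /* quiesce per-device state */
                    return 0;
            default:
                    return 0;
            }
    }

    static struct service_hndl example_service = {
            .event_hld = example_event_handler,
            .name = "example_service",
    };

    /* adf_service_register(&example_service) at module init,
     * adf_service_unregister(&example_service) at module exit. */
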
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
deleted file mode 100644 (file)
index 9190532..0000000
+++ /dev/null
@@ -1,483 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/bitops.h>
-#include <linux/pci.h>
-#include <linux/cdev.h>
-#include <linux/uaccess.h>
-#include <linux/crypto.h>
-
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_cfg.h"
-#include "adf_cfg_common.h"
-#include "adf_cfg_user.h"
-
-#define ADF_CFG_MAX_SECTION 512
-#define ADF_CFG_MAX_KEY_VAL 256
-
-#define DEVICE_NAME "qat_adf_ctl"
-
-static DEFINE_MUTEX(adf_ctl_lock);
-static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
-
-static const struct file_operations adf_ctl_ops = {
-       .owner = THIS_MODULE,
-       .unlocked_ioctl = adf_ctl_ioctl,
-       .compat_ioctl = compat_ptr_ioctl,
-};
-
-struct adf_ctl_drv_info {
-       unsigned int major;
-       struct cdev drv_cdev;
-       struct class *drv_class;
-};
-
-static struct adf_ctl_drv_info adf_ctl_drv;
-
-static void adf_chr_drv_destroy(void)
-{
-       device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
-       cdev_del(&adf_ctl_drv.drv_cdev);
-       class_destroy(adf_ctl_drv.drv_class);
-       unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
-}
-
-static int adf_chr_drv_create(void)
-{
-       dev_t dev_id;
-       struct device *drv_device;
-
-       if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
-               pr_err("QAT: unable to allocate chrdev region\n");
-               return -EFAULT;
-       }
-
-       adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
-       if (IS_ERR(adf_ctl_drv.drv_class)) {
-               pr_err("QAT: class_create failed for adf_ctl\n");
-               goto err_chrdev_unreg;
-       }
-       adf_ctl_drv.major = MAJOR(dev_id);
-       cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
-       if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
-               pr_err("QAT: cdev add failed\n");
-               goto err_class_destr;
-       }
-
-       drv_device = device_create(adf_ctl_drv.drv_class, NULL,
-                                  MKDEV(adf_ctl_drv.major, 0),
-                                  NULL, DEVICE_NAME);
-       if (IS_ERR(drv_device)) {
-               pr_err("QAT: failed to create device\n");
-               goto err_cdev_del;
-       }
-       return 0;
-err_cdev_del:
-       cdev_del(&adf_ctl_drv.drv_cdev);
-err_class_destr:
-       class_destroy(adf_ctl_drv.drv_class);
-err_chrdev_unreg:
-       unregister_chrdev_region(dev_id, 1);
-       return -EFAULT;
-}
-
-static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
-                                  unsigned long arg)
-{
-       struct adf_user_cfg_ctl_data *cfg_data;
-
-       cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
-       if (!cfg_data)
-               return -ENOMEM;
-
-       /* Initialize device id to NO DEVICE as 0 is a valid device id */
-       cfg_data->device_id = ADF_CFG_NO_DEVICE;
-
-       if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
-               pr_err("QAT: failed to copy from user cfg_data.\n");
-               kfree(cfg_data);
-               return -EIO;
-       }
-
-       *ctl_data = cfg_data;
-       return 0;
-}
-
-static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
-                                 const char *section,
-                                 const struct adf_user_cfg_key_val *key_val)
-{
-       if (key_val->type == ADF_HEX) {
-               long *ptr = (long *)key_val->val;
-               long val = *ptr;
-
-               if (adf_cfg_add_key_value_param(accel_dev, section,
-                                               key_val->key, (void *)val,
-                                               key_val->type)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "failed to add hex keyvalue.\n");
-                       return -EFAULT;
-               }
-       } else {
-               if (adf_cfg_add_key_value_param(accel_dev, section,
-                                               key_val->key, key_val->val,
-                                               key_val->type)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "failed to add keyvalue.\n");
-                       return -EFAULT;
-               }
-       }
-       return 0;
-}
-
-static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
-                                  struct adf_user_cfg_ctl_data *ctl_data)
-{
-       struct adf_user_cfg_key_val key_val;
-       struct adf_user_cfg_key_val *params_head;
-       struct adf_user_cfg_section section, *section_head;
-       int i, j;
-
-       section_head = ctl_data->config_section;
-
-       for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
-               if (copy_from_user(&section, (void __user *)section_head,
-                                  sizeof(*section_head))) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "failed to copy section info\n");
-                       goto out_err;
-               }
-
-               if (adf_cfg_section_add(accel_dev, section.name)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "failed to add section.\n");
-                       goto out_err;
-               }
-
-               params_head = section.params;
-
-               for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
-                       if (copy_from_user(&key_val, (void __user *)params_head,
-                                          sizeof(key_val))) {
-                               dev_err(&GET_DEV(accel_dev),
-                                       "Failed to copy keyvalue.\n");
-                               goto out_err;
-                       }
-                       if (adf_add_key_value_data(accel_dev, section.name,
-                                                  &key_val)) {
-                               goto out_err;
-                       }
-                       params_head = key_val.next;
-               }
-               section_head = section.next;
-       }
-       return 0;
-out_err:
-       adf_cfg_del_all(accel_dev);
-       return -EFAULT;
-}
-
-static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
-                                   unsigned long arg)
-{
-       int ret;
-       struct adf_user_cfg_ctl_data *ctl_data;
-       struct adf_accel_dev *accel_dev;
-
-       ret = adf_ctl_alloc_resources(&ctl_data, arg);
-       if (ret)
-               return ret;
-
-       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
-       if (!accel_dev) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       if (adf_dev_started(accel_dev)) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       if (adf_copy_key_value_data(accel_dev, ctl_data)) {
-               ret = -EFAULT;
-               goto out;
-       }
-       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-out:
-       kfree(ctl_data);
-       return ret;
-}
-
-static int adf_ctl_is_device_in_use(int id)
-{
-       struct adf_accel_dev *dev;
-
-       list_for_each_entry(dev, adf_devmgr_get_head(), list) {
-               if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
-                       if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
-                               dev_info(&GET_DEV(dev),
-                                        "device qat_dev%d is busy\n",
-                                        dev->accel_id);
-                               return -EBUSY;
-                       }
-               }
-       }
-       return 0;
-}
-
-static void adf_ctl_stop_devices(u32 id)
-{
-       struct adf_accel_dev *accel_dev;
-
-       list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
-               if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
-                       if (!adf_dev_started(accel_dev))
-                               continue;
-
-                       /* First stop all VFs */
-                       if (!accel_dev->is_vf)
-                               continue;
-
-                       adf_dev_stop(accel_dev);
-                       adf_dev_shutdown(accel_dev);
-               }
-       }
-
-       list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
-               if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
-                       if (!adf_dev_started(accel_dev))
-                               continue;
-
-                       adf_dev_stop(accel_dev);
-                       adf_dev_shutdown(accel_dev);
-               }
-       }
-}
-
-static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
-                                 unsigned long arg)
-{
-       int ret;
-       struct adf_user_cfg_ctl_data *ctl_data;
-
-       ret = adf_ctl_alloc_resources(&ctl_data, arg);
-       if (ret)
-               return ret;
-
-       if (adf_devmgr_verify_id(ctl_data->device_id)) {
-               pr_err("QAT: Device %d not found\n", ctl_data->device_id);
-               ret = -ENODEV;
-               goto out;
-       }
-
-       ret = adf_ctl_is_device_in_use(ctl_data->device_id);
-       if (ret)
-               goto out;
-
-       if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
-               pr_info("QAT: Stopping all acceleration devices.\n");
-       else
-               pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
-                       ctl_data->device_id);
-
-       adf_ctl_stop_devices(ctl_data->device_id);
-
-out:
-       kfree(ctl_data);
-       return ret;
-}
-
-static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
-                                  unsigned long arg)
-{
-       int ret;
-       struct adf_user_cfg_ctl_data *ctl_data;
-       struct adf_accel_dev *accel_dev;
-
-       ret = adf_ctl_alloc_resources(&ctl_data, arg);
-       if (ret)
-               return ret;
-
-       ret = -ENODEV;
-       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
-       if (!accel_dev)
-               goto out;
-
-       if (!adf_dev_started(accel_dev)) {
-               dev_info(&GET_DEV(accel_dev),
-                        "Starting acceleration device qat_dev%d.\n",
-                        ctl_data->device_id);
-               ret = adf_dev_init(accel_dev);
-               if (!ret)
-                       ret = adf_dev_start(accel_dev);
-       } else {
-               dev_info(&GET_DEV(accel_dev),
-                        "Acceleration device qat_dev%d already started.\n",
-                        ctl_data->device_id);
-       }
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
-                       ctl_data->device_id);
-               adf_dev_stop(accel_dev);
-               adf_dev_shutdown(accel_dev);
-       }
-out:
-       kfree(ctl_data);
-       return ret;
-}
-
-static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
-                                        unsigned long arg)
-{
-       u32 num_devices = 0;
-
-       adf_devmgr_get_num_dev(&num_devices);
-       if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
-                                   unsigned long arg)
-{
-       struct adf_hw_device_data *hw_data;
-       struct adf_dev_status_info dev_info;
-       struct adf_accel_dev *accel_dev;
-
-       if (copy_from_user(&dev_info, (void __user *)arg,
-                          sizeof(struct adf_dev_status_info))) {
-               pr_err("QAT: failed to copy from user.\n");
-               return -EFAULT;
-       }
-
-       accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
-       if (!accel_dev)
-               return -ENODEV;
-
-       hw_data = accel_dev->hw_device;
-       dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
-       dev_info.num_ae = hw_data->get_num_aes(hw_data);
-       dev_info.num_accel = hw_data->get_num_accels(hw_data);
-       dev_info.num_logical_accel = hw_data->num_logical_accel;
-       dev_info.banks_per_accel = hw_data->num_banks
-                                       / hw_data->num_logical_accel;
-       strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
-       dev_info.instance_id = hw_data->instance_id;
-       dev_info.type = hw_data->dev_class->type;
-       dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
-       dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
-       dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
-
-       if (copy_to_user((void __user *)arg, &dev_info,
-                        sizeof(struct adf_dev_status_info))) {
-               dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
-       int ret;
-
-       if (mutex_lock_interruptible(&adf_ctl_lock))
-               return -EFAULT;
-
-       switch (cmd) {
-       case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
-               ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
-               break;
-
-       case IOCTL_STOP_ACCEL_DEV:
-               ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
-               break;
-
-       case IOCTL_START_ACCEL_DEV:
-               ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
-               break;
-
-       case IOCTL_GET_NUM_DEVICES:
-               ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
-               break;
-
-       case IOCTL_STATUS_ACCEL_DEV:
-               ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
-               break;
-       default:
-               pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
-               ret = -EFAULT;
-               break;
-       }
-       mutex_unlock(&adf_ctl_lock);
-       return ret;
-}
-
-static int __init adf_register_ctl_device_driver(void)
-{
-       if (adf_chr_drv_create())
-               goto err_chr_dev;
-
-       if (adf_init_misc_wq())
-               goto err_misc_wq;
-
-       if (adf_init_aer())
-               goto err_aer;
-
-       if (adf_init_pf_wq())
-               goto err_pf_wq;
-
-       if (adf_init_vf_wq())
-               goto err_vf_wq;
-
-       if (qat_crypto_register())
-               goto err_crypto_register;
-
-       if (qat_compression_register())
-               goto err_compression_register;
-
-       return 0;
-
-err_compression_register:
-       qat_crypto_unregister();
-err_crypto_register:
-       adf_exit_vf_wq();
-err_vf_wq:
-       adf_exit_pf_wq();
-err_pf_wq:
-       adf_exit_aer();
-err_aer:
-       adf_exit_misc_wq();
-err_misc_wq:
-       adf_chr_drv_destroy();
-err_chr_dev:
-       mutex_destroy(&adf_ctl_lock);
-       return -EFAULT;
-}
-
-static void __exit adf_unregister_ctl_device_driver(void)
-{
-       adf_chr_drv_destroy();
-       adf_exit_misc_wq();
-       adf_exit_aer();
-       adf_exit_vf_wq();
-       adf_exit_pf_wq();
-       qat_crypto_unregister();
-       qat_compression_unregister();
-       adf_clean_vf_map(false);
-       mutex_destroy(&adf_ctl_lock);
-}
-
-module_init(adf_register_ctl_device_driver);
-module_exit(adf_unregister_ctl_device_driver);
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_ALIAS_CRYPTO("intel_qat");
-MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
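
The stop path above pairs with the wildcard id: ADF_CFG_ALL_DEVICES (0xFE) addresses every device, and adf_ctl_is_device_in_use() fails the request with -EBUSY if any match is in reset or still referenced. A hedged userspace sketch (includes and device node path as in the earlier sketch, both assumptions):

    struct adf_user_cfg_ctl_data ctl = {
            .device_id = ADF_CFG_ALL_DEVICES,
    };
    int fd = open("/dev/qat_adf_ctl", O_RDWR);

    if (fd >= 0) {
            ioctl(fd, IOCTL_STOP_ACCEL_DEV, &ctl);
            close(fd);
    }
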
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
deleted file mode 100644 (file)
index 4c752ee..0000000
+++ /dev/null
@@ -1,450 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include "adf_cfg.h"
-#include "adf_common_drv.h"
-
-static LIST_HEAD(accel_table);
-static LIST_HEAD(vfs_table);
-static DEFINE_MUTEX(table_lock);
-static u32 num_devices;
-static u8 id_map[ADF_MAX_DEVICES];
-
-struct vf_id_map {
-       u32 bdf;
-       u32 id;
-       u32 fake_id;
-       bool attached;
-       struct list_head list;
-};
-
-static int adf_get_vf_id(struct adf_accel_dev *vf)
-{
-       return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
-               PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
-               (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
-}
-
-static int adf_get_vf_num(struct adf_accel_dev *vf)
-{
-       return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
-}
-
-static struct vf_id_map *adf_find_vf(u32 bdf)
-{
-       struct list_head *itr;
-
-       list_for_each(itr, &vfs_table) {
-               struct vf_id_map *ptr =
-                       list_entry(itr, struct vf_id_map, list);
-
-               if (ptr->bdf == bdf)
-                       return ptr;
-       }
-       return NULL;
-}
-
-static int adf_get_vf_real_id(u32 fake)
-{
-       struct list_head *itr;
-
-       list_for_each(itr, &vfs_table) {
-               struct vf_id_map *ptr =
-                       list_entry(itr, struct vf_id_map, list);
-               if (ptr->fake_id == fake)
-                       return ptr->id;
-       }
-       return -1;
-}
-
-/**
- * adf_clean_vf_map() - Cleans VF id mappings
- * @vf: flag indicating whether mappings are cleaned
- *     for vfs only or for vfs and pfs
- *
- * Function cleans internal ids for virtual functions.
- */
-void adf_clean_vf_map(bool vf)
-{
-       struct vf_id_map *map;
-       struct list_head *ptr, *tmp;
-
-       mutex_lock(&table_lock);
-       list_for_each_safe(ptr, tmp, &vfs_table) {
-               map = list_entry(ptr, struct vf_id_map, list);
-               if (map->bdf != -1) {
-                       id_map[map->id] = 0;
-                       num_devices--;
-               }
-
-               if (vf && map->bdf == -1)
-                       continue;
-
-               list_del(ptr);
-               kfree(map);
-       }
-       mutex_unlock(&table_lock);
-}
-EXPORT_SYMBOL_GPL(adf_clean_vf_map);
-
-/**
- * adf_devmgr_update_class_index() - Update internal index
- * @hw_data:  Pointer to internal device data.
- *
- * Function updates internal dev index for VFs
- */
-void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
-{
-       struct adf_hw_device_class *class = hw_data->dev_class;
-       struct list_head *itr;
-       int i = 0;
-
-       list_for_each(itr, &accel_table) {
-               struct adf_accel_dev *ptr =
-                               list_entry(itr, struct adf_accel_dev, list);
-
-               if (ptr->hw_device->dev_class == class)
-                       ptr->hw_device->instance_id = i++;
-
-               if (i == class->instances)
-                       break;
-       }
-}
-EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
-
-static unsigned int adf_find_free_id(void)
-{
-       unsigned int i;
-
-       for (i = 0; i < ADF_MAX_DEVICES; i++) {
-               if (!id_map[i]) {
-                       id_map[i] = 1;
-                       return i;
-               }
-       }
-       return ADF_MAX_DEVICES + 1;
-}
-
-/**
- * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
- * @accel_dev:  Pointer to acceleration device.
- * @pf:                Corresponding PF if the accel_dev is a VF
- *
- * Function adds acceleration device to the acceleration framework.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
-                      struct adf_accel_dev *pf)
-{
-       struct list_head *itr;
-       int ret = 0;
-
-       if (num_devices == ADF_MAX_DEVICES) {
-               dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
-                       ADF_MAX_DEVICES);
-               return -EFAULT;
-       }
-
-       mutex_lock(&table_lock);
-       atomic_set(&accel_dev->ref_count, 0);
-
-       /* PF on host or VF on guest - optimized to remove redundant is_vf */
-       if (!accel_dev->is_vf || !pf) {
-               struct vf_id_map *map;
-
-               list_for_each(itr, &accel_table) {
-                       struct adf_accel_dev *ptr =
-                               list_entry(itr, struct adf_accel_dev, list);
-
-                       if (ptr == accel_dev) {
-                               ret = -EEXIST;
-                               goto unlock;
-                       }
-               }
-
-               list_add_tail(&accel_dev->list, &accel_table);
-               accel_dev->accel_id = adf_find_free_id();
-               if (accel_dev->accel_id > ADF_MAX_DEVICES) {
-                       ret = -EFAULT;
-                       goto unlock;
-               }
-               num_devices++;
-               map = kzalloc(sizeof(*map), GFP_KERNEL);
-               if (!map) {
-                       ret = -ENOMEM;
-                       goto unlock;
-               }
-               map->bdf = ~0;
-               map->id = accel_dev->accel_id;
-               map->fake_id = map->id;
-               map->attached = true;
-               list_add_tail(&map->list, &vfs_table);
-       } else if (accel_dev->is_vf && pf) {
-               /* VF on host */
-               struct vf_id_map *map;
-
-               map = adf_find_vf(adf_get_vf_num(accel_dev));
-               if (map) {
-                       struct vf_id_map *next;
-
-                       accel_dev->accel_id = map->id;
-                       list_add_tail(&accel_dev->list, &accel_table);
-                       map->fake_id++;
-                       map->attached = true;
-                       next = list_next_entry(map, list);
-                       while (next && &next->list != &vfs_table) {
-                               next->fake_id++;
-                               next = list_next_entry(next, list);
-                       }
-
-                       ret = 0;
-                       goto unlock;
-               }
-
-               map = kzalloc(sizeof(*map), GFP_KERNEL);
-               if (!map) {
-                       ret = -ENOMEM;
-                       goto unlock;
-               }
-               accel_dev->accel_id = adf_find_free_id();
-               if (accel_dev->accel_id > ADF_MAX_DEVICES) {
-                       kfree(map);
-                       ret = -EFAULT;
-                       goto unlock;
-               }
-               num_devices++;
-               list_add_tail(&accel_dev->list, &accel_table);
-               map->bdf = adf_get_vf_num(accel_dev);
-               map->id = accel_dev->accel_id;
-               map->fake_id = map->id;
-               map->attached = true;
-               list_add_tail(&map->list, &vfs_table);
-       }
-unlock:
-       mutex_unlock(&table_lock);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
-
-struct list_head *adf_devmgr_get_head(void)
-{
-       return &accel_table;
-}
-
-/**
- * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
- * @accel_dev:  Pointer to acceleration device.
- * @pf:                Corresponding PF if the accel_dev is a VF
- *
- * Function removes acceleration device from the acceleration framework.
- * To be used by QAT device specific drivers.
- *
- * Return: void
- */
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
-                      struct adf_accel_dev *pf)
-{
-       mutex_lock(&table_lock);
-       /* PF on host or VF on guest - optimized to remove redundant is_vf */
-       if (!accel_dev->is_vf || !pf) {
-               id_map[accel_dev->accel_id] = 0;
-               num_devices--;
-       } else if (accel_dev->is_vf && pf) {
-               struct vf_id_map *map, *next;
-
-               map = adf_find_vf(adf_get_vf_num(accel_dev));
-               if (!map) {
-                       dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
-                       goto unlock;
-               }
-               map->fake_id--;
-               map->attached = false;
-               next = list_next_entry(map, list);
-               while (next && &next->list != &vfs_table) {
-                       next->fake_id--;
-                       next = list_next_entry(next, list);
-               }
-       }
-unlock:
-       list_del(&accel_dev->list);
-       mutex_unlock(&table_lock);
-}
-EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
-
-struct adf_accel_dev *adf_devmgr_get_first(void)
-{
-       struct adf_accel_dev *dev = NULL;
-
-       if (!list_empty(&accel_table))
-               dev = list_first_entry(&accel_table, struct adf_accel_dev,
-                                      list);
-       return dev;
-}
-
-/**
- * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
- * @pci_dev:  Pointer to PCI device.
- *
- * Function returns acceleration device associated with the given PCI device.
- * To be used by QAT device specific drivers.
- *
- * Return: pointer to accel_dev or NULL if not found.
- */
-struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
-{
-       struct list_head *itr;
-
-       mutex_lock(&table_lock);
-       list_for_each(itr, &accel_table) {
-               struct adf_accel_dev *ptr =
-                               list_entry(itr, struct adf_accel_dev, list);
-
-               if (ptr->accel_pci_dev.pci_dev == pci_dev) {
-                       mutex_unlock(&table_lock);
-                       return ptr;
-               }
-       }
-       mutex_unlock(&table_lock);
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
-
-struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
-{
-       struct list_head *itr;
-       int real_id;
-
-       mutex_lock(&table_lock);
-       real_id = adf_get_vf_real_id(id);
-       if (real_id < 0)
-               goto unlock;
-
-       id = real_id;
-
-       list_for_each(itr, &accel_table) {
-               struct adf_accel_dev *ptr =
-                               list_entry(itr, struct adf_accel_dev, list);
-               if (ptr->accel_id == id) {
-                       mutex_unlock(&table_lock);
-                       return ptr;
-               }
-       }
-unlock:
-       mutex_unlock(&table_lock);
-       return NULL;
-}
-
-int adf_devmgr_verify_id(u32 id)
-{
-       if (id == ADF_CFG_ALL_DEVICES)
-               return 0;
-
-       if (adf_devmgr_get_dev_by_id(id))
-               return 0;
-
-       return -ENODEV;
-}
-
-static int adf_get_num_detached_vfs(void)
-{
-       struct list_head *itr;
-       int vfs = 0;
-
-       mutex_lock(&table_lock);
-       list_for_each(itr, &vfs_table) {
-               struct vf_id_map *ptr =
-                       list_entry(itr, struct vf_id_map, list);
-               if (ptr->bdf != ~0 && !ptr->attached)
-                       vfs++;
-       }
-       mutex_unlock(&table_lock);
-       return vfs;
-}
-
-void adf_devmgr_get_num_dev(u32 *num)
-{
-       *num = num_devices - adf_get_num_detached_vfs();
-}
-
-/**
- * adf_dev_in_use() - Check whether accel_dev is currently in use
- * @accel_dev: Pointer to acceleration device.
- *
- * To be used by QAT device specific drivers.
- *
- * Return: 1 when device is in use, 0 otherwise.
- */
-int adf_dev_in_use(struct adf_accel_dev *accel_dev)
-{
-       return atomic_read(&accel_dev->ref_count) != 0;
-}
-EXPORT_SYMBOL_GPL(adf_dev_in_use);
-
-/**
- * adf_dev_get() - Increment accel_dev reference count
- * @accel_dev: Pointer to acceleration device.
- *
- * Increment the accel_dev refcount; on the first reference taken while
- * the device is in use, increment the module refcount too.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 when successful, -EFAULT when the module refcount cannot be
- * taken
- */
-int adf_dev_get(struct adf_accel_dev *accel_dev)
-{
-       if (atomic_add_return(1, &accel_dev->ref_count) == 1)
-               if (!try_module_get(accel_dev->owner))
-                       return -EFAULT;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_dev_get);
-
-/**
- * adf_dev_put() - Decrement accel_dev reference count
- * @accel_dev: Pointer to acceleration device.
- *
- * Decrement the accel_dev refcount; when the last reference of a use
- * period is dropped, decrement the module refcount too.
- * To be used by QAT device specific drivers.
- *
- * Return: void
- */
-void adf_dev_put(struct adf_accel_dev *accel_dev)
-{
-       if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
-               module_put(accel_dev->owner);
-}
-EXPORT_SYMBOL_GPL(adf_dev_put);
-
-/**
- * adf_devmgr_in_reset() - Check whether device is in reset
- * @accel_dev: Pointer to acceleration device.
- *
- * To be used by QAT device specific drivers.
- *
- * Return: 1 when the device is being reset, 0 otherwise.
- */
-int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
-{
-       return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
-}
-EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
-
-/**
- * adf_dev_started() - Check whether device has started
- * @accel_dev: Pointer to acceleration device.
- *
- * To be used by QAT device specific drivers.
- *
- * Return: 1 when the device has started, 0 otherwise
- */
-int adf_dev_started(struct adf_accel_dev *accel_dev)
-{
-       return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
-}
-EXPORT_SYMBOL_GPL(adf_dev_started);
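
adf_dev_get()/adf_dev_put() implement a borrow pattern: the first reference of a use period also pins the owning module, and the last put releases it. Sketched usage, with do_work() as a hypothetical stand-in:

    static int example_use_device(struct adf_accel_dev *accel_dev)
    {
            int ret;

            if (adf_dev_get(accel_dev))
                    return -EFAULT;     /* module refcount not taken */

            ret = do_work(accel_dev);   /* hypothetical work */

            adf_dev_put(accel_dev);
            return ret;
    }
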
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.c b/drivers/crypto/qat/qat_common/adf_gen2_config.c
deleted file mode 100644 (file)
index eeb30da..0000000
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "adf_cfg.h"
-#include "adf_cfg_strings.h"
-#include "adf_gen2_config.h"
-#include "adf_common_drv.h"
-#include "qat_crypto.h"
-#include "qat_compression.h"
-#include "adf_transport_access_macros.h"
-
-static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev)
-{
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       int banks = GET_MAX_BANKS(accel_dev);
-       int cpus = num_online_cpus();
-       unsigned long val;
-       int instances;
-       int ret;
-       int i;
-
-       if (adf_hw_dev_has_crypto(accel_dev))
-               instances = min(cpus, banks);
-       else
-               instances = 0;
-
-       for (i = 0; i < instances; i++) {
-               val = i;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
-                        i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
-               val = 128;
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 512;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 0;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 2;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 8;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 10;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = ADF_COALESCING_DEF_TIME;
-               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-       }
-
-       val = i;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
-                                         &val, ADF_DEC);
-       if (ret)
-               goto err;
-
-       return ret;
-
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
-       return ret;
-}
-
-static int adf_gen2_comp_dev_config(struct adf_accel_dev *accel_dev)
-{
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       int banks = GET_MAX_BANKS(accel_dev);
-       int cpus = num_online_cpus();
-       unsigned long val;
-       int instances;
-       int ret;
-       int i;
-
-       if (adf_hw_dev_has_compression(accel_dev))
-               instances = min(cpus, banks);
-       else
-               instances = 0;
-
-       for (i = 0; i < instances; i++) {
-               val = i;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 512;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 6;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 14;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-       }
-
-       val = i;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
-                                         &val, ADF_DEC);
-       if (ret)
-               return ret;
-
-       return ret;
-
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
-       return ret;
-}
-
-/**
- * adf_gen2_dev_config() - create dev config required to create instances
- *
- * @accel_dev: Pointer to acceleration device.
- *
- * Function creates device configuration required to create instances
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_gen2_dev_config(struct adf_accel_dev *accel_dev)
-{
-       int ret;
-
-       ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
-       if (ret)
-               goto err;
-
-       ret = adf_cfg_section_add(accel_dev, "Accelerator0");
-       if (ret)
-               goto err;
-
-       ret = adf_gen2_crypto_dev_config(accel_dev);
-       if (ret)
-               goto err;
-
-       ret = adf_gen2_comp_dev_config(accel_dev);
-       if (ret)
-               goto err;
-
-       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-
-       return ret;
-
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_dev_config);
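
A consumer recovers the instance counts that adf_gen2_dev_config() stored by reading the string back and parsing it. A hedged fragment using the decimal base constant from adf_cfg_common.h (error handling condensed):

    char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
    int num_cy = 0;

    /* Illustrative read-back of the "NumberCyInstances" key. */
    if (!adf_cfg_get_param_value(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, val))
            if (kstrtoint(val, ADF_CFG_BASE_DEC, &num_cy))
                    num_cy = 0;
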
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.h b/drivers/crypto/qat/qat_common/adf_gen2_config.h
deleted file mode 100644 (file)
index 4bf9da2..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN2_CONFIG_H_
-#define ADF_GEN2_CONFIG_H_
-
-#include "adf_accel_devices.h"
-
-int adf_gen2_dev_config(struct adf_accel_dev *accel_dev);
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/qat/qat_common/adf_gen2_dc.c
deleted file mode 100644 (file)
index 47261b1..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "adf_gen2_dc.h"
-#include "icp_qat_fw_comp.h"
-
-static void qat_comp_build_deflate_ctx(void *ctx)
-{
-       struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
-       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
-       struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
-       struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
-       struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
-
-       memset(req_tmpl, 0, sizeof(*req_tmpl));
-       header->hdr_flags =
-               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
-       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
-       header->comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
-                                           QAT_COMN_PTR_TYPE_SGL);
-       header->serv_specif_flags =
-               ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
-                                           ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
-       cd_pars->u.sl.comp_slice_cfg_word[0] =
-               ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
-                                                   ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
-                                                   ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
-                                                   ICP_QAT_HW_COMPRESSION_DEPTH_1,
-                                                   ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
-       req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
-       req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
-       req_pars->req_par_flags =
-               ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
-                                                     ICP_QAT_FW_COMP_EOP,
-                                                     ICP_QAT_FW_COMP_BFINAL,
-                                                     ICP_QAT_FW_COMP_CNV,
-                                                     ICP_QAT_FW_COMP_CNV_RECOVERY,
-                                                     ICP_QAT_FW_COMP_NO_CNV_DFX,
-                                                     ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
-                                                     ICP_QAT_FW_COMP_NO_XXHASH_ACC,
-                                                     ICP_QAT_FW_COMP_CNV_ERROR_NONE,
-                                                     ICP_QAT_FW_COMP_NO_APPEND_CRC,
-                                                     ICP_QAT_FW_COMP_NO_DROP_DATA);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-       ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
-
-       /* Fill second half of the template for decompression */
-       memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
-       req_tmpl++;
-       header = &req_tmpl->comn_hdr;
-       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
-       cd_pars = &req_tmpl->cd_pars;
-       cd_pars->u.sl.comp_slice_cfg_word[0] =
-               ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
-                                                   ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
-                                                   ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
-                                                   ICP_QAT_HW_COMPRESSION_DEPTH_1,
-                                                   ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
-}
-
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
-       dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
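
Note on the template layout: qat_comp_build_deflate_ctx() fills two request templates back to back (the memcpy(req_tmpl + 1, ...) copies the compression template, and the copy is then patched for decompression), so the ctx buffer must hold two structs. A minimal caller sketch, assuming only that layout; the two-element array is illustrative, not the driver's actual allocation path:

        struct icp_qat_fw_comp_req templates[2]; /* [0] compress, [1] decompress */
        struct adf_dc_ops dc_ops;

        adf_gen2_init_dc_ops(&dc_ops);
        dc_ops.build_deflate_ctx(templates);
        /* templates[0] now carries ICP_QAT_FW_COMP_CMD_STATIC;
         * templates[1] has the same settings with
         * ICP_QAT_FW_COMP_CMD_DECOMPRESS and the DIR_DECOMPRESS
         * slice config word */
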
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/qat/qat_common/adf_gen2_dc.h
deleted file mode 100644 (file)
index 6eae023..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN2_DC_H
-#define ADF_GEN2_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN2_DC_H */
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
deleted file mode 100644 (file)
index d188454..0000000
+++ /dev/null
@@ -1,268 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2020 Intel Corporation */
-#include "adf_common_drv.h"
-#include "adf_gen2_hw_data.h"
-#include "icp_qat_hw.h"
-#include <linux/pci.h>
-
-u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
-{
-       if (!self || !self->accel_mask)
-               return 0;
-
-       return hweight16(self->accel_mask);
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);
-
-u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
-{
-       if (!self || !self->ae_mask)
-               return 0;
-
-       return hweight32(self->ae_mask);
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
-
-void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       unsigned long accel_mask = hw_data->accel_mask;
-       unsigned long ae_mask = hw_data->ae_mask;
-       unsigned int val, i;
-
-       /* Enable Accel Engine error detection & correction */
-       for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
-               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i));
-               val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i), val);
-               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i));
-               val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i), val);
-       }
-
-       /* Enable shared memory error detection & correction */
-       for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
-               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_UERRSSMSH(i));
-               val |= ADF_GEN2_ERRSSMSH_EN;
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_UERRSSMSH(i), val);
-               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_CERRSSMSH(i));
-               val |= ADF_GEN2_ERRSSMSH_EN;
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_CERRSSMSH(i), val);
-       }
-}
-EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
-
-void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
-                          int num_a_regs, int num_b_regs)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       u32 reg;
-       int i;
-
-       /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
-       for (i = 0; i < num_a_regs; i++) {
-               reg = READ_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i);
-               if (enable)
-                       reg |= AE2FUNCTION_MAP_VALID;
-               else
-                       reg &= ~AE2FUNCTION_MAP_VALID;
-               WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i, reg);
-       }
-
-       /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group B */
-       for (i = 0; i < num_b_regs; i++) {
-               reg = READ_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i);
-               if (enable)
-                       reg |= AE2FUNCTION_MAP_VALID;
-               else
-                       reg &= ~AE2FUNCTION_MAP_VALID;
-               WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i, reg);
-       }
-}
-EXPORT_SYMBOL_GPL(adf_gen2_cfg_iov_thds);
-
-void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info)
-{
-       admin_csrs_info->mailbox_offset = ADF_MAILBOX_BASE_OFFSET;
-       admin_csrs_info->admin_msg_ur = ADF_ADMINMSGUR_OFFSET;
-       admin_csrs_info->admin_msg_lr = ADF_ADMINMSGLR_OFFSET;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_admin_info);
-
-void adf_gen2_get_arb_info(struct arb_info *arb_info)
-{
-       arb_info->arb_cfg = ADF_ARB_CONFIG;
-       arb_info->arb_offset = ADF_ARB_OFFSET;
-       arb_info->wt2sam_offset = ADF_ARB_WRK_2_SER_MAP_OFFSET;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info);
-
-void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *addr = adf_get_pmisc_base(accel_dev);
-       u32 val;
-
-       val = accel_dev->pf.vf_info ? 0 : BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1;
-
-       /* Enable bundle and misc interrupts */
-       ADF_CSR_WR(addr, ADF_GEN2_SMIAPF0_MASK_OFFSET, val);
-       ADF_CSR_WR(addr, ADF_GEN2_SMIAPF1_MASK_OFFSET, ADF_GEN2_SMIA1_MASK);
-}
-EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);
-
-static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
-{
-       return BUILD_RING_BASE_ADDR(addr, size);
-}
-
-static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
-       return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               u32 value)
-{
-       WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
-       return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               u32 value)
-{
-       WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
-{
-       return READ_CSR_E_STAT(csr_base_addr, bank);
-}
-
-static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
-                                 u32 ring, u32 value)
-{
-       WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
-}
-
-static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               dma_addr_t addr)
-{
-       WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
-}
-
-static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
-{
-       WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
-{
-       WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
-}
-
-static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
-                                u32 value)
-{
-       WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
-                                 u32 value)
-{
-       WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
-                                      u32 value)
-{
-       WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
-}
-
-static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
-                                     u32 value)
-{
-       WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
-}
-
-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
-{
-       csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
-       csr_ops->read_csr_ring_head = read_csr_ring_head;
-       csr_ops->write_csr_ring_head = write_csr_ring_head;
-       csr_ops->read_csr_ring_tail = read_csr_ring_tail;
-       csr_ops->write_csr_ring_tail = write_csr_ring_tail;
-       csr_ops->read_csr_e_stat = read_csr_e_stat;
-       csr_ops->write_csr_ring_config = write_csr_ring_config;
-       csr_ops->write_csr_ring_base = write_csr_ring_base;
-       csr_ops->write_csr_int_flag = write_csr_int_flag;
-       csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
-       csr_ops->write_csr_int_col_en = write_csr_int_col_en;
-       csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
-       csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
-       csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
-
-u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
-       u32 straps = hw_data->straps;
-       u32 fuses = hw_data->fuses;
-       u32 legfuses;
-       u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
-                          ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
-                          ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
-                          ICP_ACCEL_CAPABILITIES_CIPHER |
-                          ICP_ACCEL_CAPABILITIES_COMPRESSION;
-
-       /* Read accelerator capabilities mask */
-       pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
-
-       /* A set bit in legfuses means the feature is OFF in this SKU */
-       if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
-       if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
-
-       if ((straps | fuses) & ADF_POWERGATE_PKE)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
-
-       if ((straps | fuses) & ADF_POWERGATE_DC)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
-
-       return capabilities;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
-
-void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
-       u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
-       unsigned long accel_mask = hw_data->accel_mask;
-       u32 i = 0;
-
-       /* Configures WDT timers */
-       for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
-               /* Enable WDT for sym and dc */
-               ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
-               /* Enable WDT for pke */
-               ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
-       }
-}
-EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
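
A reading aid for adf_gen2_get_accel_cap() above: legfuses has inverted polarity, so a set bit removes a capability. A self-contained illustration using only the masks from the driver headers:

        u32 legfuses = ICP_ACCEL_MASK_COMPRESS_SLICE; /* compression slice fused off */
        u32 caps = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
                   ICP_ACCEL_CAPABILITIES_COMPRESSION;

        if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
                caps &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
        /* caps now advertises symmetric crypto only; the straps/fuses
         * power-gate bits (ADF_POWERGATE_*) clear further bits the
         * same way */
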
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
deleted file mode 100644 (file)
index e4bc075..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2020 Intel Corporation */
-#ifndef ADF_GEN2_HW_DATA_H_
-#define ADF_GEN2_HW_DATA_H_
-
-#include "adf_accel_devices.h"
-#include "adf_cfg_common.h"
-
-/* Transport access */
-#define ADF_BANK_INT_SRC_SEL_MASK_0    0x4444444CUL
-#define ADF_BANK_INT_SRC_SEL_MASK_X    0x44444444UL
-#define ADF_RING_CSR_RING_CONFIG       0x000
-#define ADF_RING_CSR_RING_LBASE                0x040
-#define ADF_RING_CSR_RING_UBASE                0x080
-#define ADF_RING_CSR_RING_HEAD         0x0C0
-#define ADF_RING_CSR_RING_TAIL         0x100
-#define ADF_RING_CSR_E_STAT            0x14C
-#define ADF_RING_CSR_INT_FLAG          0x170
-#define ADF_RING_CSR_INT_SRCSEL                0x174
-#define ADF_RING_CSR_INT_SRCSEL_2      0x178
-#define ADF_RING_CSR_INT_COL_EN                0x17C
-#define ADF_RING_CSR_INT_COL_CTL       0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL  0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
-#define ADF_RING_BUNDLE_SIZE           0x1000
-#define ADF_GEN2_RX_RINGS_OFFSET       8
-#define ADF_GEN2_TX_RINGS_MASK         0xFF
-
-#define BUILD_RING_BASE_ADDR(addr, size) \
-       (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
-       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_HEAD + ((ring) << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
-       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_TAIL + ((ring) << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
-       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
-do { \
-       u32 l_base = 0, u_base = 0; \
-       l_base = (u32)((value) & 0xFFFFFFFF); \
-       u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \
-} while (0)
-
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
-#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_INT_FLAG, value)
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
-do { \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-       ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-       ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
-} while (0)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_INT_COL_EN, value)
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_INT_COL_CTL, \
-                  ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_INT_FLAG_AND_COL, value)
-
-/* AE to function map */
-#define AE2FUNCTION_MAP_A_OFFSET       (0x3A400 + 0x190)
-#define AE2FUNCTION_MAP_B_OFFSET       (0x3A400 + 0x310)
-#define AE2FUNCTION_MAP_REG_SIZE       4
-#define AE2FUNCTION_MAP_VALID          BIT(7)
-
-#define READ_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index) \
-       ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
-                  AE2FUNCTION_MAP_REG_SIZE * (index))
-#define WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
-       ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
-                  AE2FUNCTION_MAP_REG_SIZE * (index), value)
-#define READ_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index) \
-       ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
-                  AE2FUNCTION_MAP_REG_SIZE * (index))
-#define WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
-       ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
-                  AE2FUNCTION_MAP_REG_SIZE * (index), value)
-
-/* Admin Interface Offsets */
-#define ADF_ADMINMSGUR_OFFSET  (0x3A000 + 0x574)
-#define ADF_ADMINMSGLR_OFFSET  (0x3A000 + 0x578)
-#define ADF_MAILBOX_BASE_OFFSET        0x20970
-
-/* Arbiter configuration */
-#define ADF_ARB_OFFSET                 0x30000
-#define ADF_ARB_WRK_2_SER_MAP_OFFSET   0x180
-#define ADF_ARB_CONFIG                 (BIT(31) | BIT(6) | BIT(0))
-#define ADF_ARB_REG_SLOT               0x1000
-#define ADF_ARB_RINGSRVARBEN_OFFSET    0x19C
-
-#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
-       ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
-       (ADF_ARB_REG_SLOT * (index)), value)
-
-/* Power gating */
-#define ADF_POWERGATE_DC               BIT(23)
-#define ADF_POWERGATE_PKE              BIT(24)
-
-/* Default ring mapping */
-#define ADF_GEN2_DEFAULT_RING_TO_SRV_MAP \
-       (CRYPTO << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
-        CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
-        UNUSED << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
-          COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
-
-/* WDT timers
- *
- * Timeout is in cycles. Clock speed may vary across products, but this
- * value should correspond to a few milliseconds.
- */
-#define ADF_SSM_WDT_DEFAULT_VALUE      0x200000
-#define ADF_SSM_WDT_PKE_DEFAULT_VALUE  0x2000000
-#define ADF_SSMWDT_OFFSET              0x54
-#define ADF_SSMWDTPKE_OFFSET           0x58
-#define ADF_SSMWDT(i)          (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
-#define ADF_SSMWDTPKE(i)       (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
-
-/* Error detection and correction */
-#define ADF_GEN2_AE_CTX_ENABLES(i)     ((i) * 0x1000 + 0x20818)
-#define ADF_GEN2_AE_MISC_CONTROL(i)    ((i) * 0x1000 + 0x20960)
-#define ADF_GEN2_ENABLE_AE_ECC_ERR     BIT(28)
-#define ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR     (BIT(24) | BIT(12))
-#define ADF_GEN2_UERRSSMSH(i)          ((i) * 0x4000 + 0x18)
-#define ADF_GEN2_CERRSSMSH(i)          ((i) * 0x4000 + 0x10)
-#define ADF_GEN2_ERRSSMSH_EN           BIT(3)
-
-/* Interrupts */
-#define ADF_GEN2_SMIAPF0_MASK_OFFSET    (0x3A000 + 0x28)
-#define ADF_GEN2_SMIAPF1_MASK_OFFSET    (0x3A000 + 0x30)
-#define ADF_GEN2_SMIA1_MASK             0x1
-
-u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
-u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
-void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
-void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
-                          int num_a_regs, int num_b_regs);
-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
-void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
-void adf_gen2_get_arb_info(struct arb_info *arb_info);
-void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
-u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
-void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
-
-#endif
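
All transport macros in this header share one addressing scheme: a 0x1000-byte bundle per bank, a fixed per-register offset within the bundle, and four bytes per ring. A worked example (the helper name is illustrative; the constants are the ones defined above):

        static u32 gen2_ring_head_offset(u32 bank, u32 ring)
        {
                return ADF_RING_BUNDLE_SIZE * bank +
                       ADF_RING_CSR_RING_HEAD + (ring << 2);
        }
        /* bank 1, ring 2: 0x1000 * 1 + 0x0C0 + (2 << 2) = 0x10C8 */
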
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
deleted file mode 100644 (file)
index 70ef119..0000000
+++ /dev/null
@@ -1,399 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2021 Intel Corporation */
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_gen2_pfvf.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_pf_proto.h"
-#include "adf_pfvf_vf_proto.h"
-#include "adf_pfvf_utils.h"
-
-/* VF2PF interrupts */
-#define ADF_GEN2_VF_MSK                        0xFFFF
-#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
-#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask)        (((vf_mask) & ADF_GEN2_VF_MSK) << 9)
-
-#define ADF_GEN2_PF_PF2VF_OFFSET(i)    (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_GEN2_VF_PF2VF_OFFSET       0x200
-
-#define ADF_GEN2_CSR_IN_USE            0x6AC2
-#define ADF_GEN2_CSR_IN_USE_MASK       0xFFFE
-
-enum gen2_csr_pos {
-       ADF_GEN2_CSR_PF2VF_OFFSET       =  0,
-       ADF_GEN2_CSR_VF2PF_OFFSET       = 16,
-};
-
-#define ADF_PFVF_GEN2_MSGTYPE_SHIFT    2
-#define ADF_PFVF_GEN2_MSGTYPE_MASK     0x0F
-#define ADF_PFVF_GEN2_MSGDATA_SHIFT    6
-#define ADF_PFVF_GEN2_MSGDATA_MASK     0x3FF
-
-static const struct pfvf_csr_format csr_gen2_fmt = {
-       { ADF_PFVF_GEN2_MSGTYPE_SHIFT, ADF_PFVF_GEN2_MSGTYPE_MASK },
-       { ADF_PFVF_GEN2_MSGDATA_SHIFT, ADF_PFVF_GEN2_MSGDATA_MASK },
-};
-
-#define ADF_PFVF_MSG_RETRY_DELAY       5
-#define ADF_PFVF_MSG_MAX_RETRIES       3
-
-static u32 adf_gen2_pf_get_pfvf_offset(u32 i)
-{
-       return ADF_GEN2_PF_PF2VF_OFFSET(i);
-}
-
-static u32 adf_gen2_vf_get_pfvf_offset(u32 i)
-{
-       return ADF_GEN2_VF_PF2VF_OFFSET;
-}
-
-static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
-{
-       /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
-       if (vf_mask & ADF_GEN2_VF_MSK) {
-               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
-                         & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
-       }
-}
-
-static void adf_gen2_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
-       u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
-                 | ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
-}
-
-static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       u32 sources, disabled, pending;
-       u32 errsou3, errmsk3;
-
-       /* Get the interrupt sources triggered by VFs */
-       errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
-       sources = ADF_GEN2_ERR_REG_VF2PF(errsou3);
-
-       if (!sources)
-               return 0;
-
-       /* Get the already disabled interrupts */
-       errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
-       disabled = ADF_GEN2_ERR_REG_VF2PF(errmsk3);
-
-       pending = sources & ~disabled;
-       if (!pending)
-               return 0;
-
-       /* Due to HW limitations, when disabling the interrupts, we can't
-        * just disable the requested sources, as this would lead to missed
-        * interrupts if ERRSOU3 changes just before writing to ERRMSK3.
-        * To work around it, disable all and re-enable only the sources that
-        * are neither pending nor already disabled. Re-enabling will
-        * trigger a new interrupt for the sources that have changed in the
-        * meantime, if any.
-        */
-       errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
-
-       errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
-
-       /* Return the sources of the (new) interrupt(s) */
-       return pending;
-}
-
-static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset)
-{
-       return ADF_PFVF_INT << offset;
-}
-
-static u32 gen2_csr_msg_to_position(u32 csr_msg, enum gen2_csr_pos offset)
-{
-       return (csr_msg & 0xFFFF) << offset;
-}
-
-static u32 gen2_csr_msg_from_position(u32 csr_val, enum gen2_csr_pos offset)
-{
-       return (csr_val >> offset) & 0xFFFF;
-}
-
-static bool gen2_csr_is_in_use(u32 msg, enum gen2_csr_pos offset)
-{
-       return ((msg >> offset) & ADF_GEN2_CSR_IN_USE_MASK) == ADF_GEN2_CSR_IN_USE;
-}
-
-static void gen2_csr_clear_in_use(u32 *msg, enum gen2_csr_pos offset)
-{
-       *msg &= ~(ADF_GEN2_CSR_IN_USE_MASK << offset);
-}
-
-static void gen2_csr_set_in_use(u32 *msg, enum gen2_csr_pos offset)
-{
-       *msg |= (ADF_GEN2_CSR_IN_USE << offset);
-}
-
-static bool is_legacy_user_pfvf_message(u32 msg)
-{
-       return !(msg & ADF_PFVF_MSGORIGIN_SYSTEM);
-}
-
-static bool is_pf2vf_notification(u8 msg_type)
-{
-       switch (msg_type) {
-       case ADF_PF2VF_MSGTYPE_RESTARTING:
-               return true;
-       default:
-               return false;
-       }
-}
-
-static bool is_vf2pf_notification(u8 msg_type)
-{
-       switch (msg_type) {
-       case ADF_VF2PF_MSGTYPE_INIT:
-       case ADF_VF2PF_MSGTYPE_SHUTDOWN:
-               return true;
-       default:
-               return false;
-       }
-}
-
-struct pfvf_gen2_params {
-       u32 pfvf_offset;
-       struct mutex *csr_lock; /* lock preventing concurrent access to the CSR */
-       enum gen2_csr_pos local_offset;
-       enum gen2_csr_pos remote_offset;
-       bool (*is_notification_message)(u8 msg_type);
-       u8 compat_ver;
-};
-
-static int adf_gen2_pfvf_send(struct adf_accel_dev *accel_dev,
-                             struct pfvf_message msg,
-                             struct pfvf_gen2_params *params)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       enum gen2_csr_pos remote_offset = params->remote_offset;
-       enum gen2_csr_pos local_offset = params->local_offset;
-       unsigned int retries = ADF_PFVF_MSG_MAX_RETRIES;
-       struct mutex *lock = params->csr_lock;
-       u32 pfvf_offset = params->pfvf_offset;
-       u32 int_bit;
-       u32 csr_val;
-       u32 csr_msg;
-       int ret;
-
-       /* Gen2 messages, both PF->VF and VF->PF, are all 16 bits long. This
-        * allows us to build and read messages as if they were all 0-based.
-        * However, send and receive share a single 32-bit register,
-        * so we need to shift and/or mask the message half before decoding
-        * it and after encoding it. Which one to shift depends on the
-        * direction.
-        */
-
-       int_bit = gen2_csr_get_int_bit(local_offset);
-
-       csr_msg = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen2_fmt);
-       if (unlikely(!csr_msg))
-               return -EINVAL;
-
-       /* Prepare for CSR format, shifting the wire message in place and
-        * setting the in-use pattern
-        */
-       csr_msg = gen2_csr_msg_to_position(csr_msg, local_offset);
-       gen2_csr_set_in_use(&csr_msg, remote_offset);
-
-       mutex_lock(lock);
-
-start:
-       /* Check if the PFVF CSR is in use by remote function */
-       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
-       if (gen2_csr_is_in_use(csr_val, local_offset)) {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "PFVF CSR in use by remote function\n");
-               goto retry;
-       }
-
-       /* Attempt to get ownership of the PFVF CSR */
-       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_msg | int_bit);
-
-       /* Wait for confirmation from remote func that it received the message */
-       ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & int_bit),
-                               ADF_PFVF_MSG_ACK_DELAY_US,
-                               ADF_PFVF_MSG_ACK_MAX_DELAY_US,
-                               true, pmisc_addr, pfvf_offset);
-       if (unlikely(ret < 0)) {
-               dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
-               csr_val &= ~int_bit;
-       }
-
-       /* For fire-and-forget notifications, the receiver does not clear
-        * the in-use pattern. This is used to detect collisions.
-        */
-       if (params->is_notification_message(msg.type) && csr_val != csr_msg) {
-               /* Collision must have overwritten the message */
-               dev_err(&GET_DEV(accel_dev),
-                       "Collision on notification - PFVF CSR overwritten by remote function\n");
-               goto retry;
-       }
-
-       /* If the far side did not clear the in-use pattern it is either
-        * 1) Notification - message left intact to detect collision
-        * 2) Older protocol (compatibility version < 3) on the far side
-        *    where the sender is responsible for clearing the in-use
-        *    pattern after the receiver has acknowledged receipt.
-        * In either case, clear the in-use pattern now.
-        */
-       if (gen2_csr_is_in_use(csr_val, remote_offset)) {
-               gen2_csr_clear_in_use(&csr_val, remote_offset);
-               ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
-       }
-
-out:
-       mutex_unlock(lock);
-       return ret;
-
-retry:
-       if (--retries) {
-               msleep(ADF_PFVF_MSG_RETRY_DELAY);
-               goto start;
-       } else {
-               ret = -EBUSY;
-               goto out;
-       }
-}
-
-static struct pfvf_message adf_gen2_pfvf_recv(struct adf_accel_dev *accel_dev,
-                                             struct pfvf_gen2_params *params)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       enum gen2_csr_pos remote_offset = params->remote_offset;
-       enum gen2_csr_pos local_offset = params->local_offset;
-       u32 pfvf_offset = params->pfvf_offset;
-       struct pfvf_message msg = { 0 };
-       u32 int_bit;
-       u32 csr_val;
-       u16 csr_msg;
-
-       int_bit = gen2_csr_get_int_bit(local_offset);
-
-       /* Read message */
-       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
-       if (!(csr_val & int_bit)) {
-               dev_info(&GET_DEV(accel_dev),
-                        "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
-               return msg;
-       }
-
-       /* Extract the message from the CSR */
-       csr_msg = gen2_csr_msg_from_position(csr_val, local_offset);
-
-       /* Ignore legacy non-system (non-kernel) messages */
-       if (unlikely(is_legacy_user_pfvf_message(csr_msg))) {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Ignored non-system message (0x%.8x)\n", csr_val);
-               /* Because this must be a legacy message, clearing the
-                * in-use pattern is the sender's responsibility; don't do it here.
-                */
-               return msg;
-       }
-
-       /* Return the pfvf_message format */
-       msg = adf_pfvf_message_of(accel_dev, csr_msg, &csr_gen2_fmt);
-
-       /* The in-use pattern is not cleared for notifications (so that
-        * it can be used for collision detection) or older implementations
-        */
-       if (params->compat_ver >= ADF_PFVF_COMPAT_FAST_ACK &&
-           !params->is_notification_message(msg.type))
-               gen2_csr_clear_in_use(&csr_val, remote_offset);
-
-       /* To ACK, clear the INT bit */
-       csr_val &= ~int_bit;
-       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
-
-       return msg;
-}
-
-static int adf_gen2_pf2vf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                              u32 pfvf_offset, struct mutex *csr_lock)
-{
-       struct pfvf_gen2_params params = {
-               .csr_lock = csr_lock,
-               .pfvf_offset = pfvf_offset,
-               .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
-               .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
-               .is_notification_message = is_pf2vf_notification,
-       };
-
-       return adf_gen2_pfvf_send(accel_dev, msg, &params);
-}
-
-static int adf_gen2_vf2pf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                              u32 pfvf_offset, struct mutex *csr_lock)
-{
-       struct pfvf_gen2_params params = {
-               .csr_lock = csr_lock,
-               .pfvf_offset = pfvf_offset,
-               .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
-               .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
-               .is_notification_message = is_vf2pf_notification,
-       };
-
-       return adf_gen2_pfvf_send(accel_dev, msg, &params);
-}
-
-static struct pfvf_message adf_gen2_pf2vf_recv(struct adf_accel_dev *accel_dev,
-                                              u32 pfvf_offset, u8 compat_ver)
-{
-       struct pfvf_gen2_params params = {
-               .pfvf_offset = pfvf_offset,
-               .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
-               .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
-               .is_notification_message = is_pf2vf_notification,
-               .compat_ver = compat_ver,
-       };
-
-       return adf_gen2_pfvf_recv(accel_dev, &params);
-}
-
-static struct pfvf_message adf_gen2_vf2pf_recv(struct adf_accel_dev *accel_dev,
-                                              u32 pfvf_offset, u8 compat_ver)
-{
-       struct pfvf_gen2_params params = {
-               .pfvf_offset = pfvf_offset,
-               .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
-               .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
-               .is_notification_message = is_vf2pf_notification,
-               .compat_ver = compat_ver,
-       };
-
-       return adf_gen2_pfvf_recv(accel_dev, &params);
-}
-
-void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
-       pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset;
-       pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset;
-       pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
-       pfvf_ops->disable_all_vf2pf_interrupts = adf_gen2_disable_all_vf2pf_interrupts;
-       pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen2_disable_pending_vf2pf_interrupts;
-       pfvf_ops->send_msg = adf_gen2_pf2vf_send;
-       pfvf_ops->recv_msg = adf_gen2_vf2pf_recv;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_pf_pfvf_ops);
-
-void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_enable_vf2pf_comms;
-       pfvf_ops->get_pf2vf_offset = adf_gen2_vf_get_pfvf_offset;
-       pfvf_ops->get_vf2pf_offset = adf_gen2_vf_get_pfvf_offset;
-       pfvf_ops->send_msg = adf_gen2_vf2pf_send;
-       pfvf_ops->recv_msg = adf_gen2_pf2vf_recv;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_vf_pfvf_ops);
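
The shared-register comment in adf_gen2_pfvf_send() is easier to follow with concrete bits. A PF-side sketch (0x1234 is an arbitrary wire message; the constants and helpers are the ones defined in this file):

        u32 csr = 0;
        u32 wire_msg = 0x1234; /* hypothetical 16-bit message */

        /* place the message in the local (PF -> VF) half, bits 15:0 */
        csr |= gen2_csr_msg_to_position(wire_msg, ADF_GEN2_CSR_PF2VF_OFFSET);
        /* claim the remote (VF -> PF) half with the in-use pattern */
        gen2_csr_set_in_use(&csr, ADF_GEN2_CSR_VF2PF_OFFSET);
        /* csr == 0x6AC21234; the VF recovers its half with
         * gen2_csr_msg_from_position(csr, ADF_GEN2_CSR_PF2VF_OFFSET) */
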
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h
deleted file mode 100644 (file)
index a716545..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_GEN2_PFVF_H
-#define ADF_GEN2_PFVF_H
-
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-
-#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
-#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
-#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
-#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
-
-#if defined(CONFIG_PCI_IOV)
-void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
-void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
-#else
-static inline void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
-}
-
-static inline void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
-}
-#endif
-
-#endif /* ADF_GEN2_PFVF_H */
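
With CONFIG_PCI_IOV disabled, the static inline stubs above leave every other pfvf_ops slot unset and point enable_comms at adf_pfvf_comms_disabled(), a no-op that returns 0, so callers need no SR-IOV conditionals of their own. A hedged sketch; the pfvf_ops field on hw_data is assumed from how these init helpers are used:

        adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
        /* returns 0 immediately when PCI_IOV is compiled out */
        ret = hw_data->pfvf_ops.enable_comms(accel_dev);
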
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/qat/qat_common/adf_gen4_dc.c
deleted file mode 100644 (file)
index 5859238..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "icp_qat_fw_comp.h"
-#include "icp_qat_hw_20_comp.h"
-#include "adf_gen4_dc.h"
-
-static void qat_comp_build_deflate(void *ctx)
-{
-       struct icp_qat_fw_comp_req *req_tmpl =
-                               (struct icp_qat_fw_comp_req *)ctx;
-       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
-       struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
-       struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
-       struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
-       struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
-       struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
-       u32 upper_val;
-       u32 lower_val;
-
-       memset(req_tmpl, 0, sizeof(*req_tmpl));
-       header->hdr_flags =
-               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
-       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
-       header->comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
-                                           QAT_COMN_PTR_TYPE_SGL);
-       header->serv_specif_flags =
-               ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
-                                           ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
-       hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
-       hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
-       hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
-       hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
-       hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
-       hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
-       hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
-       hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
-
-       upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
-       lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
-
-       cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
-       cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
-
-       req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
-       req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
-       req_pars->req_par_flags =
-               ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
-                                                     ICP_QAT_FW_COMP_EOP,
-                                                     ICP_QAT_FW_COMP_BFINAL,
-                                                     ICP_QAT_FW_COMP_CNV,
-                                                     ICP_QAT_FW_COMP_CNV_RECOVERY,
-                                                     ICP_QAT_FW_COMP_NO_CNV_DFX,
-                                                     ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
-                                                     ICP_QAT_FW_COMP_NO_XXHASH_ACC,
-                                                     ICP_QAT_FW_COMP_CNV_ERROR_NONE,
-                                                     ICP_QAT_FW_COMP_NO_APPEND_CRC,
-                                                     ICP_QAT_FW_COMP_NO_DROP_DATA);
-
-       /* Fill second half of the template for decompression */
-       memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
-       req_tmpl++;
-       header = &req_tmpl->comn_hdr;
-       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
-       cd_pars = &req_tmpl->cd_pars;
-
-       hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
-       lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
-
-       cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
-       cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
-}
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
-       dc_ops->build_deflate_ctx = qat_comp_build_deflate;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/qat/qat_common/adf_gen4_dc.h
deleted file mode 100644 (file)
index 0b1a677..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN4_DC_H
-#define ADF_GEN4_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN4_DC_H */
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
deleted file mode 100644 (file)
index 3148a62..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2020 Intel Corporation */
-#include <linux/iopoll.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_gen4_hw_data.h"
-
-static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
-{
-       return BUILD_RING_BASE_ADDR(addr, size);
-}
-
-static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
-       return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               u32 value)
-{
-       WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
-       return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               u32 value)
-{
-       WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
-{
-       return READ_CSR_E_STAT(csr_base_addr, bank);
-}
-
-static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                                 u32 value)
-{
-       WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
-}
-
-static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               dma_addr_t addr)
-{
-       WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
-}
-
-static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
-                              u32 value)
-{
-       WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
-{
-       WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
-}
-
-static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
-{
-       WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
-                                 u32 value)
-{
-       WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
-                                      u32 value)
-{
-       WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
-}
-
-static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
-                                     u32 value)
-{
-       WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
-}
-
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
-{
-       csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
-       csr_ops->read_csr_ring_head = read_csr_ring_head;
-       csr_ops->write_csr_ring_head = write_csr_ring_head;
-       csr_ops->read_csr_ring_tail = read_csr_ring_tail;
-       csr_ops->write_csr_ring_tail = write_csr_ring_tail;
-       csr_ops->read_csr_e_stat = read_csr_e_stat;
-       csr_ops->write_csr_ring_config = write_csr_ring_config;
-       csr_ops->write_csr_ring_base = write_csr_ring_base;
-       csr_ops->write_csr_int_flag = write_csr_int_flag;
-       csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
-       csr_ops->write_csr_int_col_en = write_csr_int_col_en;
-       csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
-       csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
-       csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
-
-static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
-                                              u32 *lower)
-{
-       *lower = lower_32_bits(value);
-       *upper = upper_32_bits(value);
-}
-
-void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
-       u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
-       u32 ssm_wdt_pke_high = 0;
-       u32 ssm_wdt_pke_low = 0;
-       u32 ssm_wdt_high = 0;
-       u32 ssm_wdt_low = 0;
-
-       /* Convert 64bit WDT timer value into 32bit values for
-        * mmio write to 32bit CSRs.
-        */
-       adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
-       adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
-                                   &ssm_wdt_pke_low);
-
-       /* Enable WDT for sym and dc */
-       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
-       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
-       /* Enable WDT for pke */
-       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
-       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
-}
-EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
-
-int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
-{
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);
-
-static int reset_ring_pair(void __iomem *csr, u32 bank_number)
-{
-       u32 status;
-       int ret;
-
-       /* Write rpresetctl register BIT(0) as 1
-        * Since rpresetctl registers have no RW fields, no need to preserve
-        * values for other bits. Just write directly.
-        */
-       ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
-                  ADF_WQM_CSR_RPRESETCTL_RESET);
-
-       /* Read rpresetsts register and wait for rp reset to complete */
-       ret = read_poll_timeout(ADF_CSR_RD, status,
-                               status & ADF_WQM_CSR_RPRESETSTS_STATUS,
-                               ADF_RPRESET_POLL_DELAY_US,
-                               ADF_RPRESET_POLL_TIMEOUT_US, true,
-                               csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
-       if (!ret) {
-               /* When rp reset is done, clear rpresetsts */
-               ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
-                          ADF_WQM_CSR_RPRESETSTS_STATUS);
-       }
-
-       return ret;
-}
-
-int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
-       void __iomem *csr;
-       int ret;
-
-       if (bank_number >= hw_data->num_banks)
-               return -EINVAL;
-
-       dev_dbg(&GET_DEV(accel_dev),
-               "ring pair reset for bank:%d\n", bank_number);
-
-       csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
-       ret = reset_ring_pair(csr, bank_number);
-       if (ret)
-               dev_err(&GET_DEV(accel_dev),
-                       "ring pair reset failed (timeout)\n");
-       else
-               dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
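
A usage sketch for the ring-pair reset exported above (hedged; this is not the driver's actual recovery path). read_poll_timeout() from <linux/iopoll.h> returns 0 once the condition is met and -ETIMEDOUT otherwise, which is why reset_ring_pair() clears RPRESETSTS only on !ret:

        int err = adf_gen4_ring_pair_reset(accel_dev, 0); /* reset bank 0 */

        if (err) /* -EINVAL for bank >= num_banks, -ETIMEDOUT on poll timeout */
                dev_err(&GET_DEV(accel_dev), "bank 0 reset failed: %d\n", err);
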
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
deleted file mode 100644 (file)
index 4fb4b3d..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2020 Intel Corporation */
-#ifndef ADF_GEN4_HW_CSR_DATA_H_
-#define ADF_GEN4_HW_CSR_DATA_H_
-
-#include "adf_accel_devices.h"
-#include "adf_cfg_common.h"
-
-/* Transport access */
-#define ADF_BANK_INT_SRC_SEL_MASK      0x44UL
-#define ADF_RING_CSR_RING_CONFIG       0x1000
-#define ADF_RING_CSR_RING_LBASE                0x1040
-#define ADF_RING_CSR_RING_UBASE                0x1080
-#define ADF_RING_CSR_RING_HEAD         0x0C0
-#define ADF_RING_CSR_RING_TAIL         0x100
-#define ADF_RING_CSR_E_STAT            0x14C
-#define ADF_RING_CSR_INT_FLAG          0x170
-#define ADF_RING_CSR_INT_SRCSEL                0x174
-#define ADF_RING_CSR_INT_COL_CTL       0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL  0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
-#define ADF_RING_CSR_INT_COL_EN                0x17C
-#define ADF_RING_CSR_ADDR_OFFSET       0x100000
-#define ADF_RING_BUNDLE_SIZE           0x2000
-
-#define BUILD_RING_BASE_ADDR(addr, size) \
-       ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
-       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_HEAD + ((ring) << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
-       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_TAIL + ((ring) << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
-       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value)  \
-do { \
-       void __iomem *_csr_base_addr = csr_base_addr; \
-       u32 _bank = bank;                                               \
-       u32 _ring = ring;                                               \
-       dma_addr_t _value = value;                                      \
-       u32 l_base = 0, u_base = 0;                                     \
-       l_base = lower_32_bits(_value);                                 \
-       u_base = upper_32_bits(_value);                                 \
-       ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET,         \
-                  ADF_RING_BUNDLE_SIZE * (_bank) +                     \
-                  ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base);   \
-       ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET,         \
-                  ADF_RING_BUNDLE_SIZE * (_bank) +                     \
-                  ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base);   \
-} while (0)
-
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
-#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_INT_FLAG, (value))
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_INT_COL_EN, (value))
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_INT_COL_CTL, \
-                  ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_INT_FLAG_AND_COL, (value))
-
-/* Arbiter configuration */
-#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
-
-#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_SRV_ARB_EN, (value))
-
-/* Default ring mapping */
-#define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \
-       (ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
-         SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
-        ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
-         SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
-
-/* WDT timers
- *
- * Timeout is in cycles. Clock speed may vary across products, but this
- * value should correspond to a few milliseconds.
- */
-#define ADF_SSM_WDT_DEFAULT_VALUE      0x7000000ULL
-#define ADF_SSM_WDT_PKE_DEFAULT_VALUE  0x8000000
-#define ADF_SSMWDTL_OFFSET             0x54
-#define ADF_SSMWDTH_OFFSET             0x5C
-#define ADF_SSMWDTPKEL_OFFSET          0x58
-#define ADF_SSMWDTPKEH_OFFSET          0x60
-
-/* Ring reset */
-#define ADF_RPRESET_POLL_TIMEOUT_US    (5 * USEC_PER_SEC)
-#define ADF_RPRESET_POLL_DELAY_US      20
-#define ADF_WQM_CSR_RPRESETCTL_RESET   BIT(0)
-#define ADF_WQM_CSR_RPRESETCTL(bank)   (0x6000 + ((bank) << 3))
-#define ADF_WQM_CSR_RPRESETSTS_STATUS  BIT(0)
-#define ADF_WQM_CSR_RPRESETSTS(bank)   (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
-
-/* Error source registers */
-#define ADF_GEN4_ERRSOU0       (0x41A200)
-#define ADF_GEN4_ERRSOU1       (0x41A204)
-#define ADF_GEN4_ERRSOU2       (0x41A208)
-#define ADF_GEN4_ERRSOU3       (0x41A20C)
-
-/* Error source mask registers */
-#define ADF_GEN4_ERRMSK0       (0x41A210)
-#define ADF_GEN4_ERRMSK1       (0x41A214)
-#define ADF_GEN4_ERRMSK2       (0x41A218)
-#define ADF_GEN4_ERRMSK3       (0x41A21C)
-
-#define ADF_GEN4_VFLNOTIFY     BIT(7)
-
-void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
-int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
-#endif
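
One difference from the gen2 header is worth noting: gen4's BUILD_RING_BASE_ADDR shifts the masked value back left by 6, so WRITE_CSR_RING_BASE stores the full byte address, naturally aligned to 2^(size + 6) bytes, instead of gen2's form pre-shifted right by 6. A worked value (wrapper name and numbers are illustrative):

        static u64 gen4_ring_base(dma_addr_t addr, u32 size)
        {
                return BUILD_RING_BASE_ADDR(addr, size);
        }
        /* gen4_ring_base(0x12345FFF, 4) == 0x12345C00: the low
         * size + 6 = 10 bits are cleared, i.e. a 1 KiB-aligned base */
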
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
deleted file mode 100644 (file)
index 8e8efe9..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2021 Intel Corporation */
-#include <linux/iopoll.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_gen4_pfvf.h"
-#include "adf_pfvf_pf_proto.h"
-#include "adf_pfvf_utils.h"
-
-#define ADF_4XXX_PF2VM_OFFSET(i)       (0x40B010 + ((i) * 0x20))
-#define ADF_4XXX_VM2PF_OFFSET(i)       (0x40B014 + ((i) * 0x20))
-
-/* VF2PF interrupt source registers */
-#define ADF_4XXX_VM2PF_SOU             0x41A180
-#define ADF_4XXX_VM2PF_MSK             0x41A1C0
-#define ADF_GEN4_VF_MSK                        0xFFFF
-
-#define ADF_PFVF_GEN4_MSGTYPE_SHIFT    2
-#define ADF_PFVF_GEN4_MSGTYPE_MASK     0x3F
-#define ADF_PFVF_GEN4_MSGDATA_SHIFT    8
-#define ADF_PFVF_GEN4_MSGDATA_MASK     0xFFFFFF
-
-static const struct pfvf_csr_format csr_gen4_fmt = {
-       { ADF_PFVF_GEN4_MSGTYPE_SHIFT, ADF_PFVF_GEN4_MSGTYPE_MASK },
-       { ADF_PFVF_GEN4_MSGDATA_SHIFT, ADF_PFVF_GEN4_MSGDATA_MASK },
-};
-
-static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
-{
-       return ADF_4XXX_PF2VM_OFFSET(i);
-}
-
-static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
-{
-       return ADF_4XXX_VM2PF_OFFSET(i);
-}
-
-static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
-{
-       u32 val;
-
-       val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
-       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
-}
-
-static void adf_gen4_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
-}
-
-static u32 adf_gen4_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       u32 sources, disabled, pending;
-
-       /* Get the interrupt sources triggered by VFs */
-       sources = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
-       if (!sources)
-               return 0;
-
-       /* Get the already disabled interrupts */
-       disabled = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
-
-       pending = sources & ~disabled;
-       if (!pending)
-               return 0;
-
-       /* Due to HW limitations, when disabling the interrupts, we can't
-        * just disable the requested sources, as this would lead to missed
-        * interrupts if VM2PF_SOU changes just before writing to VM2PF_MSK.
-        * To work around it, disable all sources and then re-enable only
-        * those that are neither pending nor already disabled. Re-enabling
-        * will trigger a new interrupt for the sources that have changed in
-        * the meantime, if any.
-        */
-       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, disabled | sources);
-
-       /* Return the sources of the (new) interrupt(s) */
-       return pending;
-}
-
-static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
-                             struct pfvf_message msg, u32 pfvf_offset,
-                             struct mutex *csr_lock)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       u32 csr_val;
-       int ret;
-
-       csr_val = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen4_fmt);
-       if (unlikely(!csr_val))
-               return -EINVAL;
-
-       mutex_lock(csr_lock);
-
-       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val | ADF_PFVF_INT);
-
-       /* Wait for confirmation from remote that it received the message */
-       ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & ADF_PFVF_INT),
-                               ADF_PFVF_MSG_ACK_DELAY_US,
-                               ADF_PFVF_MSG_ACK_MAX_DELAY_US,
-                               true, pmisc_addr, pfvf_offset);
-       if (ret < 0)
-               dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
-
-       mutex_unlock(csr_lock);
-       return ret;
-}
-
-static struct pfvf_message adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev,
-                                             u32 pfvf_offset, u8 compat_ver)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       struct pfvf_message msg = { 0 };
-       u32 csr_val;
-
-       /* Read message from the CSR */
-       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
-       if (!(csr_val & ADF_PFVF_INT)) {
-               dev_info(&GET_DEV(accel_dev),
-                        "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
-               return msg;
-       }
-
-       /* We can now acknowledge the message reception by clearing the
-        * interrupt bit
-        */
-       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val & ~ADF_PFVF_INT);
-
-       /* Return the pfvf_message format */
-       return adf_pfvf_message_of(accel_dev, csr_val, &csr_gen4_fmt);
-}
-
-void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
-       pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset;
-       pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset;
-       pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts;
-       pfvf_ops->disable_all_vf2pf_interrupts = adf_gen4_disable_all_vf2pf_interrupts;
-       pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen4_disable_pending_vf2pf_interrupts;
-       pfvf_ops->send_msg = adf_gen4_pfvf_send;
-       pfvf_ops->recv_msg = adf_gen4_pfvf_recv;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_pf_pfvf_ops);
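
Aside: adf_gen4_disable_pending_vf2pf_interrupts() above works around a hardware race by masking every source and then restoring all but the pending and already-disabled ones. The pattern generalizes to any source/mask register pair; a minimal userspace sketch, with hypothetical csr_read()/csr_write() helpers standing in for ADF_CSR_RD/ADF_CSR_WR:

#include <stdint.h>

/* Hypothetical MMIO accessors standing in for ADF_CSR_RD/ADF_CSR_WR */
static uint32_t csr_read(volatile uint32_t *reg) { return *reg; }
static void csr_write(volatile uint32_t *reg, uint32_t val) { *reg = val; }

#define ALL_SOURCES_MASK 0xFFFFu	/* cf. ADF_GEN4_VF_MSK */

/*
 * Return the newly pending sources, leaving them masked. Masking
 * everything first and then restoring only the untouched sources
 * avoids losing an interrupt that fires between reading the source
 * register and updating the mask register.
 */
static uint32_t disable_pending(volatile uint32_t *sou, volatile uint32_t *msk)
{
	uint32_t sources = csr_read(sou);
	uint32_t disabled = csr_read(msk);
	uint32_t pending = sources & ~disabled;

	if (!pending)
		return 0;

	csr_write(msk, ALL_SOURCES_MASK);	/* mask everything */
	csr_write(msk, disabled | sources);	/* unmask only untouched sources */
	return pending;
}
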
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h
deleted file mode 100644 (file)
index 17d1b77..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_GEN4_PFVF_H
-#define ADF_GEN4_PFVF_H
-
-#include "adf_accel_devices.h"
-
-#ifdef CONFIG_PCI_IOV
-void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
-#else
-static inline void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
-}
-#endif
-
-#endif /* ADF_GEN4_PFVF_H */
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/qat/qat_common/adf_gen4_pm.c
deleted file mode 100644 (file)
index 7037c08..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2022 Intel Corporation */
-#include <linux/bitfield.h>
-#include <linux/iopoll.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_gen4_pm.h"
-#include "adf_cfg_strings.h"
-#include "icp_qat_fw_init_admin.h"
-#include "adf_gen4_hw_data.h"
-#include "adf_cfg.h"
-
-enum qat_pm_host_msg {
-       PM_NO_CHANGE = 0,
-       PM_SET_MIN,
-};
-
-struct adf_gen4_pm_data {
-       struct work_struct pm_irq_work;
-       struct adf_accel_dev *accel_dev;
-       u32 pm_int_sts;
-};
-
-static int send_host_msg(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
-       u32 msg;
-
-       msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
-       if (msg & ADF_GEN4_PM_MSG_PENDING)
-               return -EBUSY;
-
-       /* Send HOST_MSG */
-       msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN);
-       msg |= ADF_GEN4_PM_MSG_PENDING;
-       ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
-
-       /* Poll status register to make sure the HOST_MSG has been processed */
-       return read_poll_timeout(ADF_CSR_RD, msg,
-                               !(msg & ADF_GEN4_PM_MSG_PENDING),
-                               ADF_GEN4_PM_MSG_POLL_DELAY_US,
-                               ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc,
-                               ADF_GEN4_PM_HOST_MSG);
-}
-
-static void pm_bh_handler(struct work_struct *work)
-{
-       struct adf_gen4_pm_data *pm_data =
-               container_of(work, struct adf_gen4_pm_data, pm_irq_work);
-       struct adf_accel_dev *accel_dev = pm_data->accel_dev;
-       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
-       u32 pm_int_sts = pm_data->pm_int_sts;
-       u32 val;
-
-       /* PM Idle interrupt */
-       if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
-               /* Issue host message to FW */
-               if (send_host_msg(accel_dev))
-                       dev_warn_ratelimited(&GET_DEV(accel_dev),
-                                            "Failed to send host msg to FW\n");
-       }
-
-       /* Clear interrupt status */
-       ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);
-
-       /* Re-enable PM interrupt */
-       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
-       val &= ~ADF_GEN4_PM_SOU;
-       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
-
-       kfree(pm_data);
-}
-
-bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
-       struct adf_gen4_pm_data *pm_data = NULL;
-       u32 errsou2;
-       u32 errmsk2;
-       u32 val;
-
-       /* Only handle the interrupt triggered by PM */
-       errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
-       if (errmsk2 & ADF_GEN4_PM_SOU)
-               return false;
-
-       errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2);
-       if (!(errsou2 & ADF_GEN4_PM_SOU))
-               return false;
-
-       /* Disable interrupt */
-       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
-       val |= ADF_GEN4_PM_SOU;
-       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
-
-       val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
-
-       pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC);
-       if (!pm_data)
-               return false;
-
-       pm_data->pm_int_sts = val;
-       pm_data->accel_dev = accel_dev;
-
-       INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler);
-       adf_misc_wq_queue_work(&pm_data->pm_irq_work);
-
-       return true;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt);
-
-int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
-       int ret;
-       u32 val;
-
-       ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);
-       if (ret)
-               return ret;
-
-       /* Enable default PM interrupts: IDLE, THROTTLE */
-       val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
-       val |= ADF_GEN4_PM_INT_EN_DEFAULT;
-
-       /* Clear interrupt status */
-       val |= ADF_GEN4_PM_INT_STS_MASK;
-       ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val);
-
-       /* Unmask PM Interrupt */
-       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
-       val &= ~ADF_GEN4_PM_SOU;
-       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_enable_pm);
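
Aside: adf_gen4_handle_pm_interrupt() above shows the usual top-half/bottom-half split: snapshot the status and mask the source in hard-IRQ context, then defer the slow work (and the re-enable) to a workqueue. A stripped-down sketch of that shape; struct pm_event and the use of system_wq are illustrative, not the driver's actual types or queue (the driver uses its own adf_misc_wq):

#include <linux/container_of.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical deferred-work context mirroring struct adf_gen4_pm_data */
struct pm_event {
	struct work_struct work;
	u32 status;			/* snapshot taken in hard-IRQ context */
};

static void pm_event_handler(struct work_struct *work)
{
	struct pm_event *ev = container_of(work, struct pm_event, work);

	/* ... act on ev->status, then re-enable the interrupt source ... */
	kfree(ev);			/* one-shot context, freed by the BH */
}

/* Called from the hard IRQ handler: allocate atomically, defer the rest */
static bool pm_irq_top_half(u32 status)
{
	struct pm_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return false;

	ev->status = status;
	INIT_WORK(&ev->work, pm_event_handler);
	return queue_work(system_wq, &ev->work);
}
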
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/qat/qat_common/adf_gen4_pm.h
deleted file mode 100644 (file)
index f8f8a9e..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN4_PM_H
-#define ADF_GEN4_PM_H
-
-#include "adf_accel_devices.h"
-
-/* Power management registers */
-#define ADF_GEN4_PM_HOST_MSG (0x50A01C)
-
-/* Power management */
-#define ADF_GEN4_PM_POLL_DELAY_US      20
-#define ADF_GEN4_PM_POLL_TIMEOUT_US    USEC_PER_SEC
-#define ADF_GEN4_PM_MSG_POLL_DELAY_US  (10 * USEC_PER_MSEC)
-#define ADF_GEN4_PM_STATUS             (0x50A00C)
-#define ADF_GEN4_PM_INTERRUPT          (0x50A028)
-
-/* Power management source in ERRSOU2 and ERRMSK2 */
-#define ADF_GEN4_PM_SOU                        BIT(18)
-
-#define ADF_GEN4_PM_IDLE_INT_EN                BIT(18)
-#define ADF_GEN4_PM_THROTTLE_INT_EN    BIT(19)
-#define ADF_GEN4_PM_DRV_ACTIVE         BIT(20)
-#define ADF_GEN4_PM_INIT_STATE         BIT(21)
-#define ADF_GEN4_PM_INT_EN_DEFAULT     (ADF_GEN4_PM_IDLE_INT_EN | \
-                                       ADF_GEN4_PM_THROTTLE_INT_EN)
-
-#define ADF_GEN4_PM_THR_STS    BIT(0)
-#define ADF_GEN4_PM_IDLE_STS   BIT(1)
-#define ADF_GEN4_PM_FW_INT_STS BIT(2)
-#define ADF_GEN4_PM_INT_STS_MASK (ADF_GEN4_PM_THR_STS | \
-                                ADF_GEN4_PM_IDLE_STS | \
-                                ADF_GEN4_PM_FW_INT_STS)
-
-#define ADF_GEN4_PM_MSG_PENDING                        BIT(0)
-#define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK       GENMASK(28, 1)
-
-#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER                (0x0)
-#define ADF_GEN4_PM_MAX_IDLE_FILTER            (0x7)
-
-int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
-bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
deleted file mode 100644 (file)
index 64e4596..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_transport_internal.h"
-
-#define ADF_ARB_NUM 4
-#define ADF_ARB_REG_SIZE 0x4
-
-#define WRITE_CSR_ARB_SARCONFIG(csr_addr, arb_offset, index, value) \
-       ADF_CSR_WR(csr_addr, (arb_offset) + \
-       (ADF_ARB_REG_SIZE * (index)), value)
-
-#define WRITE_CSR_ARB_WT2SAM(csr_addr, arb_offset, wt_offset, index, value) \
-       ADF_CSR_WR(csr_addr, ((arb_offset) + (wt_offset)) + \
-       (ADF_ARB_REG_SIZE * (index)), value)
-
-int adf_init_arb(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
-       unsigned long ae_mask = hw_data->ae_mask;
-       u32 arb_off, wt_off, arb_cfg;
-       const u32 *thd_2_arb_cfg;
-       struct arb_info info;
-       int arb, i;
-
-       hw_data->get_arb_info(&info);
-       arb_cfg = info.arb_cfg;
-       arb_off = info.arb_offset;
-       wt_off = info.wt2sam_offset;
-
-       /* Service arb configured for 32-byte responses and
-        * ring flow control check enabled. */
-       for (arb = 0; arb < ADF_ARB_NUM; arb++)
-               WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);
-
-       /* Map worker threads to service arbiters */
-       thd_2_arb_cfg = hw_data->get_arb_mapping();
-
-       for_each_set_bit(i, &ae_mask, hw_data->num_engines)
-               WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_init_arb);
-
-void adf_update_ring_arb(struct adf_etr_ring_data *ring)
-{
-       struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
-       u32 tx_ring_mask = hw_data->tx_rings_mask;
-       u32 shift = hw_data->tx_rx_gap;
-       u32 arben, arben_tx, arben_rx;
-       u32 rx_ring_mask;
-
-       /*
-        * Enable arbitration on a ring only if the TX half of the ring mask
-        * matches the RX half. This results in CSR writes on both the TX and
-        * the RX update - only one is necessary, but both are done for
-        * simplicity.
-        */
-       rx_ring_mask = tx_ring_mask << shift;
-       arben_tx = (ring->bank->ring_mask & tx_ring_mask) >> 0;
-       arben_rx = (ring->bank->ring_mask & rx_ring_mask) >> shift;
-       arben = arben_tx & arben_rx;
-
-       csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
-                                          ring->bank->bank_number, arben);
-}
-
-void adf_exit_arb(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
-       u32 arb_off, wt_off;
-       struct arb_info info;
-       void __iomem *csr;
-       unsigned int i;
-
-       hw_data->get_arb_info(&info);
-       arb_off = info.arb_offset;
-       wt_off = info.wt2sam_offset;
-
-       if (!accel_dev->transport)
-               return;
-
-       csr = accel_dev->transport->banks[0].csr_addr;
-
-       /* Reset arbiter configuration */
-       for (i = 0; i < ADF_ARB_NUM; i++)
-               WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);
-
-       /* Unmap worker threads from service arbiters */
-       for (i = 0; i < hw_data->num_engines; i++)
-               WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);
-
-       /* Disable arbitration on all rings */
-       for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
-               csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
-}
-EXPORT_SYMBOL_GPL(adf_exit_arb);
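
Aside: the TX/RX matching logic in adf_update_ring_arb() above reduces to a few mask operations. A runnable sketch with made-up mask values (the real tx_rings_mask and tx_rx_gap are per-device data):

#include <stdint.h>
#include <stdio.h>

/*
 * Compute the per-bank arbitration-enable mask: a ring is enabled only
 * when both its TX half and the corresponding RX half are in the mask.
 * Values are illustrative, not taken from any real QAT device.
 */
static uint32_t arb_enable_mask(uint32_t ring_mask, uint32_t tx_mask,
				uint32_t tx_rx_gap)
{
	uint32_t rx_mask = tx_mask << tx_rx_gap;
	uint32_t arben_tx = ring_mask & tx_mask;
	uint32_t arben_rx = (ring_mask & rx_mask) >> tx_rx_gap;

	return arben_tx & arben_rx;
}

int main(void)
{
	/* 8 TX rings in bits 0-7, RX counterparts 8 bits higher */
	printf("arben=%#x\n", arb_enable_mask(0x0101, 0x00FF, 8));
	return 0;
}
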
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
deleted file mode 100644 (file)
index cef7bb8..0000000
+++ /dev/null
@@ -1,402 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include "adf_accel_devices.h"
-#include "adf_cfg.h"
-#include "adf_common_drv.h"
-
-static LIST_HEAD(service_table);
-static DEFINE_MUTEX(service_lock);
-
-static void adf_service_add(struct service_hndl *service)
-{
-       mutex_lock(&service_lock);
-       list_add(&service->list, &service_table);
-       mutex_unlock(&service_lock);
-}
-
-int adf_service_register(struct service_hndl *service)
-{
-       memset(service->init_status, 0, sizeof(service->init_status));
-       memset(service->start_status, 0, sizeof(service->start_status));
-       adf_service_add(service);
-       return 0;
-}
-
-static void adf_service_remove(struct service_hndl *service)
-{
-       mutex_lock(&service_lock);
-       list_del(&service->list);
-       mutex_unlock(&service_lock);
-}
-
-int adf_service_unregister(struct service_hndl *service)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
-               if (service->init_status[i] || service->start_status[i]) {
-                       pr_err("QAT: Could not remove active service\n");
-                       return -EFAULT;
-               }
-       }
-       adf_service_remove(service);
-       return 0;
-}
-
-/**
- * adf_dev_init() - Init data structures and services for the given accel device
- * @accel_dev: Pointer to acceleration device.
- *
- * Initialize the ring data structures and the admin comms and arbitration
- * services.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_dev_init(struct adf_accel_dev *accel_dev)
-{
-       struct service_hndl *service;
-       struct list_head *list_itr;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       int ret;
-
-       if (!hw_data) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to init device - hw_data not set\n");
-               return -EFAULT;
-       }
-
-       if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
-           !accel_dev->is_vf) {
-               dev_err(&GET_DEV(accel_dev), "Device not configured\n");
-               return -EFAULT;
-       }
-
-       if (adf_init_etr_data(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
-               return -EFAULT;
-       }
-
-       if (hw_data->init_device && hw_data->init_device(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
-               return -EFAULT;
-       }
-
-       if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
-               return -EFAULT;
-       }
-
-       if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
-               return -EFAULT;
-       }
-
-       if (adf_ae_init(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to initialise Acceleration Engine\n");
-               return -EFAULT;
-       }
-       set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
-
-       if (adf_ae_fw_load(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to load acceleration FW\n");
-               return -EFAULT;
-       }
-       set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
-
-       if (hw_data->alloc_irq(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
-               return -EFAULT;
-       }
-       set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
-
-       hw_data->enable_ints(accel_dev);
-       hw_data->enable_error_correction(accel_dev);
-
-       ret = hw_data->pfvf_ops.enable_comms(accel_dev);
-       if (ret)
-               return ret;
-
-       if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
-           accel_dev->is_vf) {
-               if (qat_crypto_vf_dev_config(accel_dev))
-                       return -EFAULT;
-       }
-
-       /*
-        * Subservice initialisation is divided into two stages: init and start.
-        * This allows any ordering dependencies between services to be
-        * resolved before any of the accelerators are started.
-        */
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to initialise service %s\n",
-                               service->name);
-                       return -EFAULT;
-               }
-               set_bit(accel_dev->accel_id, service->init_status);
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_dev_init);
-
-/**
- * adf_dev_start() - Start acceleration service for the given accel device
- * @accel_dev:    Pointer to acceleration device.
- *
- * Function notifies all the registered services that the acceleration device
- * is ready to be used.
- * To be used by QAT device-specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_dev_start(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct service_hndl *service;
-       struct list_head *list_itr;
-
-       set_bit(ADF_STATUS_STARTING, &accel_dev->status);
-
-       if (adf_ae_start(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
-               return -EFAULT;
-       }
-       set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
-
-       if (hw_data->send_admin_init(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
-               return -EFAULT;
-       }
-
-       /* Set SSM watchdog timer */
-       if (hw_data->set_ssm_wdtimer)
-               hw_data->set_ssm_wdtimer(accel_dev);
-
-       /* Enable Power Management */
-       if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
-               return -EFAULT;
-       }
-
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (service->event_hld(accel_dev, ADF_EVENT_START)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to start service %s\n",
-                               service->name);
-                       return -EFAULT;
-               }
-               set_bit(accel_dev->accel_id, service->start_status);
-       }
-
-       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
-       set_bit(ADF_STATUS_STARTED, &accel_dev->status);
-
-       if (!list_empty(&accel_dev->crypto_list) &&
-           (qat_algs_register() || qat_asym_algs_register())) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to register crypto algs\n");
-               set_bit(ADF_STATUS_STARTING, &accel_dev->status);
-               clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
-               return -EFAULT;
-       }
-
-       if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to register compression algs\n");
-               set_bit(ADF_STATUS_STARTING, &accel_dev->status);
-               clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
-               return -EFAULT;
-       }
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_dev_start);
-
-/**
- * adf_dev_stop() - Stop acceleration service for the given accel device
- * @accel_dev:    Pointer to acceleration device.
- *
- * Function notifies all the registered services that the acceleration device
- * is shutting down.
- * To be used by QAT device-specific drivers.
- *
- * Return: void
- */
-void adf_dev_stop(struct adf_accel_dev *accel_dev)
-{
-       struct service_hndl *service;
-       struct list_head *list_itr;
-       bool wait = false;
-       int ret;
-
-       if (!adf_dev_started(accel_dev) &&
-           !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
-               return;
-
-       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
-       clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
-
-       if (!list_empty(&accel_dev->crypto_list)) {
-               qat_algs_unregister();
-               qat_asym_algs_unregister();
-       }
-
-       if (!list_empty(&accel_dev->compression_list))
-               qat_comp_algs_unregister();
-
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (!test_bit(accel_dev->accel_id, service->start_status))
-                       continue;
-               ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
-               if (!ret) {
-                       clear_bit(accel_dev->accel_id, service->start_status);
-               } else if (ret == -EAGAIN) {
-                       wait = true;
-                       clear_bit(accel_dev->accel_id, service->start_status);
-               }
-       }
-
-       if (wait)
-               msleep(100);
-
-       if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
-               if (adf_ae_stop(accel_dev))
-                       dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
-               else
-                       clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
-       }
-}
-EXPORT_SYMBOL_GPL(adf_dev_stop);
-
-/**
- * adf_dev_shutdown() - shut down acceleration services and data structures
- * @accel_dev: Pointer to acceleration device
- *
- * Clean up the ring data structures and the admin comms and arbitration
- * services.
- */
-void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct service_hndl *service;
-       struct list_head *list_itr;
-
-       if (!hw_data) {
-               dev_err(&GET_DEV(accel_dev),
-                       "QAT: Failed to shutdown device - hw_data not set\n");
-               return;
-       }
-
-       if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
-               adf_ae_fw_release(accel_dev);
-               clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
-       }
-
-       if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
-               if (adf_ae_shutdown(accel_dev))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to shutdown Accel Engine\n");
-               else
-                       clear_bit(ADF_STATUS_AE_INITIALISED,
-                                 &accel_dev->status);
-       }
-
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (!test_bit(accel_dev->accel_id, service->init_status))
-                       continue;
-               if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to shutdown service %s\n",
-                               service->name);
-               else
-                       clear_bit(accel_dev->accel_id, service->init_status);
-       }
-
-       hw_data->disable_iov(accel_dev);
-
-       if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
-               hw_data->free_irq(accel_dev);
-               clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
-       }
-
-       /* Delete configuration only if not restarting */
-       if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
-               adf_cfg_del_all(accel_dev);
-
-       if (hw_data->exit_arb)
-               hw_data->exit_arb(accel_dev);
-
-       if (hw_data->exit_admin_comms)
-               hw_data->exit_admin_comms(accel_dev);
-
-       adf_cleanup_etr_data(accel_dev);
-       adf_dev_restore(accel_dev);
-}
-EXPORT_SYMBOL_GPL(adf_dev_shutdown);
-
-int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
-{
-       struct service_hndl *service;
-       struct list_head *list_itr;
-
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to restart service %s.\n",
-                               service->name);
-       }
-       return 0;
-}
-
-int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
-{
-       struct service_hndl *service;
-       struct list_head *list_itr;
-
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to restart service %s.\n",
-                               service->name);
-       }
-       return 0;
-}
-
-int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
-{
-       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
-       int ret;
-
-       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
-                                     ADF_SERVICES_ENABLED, services);
-
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-
-       if (!ret) {
-               ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
-               if (ret)
-                       return ret;
-
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
-                                                 ADF_SERVICES_ENABLED,
-                                                 services, ADF_STR);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
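
Aside: the init/start split implemented by adf_dev_init() and adf_dev_start() above can be modelled with a tiny service registry. A sketch under the assumption that each service exposes a single event handler, as in struct service_hndl; the types and names here are illustrative:

/* Minimal model of the two-stage bring-up: INIT is broadcast to every
 * service first, and START only once all inits have succeeded, so any
 * ordering dependencies are settled before the accelerators run.
 * These types are illustrative, not the driver's struct service_hndl. */
enum svc_event { SVC_EVENT_INIT, SVC_EVENT_START };

struct service {
	const char *name;
	int (*event_hld)(enum svc_event ev);
};

static int bring_up(struct service *svcs, int n)
{
	for (int i = 0; i < n; i++)
		if (svcs[i].event_hld(SVC_EVENT_INIT))
			return -1;	/* fail before anything is started */
	for (int i = 0; i < n; i++)
		if (svcs[i].event_hld(SVC_EVENT_START))
			return -1;
	return 0;
}
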
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
deleted file mode 100644 (file)
index ad9e135..0000000
+++ /dev/null
@@ -1,382 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_cfg.h"
-#include "adf_cfg_strings.h"
-#include "adf_cfg_common.h"
-#include "adf_transport_access_macros.h"
-#include "adf_transport_internal.h"
-
-#define ADF_MAX_NUM_VFS        32
-static struct workqueue_struct *adf_misc_wq;
-
-static int adf_enable_msix(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u32 msix_num_entries = hw_data->num_banks + 1;
-       int ret;
-
-       if (hw_data->set_msix_rttable)
-               hw_data->set_msix_rttable(accel_dev);
-
-       ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
-                                   msix_num_entries, PCI_IRQ_MSIX);
-       if (unlikely(ret < 0)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to allocate %d MSI-X vectors\n",
-                       msix_num_entries);
-               return ret;
-       }
-       return 0;
-}
-
-static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
-{
-       pci_free_irq_vectors(pci_dev_info->pci_dev);
-}
-
-static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
-{
-       struct adf_etr_bank_data *bank = bank_ptr;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
-                                           0);
-       tasklet_hi_schedule(&bank->resp_handler);
-       return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_PCI_IOV
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
-       GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
-       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
-}
-
-void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
-       GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr);
-       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
-}
-
-static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       u32 pending;
-
-       spin_lock(&accel_dev->pf.vf2pf_ints_lock);
-       pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
-       spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
-
-       return pending;
-}
-
-static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
-{
-       bool irq_handled = false;
-       unsigned long vf_mask;
-
-       /* Get the interrupt sources triggered by VFs, except for those already disabled */
-       vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
-       if (vf_mask) {
-               struct adf_accel_vf_info *vf_info;
-               int i;
-
-               /*
-                * Handle VF2PF interrupt unless the VF is malicious and
-                * is attempting to flood the host OS with VF2PF interrupts.
-                */
-               for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
-                       vf_info = accel_dev->pf.vf_info + i;
-
-                       if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
-                               dev_info(&GET_DEV(accel_dev),
-                                        "Too many ints from VF%d\n",
-                                         vf_info->vf_nr);
-                               continue;
-                       }
-
-                       adf_schedule_vf2pf_handler(vf_info);
-                       irq_handled = true;
-               }
-       }
-       return irq_handled;
-}
-#endif /* CONFIG_PCI_IOV */
-
-static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-
-       if (hw_data->handle_pm_interrupt &&
-           hw_data->handle_pm_interrupt(accel_dev))
-               return true;
-
-       return false;
-}
-
-static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
-{
-       struct adf_accel_dev *accel_dev = dev_ptr;
-
-#ifdef CONFIG_PCI_IOV
-       /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
-       if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
-               return IRQ_HANDLED;
-#endif /* CONFIG_PCI_IOV */
-
-       if (adf_handle_pm_int(accel_dev))
-               return IRQ_HANDLED;
-
-       dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
-               accel_dev->accel_id);
-
-       return IRQ_NONE;
-}
-
-static void adf_free_irqs(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
-       struct adf_etr_data *etr_data = accel_dev->transport;
-       int clust_irq = hw_data->num_banks;
-       int irq, i = 0;
-
-       if (pci_dev_info->msix_entries.num_entries > 1) {
-               for (i = 0; i < hw_data->num_banks; i++) {
-                       if (irqs[i].enabled) {
-                               irq = pci_irq_vector(pci_dev_info->pci_dev, i);
-                               irq_set_affinity_hint(irq, NULL);
-                               free_irq(irq, &etr_data->banks[i]);
-                       }
-               }
-       }
-
-       if (irqs[i].enabled) {
-               irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
-               free_irq(irq, accel_dev);
-       }
-}
-
-static int adf_request_irqs(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
-       struct adf_etr_data *etr_data = accel_dev->transport;
-       int clust_irq = hw_data->num_banks;
-       int ret, irq, i = 0;
-       char *name;
-
-       /* Request MSI-X IRQs for all banks unless SR-IOV is enabled */
-       if (!accel_dev->pf.vf_info) {
-               for (i = 0; i < hw_data->num_banks; i++) {
-                       struct adf_etr_bank_data *bank = &etr_data->banks[i];
-                       unsigned int cpu, cpus = num_online_cpus();
-
-                       name = irqs[i].name;
-                       snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
-                                "qat%d-bundle%d", accel_dev->accel_id, i);
-                       irq = pci_irq_vector(pci_dev_info->pci_dev, i);
-                       if (unlikely(irq < 0)) {
-                               dev_err(&GET_DEV(accel_dev),
-                                       "Failed to get IRQ number of device vector %d - %s\n",
-                                       i, name);
-                               ret = irq;
-                               goto err;
-                       }
-                       ret = request_irq(irq, adf_msix_isr_bundle, 0,
-                                         &name[0], bank);
-                       if (ret) {
-                               dev_err(&GET_DEV(accel_dev),
-                                       "Failed to allocate IRQ %d for %s\n",
-                                       irq, name);
-                               goto err;
-                       }
-
-                       cpu = ((accel_dev->accel_id * hw_data->num_banks) +
-                              i) % cpus;
-                       irq_set_affinity_hint(irq, get_cpu_mask(cpu));
-                       irqs[i].enabled = true;
-               }
-       }
-
-       /* Request MSI-X IRQ for AE */
-       name = irqs[i].name;
-       snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
-                "qat%d-ae-cluster", accel_dev->accel_id);
-       irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
-       if (unlikely(irq < 0)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to get IRQ number of device vector %d - %s\n",
-                       i, name);
-               ret = irq;
-               goto err;
-       }
-       ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to allocate IRQ %d for %s\n", irq, name);
-               goto err;
-       }
-       irqs[i].enabled = true;
-       return ret;
-err:
-       adf_free_irqs(accel_dev);
-       return ret;
-}
-
-static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u32 msix_num_entries = 1;
-       struct adf_irq *irqs;
-
-       /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
-       if (!accel_dev->pf.vf_info)
-               msix_num_entries += hw_data->num_banks;
-
-       irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
-                           GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
-       if (!irqs)
-               return -ENOMEM;
-
-       accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
-       accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
-       return 0;
-}
-
-static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
-{
-       kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
-       accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
-}
-
-static int adf_setup_bh(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *priv_data = accel_dev->transport;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       int i;
-
-       for (i = 0; i < hw_data->num_banks; i++)
-               tasklet_init(&priv_data->banks[i].resp_handler,
-                            adf_response_handler,
-                            (unsigned long)&priv_data->banks[i]);
-       return 0;
-}
-
-static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *priv_data = accel_dev->transport;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       int i;
-
-       for (i = 0; i < hw_data->num_banks; i++) {
-               tasklet_disable(&priv_data->banks[i].resp_handler);
-               tasklet_kill(&priv_data->banks[i].resp_handler);
-       }
-}
-
-/**
- * adf_isr_resource_free() - Free IRQ for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function frees interrupts for acceleration device.
- */
-void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
-{
-       adf_free_irqs(accel_dev);
-       adf_cleanup_bh(accel_dev);
-       adf_disable_msix(&accel_dev->accel_pci_dev);
-       adf_isr_free_msix_vectors_data(accel_dev);
-}
-EXPORT_SYMBOL_GPL(adf_isr_resource_free);
-
-/**
- * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function allocates interrupts for acceleration device.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
-{
-       int ret;
-
-       ret = adf_isr_alloc_msix_vectors_data(accel_dev);
-       if (ret)
-               goto err_out;
-
-       ret = adf_enable_msix(accel_dev);
-       if (ret)
-               goto err_free_msix_table;
-
-       ret = adf_setup_bh(accel_dev);
-       if (ret)
-               goto err_disable_msix;
-
-       ret = adf_request_irqs(accel_dev);
-       if (ret)
-               goto err_cleanup_bh;
-
-       return 0;
-
-err_cleanup_bh:
-       adf_cleanup_bh(accel_dev);
-
-err_disable_msix:
-       adf_disable_msix(&accel_dev->accel_pci_dev);
-
-err_free_msix_table:
-       adf_isr_free_msix_vectors_data(accel_dev);
-
-err_out:
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
-
-/**
- * adf_init_misc_wq() - Init misc workqueue
- *
- * Function initializes the workqueue 'qat_misc_wq' for general-purpose work.
- *
- * Return: 0 on success, error code otherwise.
- */
-int __init adf_init_misc_wq(void)
-{
-       adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
-
-       return !adf_misc_wq ? -ENOMEM : 0;
-}
-
-void adf_exit_misc_wq(void)
-{
-       if (adf_misc_wq)
-               destroy_workqueue(adf_misc_wq);
-
-       adf_misc_wq = NULL;
-}
-
-bool adf_misc_wq_queue_work(struct work_struct *work)
-{
-       return queue_work(adf_misc_wq, work);
-}
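
Aside: adf_request_irqs() above spreads the bundle vectors across CPUs with plain modular arithmetic before calling irq_set_affinity_hint(). The arithmetic in isolation:

#include <stdio.h>

/*
 * Round-robin IRQ-to-CPU spreading as used for the bundle vectors:
 * consecutive banks of consecutive devices land on consecutive CPUs.
 * Plain arithmetic only; no real IRQ APIs are involved.
 */
static unsigned int bundle_cpu(unsigned int accel_id, unsigned int num_banks,
			       unsigned int bank, unsigned int online_cpus)
{
	return (accel_id * num_banks + bank) % online_cpus;
}

int main(void)
{
	for (unsigned int bank = 0; bank < 4; bank++)
		printf("dev0 bank%u -> cpu%u\n", bank, bundle_cpu(0, 4, bank, 8));
	return 0;
}
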
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_msg.h
deleted file mode 100644 (file)
index 204a424..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#ifndef ADF_PFVF_MSG_H
-#define ADF_PFVF_MSG_H
-
-#include <linux/bits.h>
-
-/*
- * PF<->VF Gen2 Messaging format
- *
- * The PF has an array of 32-bit PF2VF registers, one for each VF. The
- * PF can access all these registers while each VF can access only the one
- * register associated with that particular VF.
- *
- * The register is functionally split into two parts:
- * The bottom half is for PF->VF messages. In particular, when the first
- * bit of this register (bit 0) gets set, an interrupt is triggered in
- * the respective VF.
- * The top half is for VF->PF messages. In particular, when the first bit
- * of this half of the register (bit 16) gets set, an interrupt is
- * triggered in the PF.
- *
- * The remaining bits within this register are available to encode messages
- * and to implement a collision control mechanism that prevents concurrent
- * use of the PF2VF register by the PF and the VF.
- *
- *  31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
- *  _______________________________________________
- * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
- * +-----------------------------------------------+
- *  \___________________________/ \_________/ ^   ^
- *                ^                    ^      |   |
- *                |                    |      |   VF2PF Int
- *                |                    |      Message Origin
- *                |                    Message Type
- *                Message-specific Data/Reserved
- *
- *  15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
- *  _______________________________________________
- * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
- * +-----------------------------------------------+
- *  \___________________________/ \_________/ ^   ^
- *                ^                    ^      |   |
- *                |                    |      |   PF2VF Int
- *                |                    |      Message Origin
- *                |                    Message Type
- *                Message-specific Data/Reserved
- *
- * Message Origin (Should always be 1)
- * A legacy out-of-tree QAT driver allowed for a set of messages not supported
- * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
- *
- * When a PF or VF attempts to send a message in the lower or upper 16 bits,
- * respectively, the other 16 bits are written to first with a defined
- * IN_USE_BY pattern as part of a collision control scheme (see function
- * adf_gen2_pfvf_send() in adf_pf2vf_msg.c).
- *
- *
- * PF<->VF Gen4 Messaging format
- *
- * As in the Gen2 messaging format, 32-bit registers are used for
- * communication between the PF and the VFs. However, each VF and the PF
- * share a pair of 32-bit registers to avoid collisions: one for PF to VF
- * messages and one for VF to PF messages.
- *
- * Both the Interrupt bit and the Message Origin bit retain the same position
- * and meaning, although non-system messages are now deprecated and not
- * expected.
- *
- *  31 30              9  8  7  6  5  4  3  2  1  0
- *  _______________________________________________
- * |  |  |   . . .   |  |  |  |  |  |  |  |  |  |  |
- * +-----------------------------------------------+
- *  \_____________________/ \_______________/  ^  ^
- *             ^                     ^         |  |
- *             |                     |         |  PF/VF Int
- *             |                     |         Message Origin
- *             |                     Message Type
- *             Message-specific Data/Reserved
- *
- * For both formats, the message reception is acknowledged by lowering the
- * interrupt bit on the register where the message was sent.
- */
-
-/* PFVF message common bits */
-#define ADF_PFVF_INT                           BIT(0)
-#define ADF_PFVF_MSGORIGIN_SYSTEM              BIT(1)
-
-/* Different generations have different CSR layouts, use this struct
- * to abstract these differences away
- */
-struct pfvf_message {
-       u8 type;
-       u32 data;
-};
-
-/* PF->VF messages */
-enum pf2vf_msgtype {
-       ADF_PF2VF_MSGTYPE_RESTARTING            = 0x01,
-       ADF_PF2VF_MSGTYPE_VERSION_RESP          = 0x02,
-       ADF_PF2VF_MSGTYPE_BLKMSG_RESP           = 0x03,
-/* Values from 0x10 are Gen4-specific; the message type field is only 4 bits wide on Gen2 devices. */
-       ADF_PF2VF_MSGTYPE_RP_RESET_RESP         = 0x10,
-};
-
-/* VF->PF messages */
-enum vf2pf_msgtype {
-       ADF_VF2PF_MSGTYPE_INIT                  = 0x03,
-       ADF_VF2PF_MSGTYPE_SHUTDOWN              = 0x04,
-       ADF_VF2PF_MSGTYPE_VERSION_REQ           = 0x05,
-       ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ        = 0x06,
-       ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ       = 0x07,
-       ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ      = 0x08,
-       ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ       = 0x09,
-/* Values from 0x10 are Gen4-specific; the message type field is only 4 bits wide on Gen2 devices. */
-       ADF_VF2PF_MSGTYPE_RP_RESET              = 0x10,
-};
-
-/* VF/PF compatibility version. */
-enum pfvf_compatibility_version {
-       /* Support for extended capabilities */
-       ADF_PFVF_COMPAT_CAPABILITIES            = 0x02,
-       /* In-use pattern cleared by receiver */
-       ADF_PFVF_COMPAT_FAST_ACK                = 0x03,
-       /* Ring to service mapping support for non-standard mappings */
-       ADF_PFVF_COMPAT_RING_TO_SVC_MAP         = 0x04,
-       /* Reference to the latest version */
-       ADF_PFVF_COMPAT_THIS_VERSION            = 0x04,
-};
-
-/* PF->VF Version Response */
-#define ADF_PF2VF_VERSION_RESP_VERS_MASK       GENMASK(7, 0)
-#define ADF_PF2VF_VERSION_RESP_RESULT_MASK     GENMASK(9, 8)
-
-enum pf2vf_compat_response {
-       ADF_PF2VF_VF_COMPATIBLE                 = 0x01,
-       ADF_PF2VF_VF_INCOMPATIBLE               = 0x02,
-       ADF_PF2VF_VF_COMPAT_UNKNOWN             = 0x03,
-};
-
-enum ring_reset_result {
-       RPRESET_SUCCESS                         = 0x00,
-       RPRESET_NOT_SUPPORTED                   = 0x01,
-       RPRESET_INVAL_BANK                      = 0x02,
-       RPRESET_TIMEOUT                         = 0x03,
-};
-
-#define ADF_VF2PF_RNG_RESET_RP_MASK            GENMASK(1, 0)
-#define ADF_VF2PF_RNG_RESET_RSVD_MASK          GENMASK(25, 2)
-
-/* PF->VF Block Responses */
-#define ADF_PF2VF_BLKMSG_RESP_TYPE_MASK                GENMASK(1, 0)
-#define ADF_PF2VF_BLKMSG_RESP_DATA_MASK                GENMASK(9, 2)
-
-enum pf2vf_blkmsg_resp_type {
-       ADF_PF2VF_BLKMSG_RESP_TYPE_DATA         = 0x00,
-       ADF_PF2VF_BLKMSG_RESP_TYPE_CRC          = 0x01,
-       ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR        = 0x02,
-};
-
-/* PF->VF Block Error Code */
-enum pf2vf_blkmsg_error {
-       ADF_PF2VF_INVALID_BLOCK_TYPE            = 0x00,
-       ADF_PF2VF_INVALID_BYTE_NUM_REQ          = 0x01,
-       ADF_PF2VF_PAYLOAD_TRUNCATED             = 0x02,
-       ADF_PF2VF_UNSPECIFIED_ERROR             = 0x03,
-};
-
-/* VF->PF Block Requests */
-#define ADF_VF2PF_LARGE_BLOCK_TYPE_MASK                GENMASK(1, 0)
-#define ADF_VF2PF_LARGE_BLOCK_BYTE_MASK                GENMASK(8, 2)
-#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK       GENMASK(2, 0)
-#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK       GENMASK(8, 3)
-#define ADF_VF2PF_SMALL_BLOCK_TYPE_MASK                GENMASK(3, 0)
-#define ADF_VF2PF_SMALL_BLOCK_BYTE_MASK                GENMASK(8, 4)
-#define ADF_VF2PF_BLOCK_CRC_REQ_MASK           BIT(9)
-
-/* VF->PF Block Request Types
- *  0..15 - 32 byte message
- * 16..23 - 64 byte message
- * 24..27 - 128 byte message
- */
-enum vf2pf_blkmsg_req_type {
-       ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY        = 0x02,
-       ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP       = 0x03,
-};
-
-#define ADF_VF2PF_SMALL_BLOCK_TYPE_MAX \
-               (FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK))
-
-#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX \
-               (FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK) + \
-               ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1)
-
-#define ADF_VF2PF_LARGE_BLOCK_TYPE_MAX \
-               (FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK) + \
-               ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX)
-
-#define ADF_VF2PF_SMALL_BLOCK_BYTE_MAX \
-               FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK)
-
-#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX \
-               FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK)
-
-#define ADF_VF2PF_LARGE_BLOCK_BYTE_MAX \
-               FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK)
-
-struct pfvf_blkmsg_header {
-       u8 version;
-       u8 payload_size;
-} __packed;
-
-#define ADF_PFVF_BLKMSG_HEADER_SIZE            (sizeof(struct pfvf_blkmsg_header))
-#define ADF_PFVF_BLKMSG_PAYLOAD_SIZE(blkmsg)   (sizeof(blkmsg) - \
-                                                       ADF_PFVF_BLKMSG_HEADER_SIZE)
-#define ADF_PFVF_BLKMSG_MSG_SIZE(blkmsg)       (ADF_PFVF_BLKMSG_HEADER_SIZE + \
-                                                       (blkmsg)->hdr.payload_size)
-#define ADF_PFVF_BLKMSG_MSG_MAX_SIZE           128
-
-/* PF->VF Block message header bytes */
-#define ADF_PFVF_BLKMSG_VER_BYTE               0
-#define ADF_PFVF_BLKMSG_LEN_BYTE               1
-
-/* PF/VF Capabilities message values */
-enum blkmsg_capabilities_versions {
-       ADF_PFVF_CAPABILITIES_V1_VERSION        = 0x01,
-       ADF_PFVF_CAPABILITIES_V2_VERSION        = 0x02,
-       ADF_PFVF_CAPABILITIES_V3_VERSION        = 0x03,
-};
-
-struct capabilities_v1 {
-       struct pfvf_blkmsg_header hdr;
-       u32 ext_dc_caps;
-} __packed;
-
-struct capabilities_v2 {
-       struct pfvf_blkmsg_header hdr;
-       u32 ext_dc_caps;
-       u32 capabilities;
-} __packed;
-
-struct capabilities_v3 {
-       struct pfvf_blkmsg_header hdr;
-       u32 ext_dc_caps;
-       u32 capabilities;
-       u32 frequency;
-} __packed;
-
-/* PF/VF Ring to service mapping values */
-enum blkmsg_ring_to_svc_versions {
-       ADF_PFVF_RING_TO_SVC_VERSION            = 0x01,
-};
-
-struct ring_to_svc_map_v1 {
-       struct pfvf_blkmsg_header hdr;
-       u16 map;
-} __packed;
-
-#endif /* ADF_PFVF_MSG_H */
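
Aside: combining the layout above with the Gen4 shift/mask constants from adf_gen4_pfvf.c earlier in this diff, packing a message into a CSR word is a pair of shifts plus the origin bit. A minimal sketch of what adf_pfvf_csr_msg_of() produces for Gen4 (the sender then ORs in the interrupt bit on write):

#include <stdint.h>
#include <stdio.h>

/* Gen4 layout: type in bits 2-7, data from bit 8, origin bit 1, int bit 0 */
#define PFVF_INT	(1u << 0)
#define PFVF_MSGORIGIN	(1u << 1)

static uint32_t pack_gen4_msg(uint8_t type, uint32_t data)
{
	uint32_t csr = ((type & 0x3Fu) << 2) | ((data & 0xFFFFFFu) << 8);

	return csr | PFVF_MSGORIGIN;	/* sender sets PFVF_INT on write */
}

int main(void)
{
	/* ADF_VF2PF_MSGTYPE_VERSION_REQ (0x05) with no payload */
	printf("csr=%#010x\n", pack_gen4_msg(0x05, 0));
	return 0;
}
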
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c
deleted file mode 100644 (file)
index 14c069f..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <linux/pci.h>
-#include "adf_accel_devices.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_pf_msg.h"
-#include "adf_pfvf_pf_proto.h"
-
-void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_vf_info *vf;
-       struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
-       int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
-
-       for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
-               if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to send restarting msg to VF%d\n", i);
-       }
-}
-
-int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
-                                    u8 *buffer, u8 compat)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct capabilities_v2 caps_msg;
-
-       caps_msg.ext_dc_caps = hw_data->extended_dc_capabilities;
-       caps_msg.capabilities = hw_data->accel_capabilities_mask;
-
-       caps_msg.hdr.version = ADF_PFVF_CAPABILITIES_V2_VERSION;
-       caps_msg.hdr.payload_size =
-                       ADF_PFVF_BLKMSG_PAYLOAD_SIZE(struct capabilities_v2);
-
-       memcpy(buffer, &caps_msg, sizeof(caps_msg));
-
-       return 0;
-}
-
-int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
-                                   u8 *buffer, u8 compat)
-{
-       struct ring_to_svc_map_v1 rts_map_msg;
-
-       rts_map_msg.map = accel_dev->hw_device->ring_to_svc_map;
-       rts_map_msg.hdr.version = ADF_PFVF_RING_TO_SVC_VERSION;
-       rts_map_msg.hdr.payload_size = ADF_PFVF_BLKMSG_PAYLOAD_SIZE(rts_map_msg);
-
-       memcpy(buffer, &rts_map_msg, sizeof(rts_map_msg));
-
-       return 0;
-}
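
Aside: both providers above follow the same recipe: fill in the header version, derive payload_size from the struct layout, and memcpy the result into the caller's buffer. A self-contained sketch with locally redeclared types (the kernel versions use __packed and live in adf_pfvf_msg.h):

#include <stdint.h>
#include <string.h>

/* Local stand-ins for struct pfvf_blkmsg_header / struct capabilities_v2 */
struct blkmsg_header {
	uint8_t version;
	uint8_t payload_size;
} __attribute__((packed));

struct caps_v2 {
	struct blkmsg_header hdr;
	uint32_t ext_dc_caps;
	uint32_t capabilities;
} __attribute__((packed));

static void fill_caps(uint8_t *buffer, uint32_t ext_dc, uint32_t caps)
{
	struct caps_v2 msg = {
		.hdr.version = 0x02,	/* ADF_PFVF_CAPABILITIES_V2_VERSION */
		.hdr.payload_size = sizeof(struct caps_v2) -
				    sizeof(struct blkmsg_header),
		.ext_dc_caps = ext_dc,
		.capabilities = caps,
	};

	memcpy(buffer, &msg, sizeof(msg));	/* header + payload, packed */
}
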
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h
deleted file mode 100644 (file)
index e8982d1..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_PFVF_PF_MSG_H
-#define ADF_PFVF_PF_MSG_H
-
-#include "adf_accel_devices.h"
-
-void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
-
-typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
-                                        u8 *buffer, u8 compat);
-
-int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
-                                    u8 *buffer, u8 compat);
-int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
-                                   u8 *buffer, u8 compat);
-
-#endif /* ADF_PFVF_PF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c
deleted file mode 100644 (file)
index 388e58b..0000000
+++ /dev/null
@@ -1,348 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <linux/bitfield.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_pf_msg.h"
-#include "adf_pfvf_pf_proto.h"
-#include "adf_pfvf_utils.h"
-
-typedef u8 (*pf2vf_blkmsg_data_getter_fn)(u8 const *blkmsg, u8 byte);
-
-static const adf_pf2vf_blkmsg_provider pf2vf_blkmsg_providers[] = {
-       NULL,                             /* no message type defined for value 0 */
-       NULL,                             /* no message type defined for value 1 */
-       adf_pf_capabilities_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY */
-       adf_pf_ring_to_svc_msg_provider,  /* ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP */
-};
-
-/**
- * adf_send_pf2vf_msg() - send PF to VF message
- * @accel_dev: Pointer to acceleration device
- * @vf_nr:     VF number to which the message will be sent
- * @msg:       Message to send
- *
- * This function allows the PF to send a message to a specific VF.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg)
-{
-       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
-       u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(vf_nr);
-
-       return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
-                                 &accel_dev->pf.vf_info[vf_nr].pf2vf_lock);
-}
-
-/**
- * adf_recv_vf2pf_msg() - receive a VF to PF message
- * @accel_dev: Pointer to acceleration device
- * @vf_nr:     Number of the VF from where the message will be received
- *
- * This function allows the PF to receive a message from a specific VF.
- *
- * Return: a valid message on success, zero otherwise.
- */
-static struct pfvf_message adf_recv_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr)
-{
-       struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
-       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
-       u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(vf_nr);
-
-       return pfvf_ops->recv_msg(accel_dev, pfvf_offset, vf_info->vf_compat_ver);
-}
-
-static adf_pf2vf_blkmsg_provider get_blkmsg_response_provider(u8 type)
-{
-       if (type >= ARRAY_SIZE(pf2vf_blkmsg_providers))
-               return NULL;
-
-       return pf2vf_blkmsg_providers[type];
-}
-
-/* Byte pf2vf_blkmsg_data_getter_fn callback */
-static u8 adf_pf2vf_blkmsg_get_byte(u8 const *blkmsg, u8 index)
-{
-       return blkmsg[index];
-}
-
-/* CRC pf2vf_blkmsg_data_getter_fn callback */
-static u8 adf_pf2vf_blkmsg_get_crc(u8 const *blkmsg, u8 count)
-{
-       /* count is 0-based, turn it into a length */
-       return adf_pfvf_calc_blkmsg_crc(blkmsg, count + 1);
-}
-
-static int adf_pf2vf_blkmsg_get_data(struct adf_accel_vf_info *vf_info,
-                                    u8 type, u8 byte, u8 max_size, u8 *data,
-                                    pf2vf_blkmsg_data_getter_fn data_getter)
-{
-       u8 blkmsg[ADF_PFVF_BLKMSG_MSG_MAX_SIZE] = { 0 };
-       struct adf_accel_dev *accel_dev = vf_info->accel_dev;
-       adf_pf2vf_blkmsg_provider provider;
-       u8 msg_size;
-
-       provider = get_blkmsg_response_provider(type);
-
-       if (unlikely(!provider)) {
-               pr_err("QAT: No registered provider for message %d\n", type);
-               *data = ADF_PF2VF_INVALID_BLOCK_TYPE;
-               return -EINVAL;
-       }
-
-       if (unlikely((*provider)(accel_dev, blkmsg, vf_info->vf_compat_ver))) {
-               pr_err("QAT: unknown error from provider for message %d\n", type);
-               *data = ADF_PF2VF_UNSPECIFIED_ERROR;
-               return -EINVAL;
-       }
-
-       msg_size = ADF_PFVF_BLKMSG_HEADER_SIZE + blkmsg[ADF_PFVF_BLKMSG_LEN_BYTE];
-
-       if (unlikely(msg_size >= max_size)) {
-               pr_err("QAT: Invalid size %d provided for message type %d\n",
-                      msg_size, type);
-               *data = ADF_PF2VF_PAYLOAD_TRUNCATED;
-               return -EINVAL;
-       }
-
-       if (unlikely(byte >= msg_size)) {
-               pr_err("QAT: Out-of-bound byte number %d (msg size %d)\n",
-                      byte, msg_size);
-               *data = ADF_PF2VF_INVALID_BYTE_NUM_REQ;
-               return -EINVAL;
-       }
-
-       *data = data_getter(blkmsg, byte);
-       return 0;
-}
-
-static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info,
-                                            struct pfvf_message req)
-{
-       u8 resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR;
-       struct pfvf_message resp = { 0 };
-       u8 resp_data = 0;
-       u8 blk_type;
-       u8 blk_byte;
-       u8 byte_max;
-
-       switch (req.type) {
-       case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
-               blk_type = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK, req.data)
-                          + ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX + 1;
-               blk_byte = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, req.data);
-               byte_max = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
-               break;
-       case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
-               blk_type = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK, req.data)
-                          + ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1;
-               blk_byte = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, req.data);
-               byte_max = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
-               break;
-       case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
-               blk_type = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, req.data);
-               blk_byte = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, req.data);
-               byte_max = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
-               break;
-       }
-
-       /* Is this a request for CRC or data? */
-       if (FIELD_GET(ADF_VF2PF_BLOCK_CRC_REQ_MASK, req.data)) {
-               dev_dbg(&GET_DEV(vf_info->accel_dev),
-                       "BlockMsg of type %d for CRC over %d bytes received from VF%d\n",
-                       blk_type, blk_byte + 1, vf_info->vf_nr);
-
-               if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
-                                              byte_max, &resp_data,
-                                              adf_pf2vf_blkmsg_get_crc))
-                       resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_CRC;
-       } else {
-               dev_dbg(&GET_DEV(vf_info->accel_dev),
-                       "BlockMsg of type %d for data byte %d received from VF%d\n",
-                       blk_type, blk_byte, vf_info->vf_nr);
-
-               if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
-                                              byte_max, &resp_data,
-                                              adf_pf2vf_blkmsg_get_byte))
-                       resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_DATA;
-       }
-
-       resp.type = ADF_PF2VF_MSGTYPE_BLKMSG_RESP;
-       resp.data = FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp_type) |
-                   FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp_data);
-
-       return resp;
-}
-
-static struct pfvf_message handle_rp_reset_req(struct adf_accel_dev *accel_dev, u8 vf_nr,
-                                              struct pfvf_message req)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct pfvf_message resp = {
-               .type = ADF_PF2VF_MSGTYPE_RP_RESET_RESP,
-               .data = RPRESET_SUCCESS
-       };
-       u32 bank_number;
-       u32 rsvd_field;
-
-       bank_number = FIELD_GET(ADF_VF2PF_RNG_RESET_RP_MASK, req.data);
-       rsvd_field = FIELD_GET(ADF_VF2PF_RNG_RESET_RSVD_MASK, req.data);
-
-       dev_dbg(&GET_DEV(accel_dev),
-               "Ring Pair Reset Message received from VF%d for bank 0x%x\n",
-               vf_nr, bank_number);
-
-       if (!hw_data->ring_pair_reset || rsvd_field) {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Ring Pair Reset for VF%d is not supported\n", vf_nr);
-               resp.data = RPRESET_NOT_SUPPORTED;
-               goto out;
-       }
-
-       if (bank_number >= hw_data->num_banks_per_vf) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid bank number (0x%x) from VF%d for Ring Reset\n",
-                       bank_number, vf_nr);
-               resp.data = RPRESET_INVAL_BANK;
-               goto out;
-       }
-
-       /* Convert the VF provided value to PF bank number */
-       bank_number = vf_nr * hw_data->num_banks_per_vf + bank_number;
-       if (hw_data->ring_pair_reset(accel_dev, bank_number)) {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Ring pair reset for VF%d failure\n", vf_nr);
-               resp.data = RPRESET_TIMEOUT;
-               goto out;
-       }
-
-       dev_dbg(&GET_DEV(accel_dev),
-               "Ring pair reset for VF%d successfully\n", vf_nr);
-
-out:
-       return resp;
-}
-
-static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
-                               struct pfvf_message msg, struct pfvf_message *resp)
-{
-       struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
-
-       switch (msg.type) {
-       case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
-               {
-               u8 vf_compat_ver = msg.data;
-               u8 compat;
-
-               dev_dbg(&GET_DEV(accel_dev),
-                       "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
-                       vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
-
-               if (vf_compat_ver == 0)
-                       compat = ADF_PF2VF_VF_INCOMPATIBLE;
-               else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
-                       compat = ADF_PF2VF_VF_COMPATIBLE;
-               else
-                       compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
-
-               vf_info->vf_compat_ver = vf_compat_ver;
-
-               resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
-               resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK,
-                                       ADF_PFVF_COMPAT_THIS_VERSION) |
-                            FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
-               }
-               break;
-       case ADF_VF2PF_MSGTYPE_VERSION_REQ:
-               {
-               u8 compat;
-
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Legacy VersionRequest received from VF%d to PF (vers 1.1)\n",
-                       vf_nr);
-
-               /* legacy driver, VF compat_ver is 0 */
-               vf_info->vf_compat_ver = 0;
-
-               /* PF always newer than legacy VF */
-               compat = ADF_PF2VF_VF_COMPATIBLE;
-
-               /* Set legacy major and minor version to the latest, 1.1 */
-               resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
-               resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK, 0x11) |
-                            FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
-               }
-               break;
-       case ADF_VF2PF_MSGTYPE_INIT:
-               {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Init message received from VF%d\n", vf_nr);
-               vf_info->init = true;
-               }
-               break;
-       case ADF_VF2PF_MSGTYPE_SHUTDOWN:
-               {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Shutdown message received from VF%d\n", vf_nr);
-               vf_info->init = false;
-               }
-               break;
-       case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
-       case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
-       case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
-               *resp = handle_blkmsg_req(vf_info, msg);
-               break;
-       case ADF_VF2PF_MSGTYPE_RP_RESET:
-               *resp = handle_rp_reset_req(accel_dev, vf_nr, msg);
-               break;
-       default:
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Unknown message from VF%d (type 0x%.4x, data: 0x%.4x)\n",
-                       vf_nr, msg.type, msg.data);
-               return -ENOMSG;
-       }
-
-       return 0;
-}
-
-bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr)
-{
-       struct pfvf_message req;
-       struct pfvf_message resp = {0};
-
-       req = adf_recv_vf2pf_msg(accel_dev, vf_nr);
-       if (!req.type)  /* Legacy or no message */
-               return true;
-
-       if (adf_handle_vf2pf_msg(accel_dev, vf_nr, req, &resp))
-               return false;
-
-       if (resp.type && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to send response to VF%d\n", vf_nr);
-
-       return true;
-}
-
-/**
- * adf_enable_pf2vf_comms() - Enable communication from PF to VF
- *
- * @accel_dev: Pointer to acceleration device.
- *
- * This function carries out the necessary steps to set up and start the PFVF
- * communication channel, if any.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
-{
-       adf_pfvf_crc_init();
-       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
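
handle_blkmsg_req() above decodes a single CSR-sized request into a block type, a byte index and a CRC flag. For reference, a VF-side sketch of how the matching SMALL request word would be packed, using only the FIELD_PREP masks from adf_pfvf_msg.h (the example_ helper itself is hypothetical):

/* Editor's sketch, not driver code */
static struct pfvf_message example_small_blkmsg_req(u8 blk_type, u8 blk_byte,
						    bool want_crc)
{
	struct pfvf_message req = {
		.type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ,
		.data = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, blk_type) |
			FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, blk_byte) |
			FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, want_crc),
	};

	return req;
}
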
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h
deleted file mode 100644 (file)
index 165d266..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_PFVF_PF_PROTO_H
-#define ADF_PFVF_PF_PROTO_H
-
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-
-int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg);
-
-int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
-
-#endif /* ADF_PFVF_PF_PROTO_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.c b/drivers/crypto/qat/qat_common/adf_pfvf_utils.c
deleted file mode 100644 (file)
index c5f6d77..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2021 Intel Corporation */
-#include <linux/crc8.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_utils.h"
-
-/* CRC Calculation */
-DECLARE_CRC8_TABLE(pfvf_crc8_table);
-#define ADF_PFVF_CRC8_POLYNOMIAL 0x97
-
-void adf_pfvf_crc_init(void)
-{
-       crc8_populate_msb(pfvf_crc8_table, ADF_PFVF_CRC8_POLYNOMIAL);
-}
-
-u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len)
-{
-       return crc8(pfvf_crc8_table, buf, buf_len, CRC8_INIT_VALUE);
-}
-
-static bool set_value_on_csr_msg(struct adf_accel_dev *accel_dev, u32 *csr_msg,
-                                u32 value, const struct pfvf_field_format *fmt)
-{
-       if (unlikely((value & fmt->mask) != value)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "PFVF message value 0x%X out of range, %u max allowed\n",
-                       value, fmt->mask);
-               return false;
-       }
-
-       *csr_msg |= value << fmt->offset;
-
-       return true;
-}
-
-u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev,
-                       struct pfvf_message msg,
-                       const struct pfvf_csr_format *fmt)
-{
-       u32 csr_msg = 0;
-
-       if (!set_value_on_csr_msg(accel_dev, &csr_msg, msg.type, &fmt->type) ||
-           !set_value_on_csr_msg(accel_dev, &csr_msg, msg.data, &fmt->data))
-               return 0;
-
-       return csr_msg | ADF_PFVF_MSGORIGIN_SYSTEM;
-}
-
-struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 csr_msg,
-                                       const struct pfvf_csr_format *fmt)
-{
-       struct pfvf_message msg = { 0 };
-
-       msg.type = (csr_msg >> fmt->type.offset) & fmt->type.mask;
-       msg.data = (csr_msg >> fmt->data.offset) & fmt->data.mask;
-
-       if (unlikely(!msg.type))
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid PFVF msg with no type received\n");
-
-       return msg;
-}
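
The two helpers above convert between the abstract pfvf_message and the raw CSR word through a per-generation format descriptor. A round trip with a made-up format (the offsets and masks here are illustrative only; the real ones come from the device's pfvf_ops, and the origin bit is assumed not to overlap the fields):

/* Editor's sketch, not driver code */
static const struct pfvf_csr_format example_fmt = {
	.type = { .offset = 2, .mask = 0x0F },	/* 4-bit type field at bit 2 */
	.data = { .offset = 6, .mask = 0x3FF },	/* 10-bit data field at bit 6 */
};

static void example_csr_roundtrip(struct adf_accel_dev *accel_dev)
{
	struct pfvf_message msg = { .type = 3, .data = 0x2A };
	struct pfvf_message back;
	u32 csr;

	csr = adf_pfvf_csr_msg_of(accel_dev, msg, &example_fmt);
	/* csr now carries both fields plus the ADF_PFVF_MSGORIGIN_SYSTEM marker */
	back = adf_pfvf_message_of(accel_dev, csr, &example_fmt);
	/* back.type == 3 and back.data == 0x2A again */
}
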
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/qat/qat_common/adf_pfvf_utils.h
deleted file mode 100644 (file)
index 2be048e..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_PFVF_UTILS_H
-#define ADF_PFVF_UTILS_H
-
-#include <linux/types.h>
-#include "adf_pfvf_msg.h"
-
-/* How long to wait for far side to acknowledge receipt */
-#define ADF_PFVF_MSG_ACK_DELAY_US      4
-#define ADF_PFVF_MSG_ACK_MAX_DELAY_US  (1 * USEC_PER_SEC)
-
-u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len);
-void adf_pfvf_crc_init(void);
-
-struct pfvf_field_format {
-       u8  offset;
-       u32 mask;
-};
-
-struct pfvf_csr_format {
-       struct pfvf_field_format type;
-       struct pfvf_field_format data;
-};
-
-u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                       const struct pfvf_csr_format *fmt);
-struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg,
-                                       const struct pfvf_csr_format *fmt);
-
-#endif /* ADF_PFVF_UTILS_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
deleted file mode 100644 (file)
index 1141258..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <linux/bitfield.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_vf_msg.h"
-#include "adf_pfvf_vf_proto.h"
-
-/**
- * adf_vf2pf_notify_init() - send init msg to PF
- * @accel_dev:  Pointer to acceleration VF device.
- *
- * Function sends an init message from the VF to the PF
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
-{
-       struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
-
-       if (adf_send_vf2pf_msg(accel_dev, msg)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to send Init event to PF\n");
-               return -EFAULT;
-       }
-       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
-
-/**
- * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
- * @accel_dev:  Pointer to acceleration VF device.
- *
- * Function sends a shutdown message from the VF to the PF
- *
- * Return: void
- */
-void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
-{
-       struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_SHUTDOWN };
-
-       if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
-               if (adf_send_vf2pf_msg(accel_dev, msg))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to send Shutdown event to PF\n");
-}
-EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
-
-int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
-{
-       u8 pf_version;
-       int compat;
-       int ret;
-       struct pfvf_message resp;
-       struct pfvf_message msg = {
-               .type = ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ,
-               .data = ADF_PFVF_COMPAT_THIS_VERSION,
-       };
-
-       BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
-
-       ret = adf_send_vf2pf_req(accel_dev, msg, &resp);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to send Compatibility Version Request.\n");
-               return ret;
-       }
-
-       pf_version = FIELD_GET(ADF_PF2VF_VERSION_RESP_VERS_MASK, resp.data);
-       compat = FIELD_GET(ADF_PF2VF_VERSION_RESP_RESULT_MASK, resp.data);
-
-       /* Response from PF received, check compatibility */
-       switch (compat) {
-       case ADF_PF2VF_VF_COMPATIBLE:
-               break;
-       case ADF_PF2VF_VF_COMPAT_UNKNOWN:
-               /* VF is newer than PF - compatible for now */
-               break;
-       case ADF_PF2VF_VF_INCOMPATIBLE:
-               dev_err(&GET_DEV(accel_dev),
-                       "PF (vers %d) and VF (vers %d) are not compatible\n",
-                       pf_version, ADF_PFVF_COMPAT_THIS_VERSION);
-               return -EINVAL;
-       default:
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid response from PF; assume not compatible\n");
-               return -EINVAL;
-       }
-
-       accel_dev->vf.pf_compat_ver = pf_version;
-       return 0;
-}
-
-int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct capabilities_v3 cap_msg = { 0 };
-       unsigned int len = sizeof(cap_msg);
-
-       if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
-               /* The PF is too old to support the extended capabilities */
-               return 0;
-
-       if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY,
-                                     (u8 *)&cap_msg, &len)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "QAT: Failed to get block message response\n");
-               return -EFAULT;
-       }
-
-       switch (cap_msg.hdr.version) {
-       default:
-               /* Newer version received, handle only the known parts */
-               fallthrough;
-       case ADF_PFVF_CAPABILITIES_V3_VERSION:
-               if (likely(len >= sizeof(struct capabilities_v3)))
-                       hw_data->clock_frequency = cap_msg.frequency;
-               else
-                       dev_info(&GET_DEV(accel_dev), "Could not get frequency");
-               fallthrough;
-       case ADF_PFVF_CAPABILITIES_V2_VERSION:
-               if (likely(len >= sizeof(struct capabilities_v2)))
-                       hw_data->accel_capabilities_mask = cap_msg.capabilities;
-               else
-                       dev_info(&GET_DEV(accel_dev), "Could not get capabilities");
-               fallthrough;
-       case ADF_PFVF_CAPABILITIES_V1_VERSION:
-               if (likely(len >= sizeof(struct capabilities_v1))) {
-                       hw_data->extended_dc_capabilities = cap_msg.ext_dc_caps;
-               } else {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Capabilities message truncated to %d bytes\n", len);
-                       return -EFAULT;
-               }
-       }
-
-       return 0;
-}
-
-int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
-{
-       struct ring_to_svc_map_v1 rts_map_msg = { 0 };
-       unsigned int len = sizeof(rts_map_msg);
-
-       if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
-               /* Use already set default mappings */
-               return 0;
-
-       if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP,
-                                     (u8 *)&rts_map_msg, &len)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "QAT: Failed to get block message response\n");
-               return -EFAULT;
-       }
-
-       if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) {
-               dev_err(&GET_DEV(accel_dev),
-                       "RING_TO_SVC message truncated to %d bytes\n", len);
-               return -EFAULT;
-       }
-
-       /* Only v1 at present */
-       accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
-       return 0;
-}
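
The fallthrough cascade in adf_vf2pf_get_capabilities() above is what makes the capability structs forward and backward compatible: each version only appends fields, and the received length decides how far down the cascade the fields can be trusted. A hypothetical v4 would extend the scheme like this (example_new_field is invented; the v1-v3 fields match the struct used above):

/* Editor's sketch, not driver code */
struct example_capabilities_v4 {
	struct pfvf_blkmsg_header hdr;
	u32 ext_dc_caps;	/* present since v1 */
	u32 capabilities;	/* added in v2 */
	u32 frequency;		/* added in v3 */
	u32 example_new_field;	/* hypothetical v4 addition */
} __packed;

An older PF simply reports a shorter payload, so the VF falls through to the newest struct it fully received and ignores the rest.
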
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h
deleted file mode 100644 (file)
index 71bc0e3..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_PFVF_VF_MSG_H
-#define ADF_PFVF_VF_MSG_H
-
-#if defined(CONFIG_PCI_IOV)
-int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
-void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
-int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
-int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
-int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);
-#else
-static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
-{
-       return 0;
-}
-
-static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
-{
-}
-#endif
-
-#endif /* ADF_PFVF_VF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c
deleted file mode 100644 (file)
index 1015155..0000000
+++ /dev/null
@@ -1,368 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <linux/bitfield.h>
-#include <linux/completion.h>
-#include <linux/minmax.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_utils.h"
-#include "adf_pfvf_vf_msg.h"
-#include "adf_pfvf_vf_proto.h"
-
-#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY    10
-#define ADF_PFVF_MSG_ACK_DELAY                 2
-#define ADF_PFVF_MSG_ACK_MAX_RETRY             100
-
-/* How often to retry if there is no response */
-#define ADF_PFVF_MSG_RESP_RETRIES      5
-#define ADF_PFVF_MSG_RESP_TIMEOUT      (ADF_PFVF_MSG_ACK_DELAY * \
-                                        ADF_PFVF_MSG_ACK_MAX_RETRY + \
-                                        ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
-
-/**
- * adf_send_vf2pf_msg() - send VF to PF message
- * @accel_dev: Pointer to acceleration device
- * @msg:       Message to send
- *
- * This function allows the VF to send a message to the PF.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)
-{
-       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
-       u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(0);
-
-       return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
-                                 &accel_dev->vf.vf2pf_lock);
-}
-
-/**
- * adf_recv_pf2vf_msg() - receive a PF to VF message
- * @accel_dev: Pointer to acceleration device
- *
- * This function allows the VF to receive a message from the PF.
- *
- * Return: a valid message on success, zero otherwise.
- */
-static struct pfvf_message adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev)
-{
-       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
-       u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(0);
-
-       return pfvf_ops->recv_msg(accel_dev, pfvf_offset, accel_dev->vf.pf_compat_ver);
-}
-
-/**
- * adf_send_vf2pf_req() - send VF2PF request message
- * @accel_dev: Pointer to acceleration device.
- * @msg:       Request message to send
- * @resp:      Returned PF response
- *
- * This function sends a request message from the VF to the PF and waits
- * for a reply, retrying on timeout.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                      struct pfvf_message *resp)
-{
-       unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
-       unsigned int retries = ADF_PFVF_MSG_RESP_RETRIES;
-       int ret;
-
-       reinit_completion(&accel_dev->vf.msg_received);
-
-       /* Send request from VF to PF */
-       do {
-               ret = adf_send_vf2pf_msg(accel_dev, msg);
-               if (ret) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to send request msg to PF\n");
-                       return ret;
-               }
-
-               /* Wait for response, if it times out retry */
-               ret = wait_for_completion_timeout(&accel_dev->vf.msg_received,
-                                                 timeout);
-               if (ret) {
-                       if (likely(resp))
-                               *resp = accel_dev->vf.response;
-
-                       /* Once copied, set to an invalid value */
-                       accel_dev->vf.response.type = 0;
-
-                       return 0;
-               }
-
-               dev_err(&GET_DEV(accel_dev), "PFVF response message timeout\n");
-       } while (--retries);
-
-       return -EIO;
-}
-
-static int adf_vf2pf_blkmsg_data_req(struct adf_accel_dev *accel_dev, bool crc,
-                                    u8 *type, u8 *data)
-{
-       struct pfvf_message req = { 0 };
-       struct pfvf_message resp = { 0 };
-       u8 blk_type;
-       u8 blk_byte;
-       u8 msg_type;
-       u8 max_data;
-       int err;
-
-       /* Convert the block type to {small, medium, large} size category */
-       if (*type <= ADF_VF2PF_SMALL_BLOCK_TYPE_MAX) {
-               msg_type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ;
-               blk_type = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, *type);
-               blk_byte = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, *data);
-               max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
-       } else if (*type <= ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) {
-               msg_type = ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ;
-               blk_type = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK,
-                                     *type - ADF_VF2PF_SMALL_BLOCK_TYPE_MAX);
-               blk_byte = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, *data);
-               max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
-       } else if (*type <= ADF_VF2PF_LARGE_BLOCK_TYPE_MAX) {
-               msg_type = ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ;
-               blk_type = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK,
-                                     *type - ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX);
-               blk_byte = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, *data);
-               max_data = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
-       } else {
-               dev_err(&GET_DEV(accel_dev), "Invalid message type %u\n", *type);
-               return -EINVAL;
-       }
-
-       /* Sanity check */
-       if (*data > max_data) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid byte %s %u for message type %u\n",
-                       crc ? "count" : "index", *data, *type);
-               return -EINVAL;
-       }
-
-       /* Build the block message */
-       req.type = msg_type;
-       req.data = blk_type | blk_byte | FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, crc);
-
-       err = adf_send_vf2pf_req(accel_dev, req, &resp);
-       if (err)
-               return err;
-
-       *type = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp.data);
-       *data = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp.data);
-
-       return 0;
-}
-
-static int adf_vf2pf_blkmsg_get_byte(struct adf_accel_dev *accel_dev, u8 type,
-                                    u8 index, u8 *data)
-{
-       int ret;
-
-       ret = adf_vf2pf_blkmsg_data_req(accel_dev, false, &type, &index);
-       if (ret < 0)
-               return ret;
-
-       if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_DATA)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Unexpected BLKMSG response type %u, byte 0x%x\n",
-                       type, index);
-               return -EFAULT;
-       }
-
-       *data = index;
-       return 0;
-}
-
-static int adf_vf2pf_blkmsg_get_crc(struct adf_accel_dev *accel_dev, u8 type,
-                                   u8 bytes, u8 *crc)
-{
-       int ret;
-
-       /* The count of bytes refers to a length, however shift it to a 0-based
-        * count to avoid overflows. Thus, a request for 0 bytes is technically
-        * valid.
-        */
-       --bytes;
-
-       ret = adf_vf2pf_blkmsg_data_req(accel_dev, true, &type, &bytes);
-       if (ret < 0)
-               return ret;
-
-       if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_CRC)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Unexpected CRC BLKMSG response type %u, crc 0x%x\n",
-                       type, bytes);
-               return  -EFAULT;
-       }
-
-       *crc = bytes;
-       return 0;
-}
-
-/**
- * adf_send_vf2pf_blkmsg_req() - retrieve block message
- * @accel_dev: Pointer to acceleration VF device.
- * @type:      The block message type, see adf_pfvf_msg.h for allowed values
- * @buffer:    buffer where the received data will be placed
- * @buffer_len:        buffer length as input, number of bytes written on output
- *
- * Request a message of type 'type' over the block message transport.
- * This function will send the required number of block message requests and
- * return the overall content back to the caller through the provided buffer.
- * The buffer should be large enough to contain the requested message type,
- * otherwise the response will be truncated.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
-                             u8 *buffer, unsigned int *buffer_len)
-{
-       unsigned int index;
-       unsigned int msg_len;
-       int ret;
-       u8 remote_crc;
-       u8 local_crc;
-
-       if (unlikely(type > ADF_VF2PF_LARGE_BLOCK_TYPE_MAX)) {
-               dev_err(&GET_DEV(accel_dev), "Invalid block message type %d\n",
-                       type);
-               return -EINVAL;
-       }
-
-       if (unlikely(*buffer_len < ADF_PFVF_BLKMSG_HEADER_SIZE)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Buffer size too small for a block message\n");
-               return -EINVAL;
-       }
-
-       ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
-                                       ADF_PFVF_BLKMSG_VER_BYTE,
-                                       &buffer[ADF_PFVF_BLKMSG_VER_BYTE]);
-       if (unlikely(ret))
-               return ret;
-
-       if (unlikely(!buffer[ADF_PFVF_BLKMSG_VER_BYTE])) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid version 0 received for block request %u", type);
-               return -EFAULT;
-       }
-
-       ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
-                                       ADF_PFVF_BLKMSG_LEN_BYTE,
-                                       &buffer[ADF_PFVF_BLKMSG_LEN_BYTE]);
-       if (unlikely(ret))
-               return ret;
-
-       if (unlikely(!buffer[ADF_PFVF_BLKMSG_LEN_BYTE])) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid size 0 received for block request %u", type);
-               return -EFAULT;
-       }
-
-       /* We need to pick the minimum since there is no way to request a
-        * specific version. As a consequence any scenario is possible:
-        * - PF has a newer (longer) version which doesn't fit in the buffer
-        * - VF expects a newer (longer) version, so we must not ask for
-        *   bytes in excess
-        * - PF and VF share the same version, no problem
-        */
-       msg_len = ADF_PFVF_BLKMSG_HEADER_SIZE + buffer[ADF_PFVF_BLKMSG_LEN_BYTE];
-       msg_len = min(*buffer_len, msg_len);
-
-       /* Get the payload */
-       for (index = ADF_PFVF_BLKMSG_HEADER_SIZE; index < msg_len; index++) {
-               ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, index,
-                                               &buffer[index]);
-               if (unlikely(ret))
-                       return ret;
-       }
-
-       ret = adf_vf2pf_blkmsg_get_crc(accel_dev, type, msg_len, &remote_crc);
-       if (unlikely(ret))
-               return ret;
-
-       local_crc = adf_pfvf_calc_blkmsg_crc(buffer, msg_len);
-       if (unlikely(local_crc != remote_crc)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "CRC error on msg type %d. Local %02X, remote %02X\n",
-                       type, local_crc, remote_crc);
-               return -EIO;
-       }
-
-       *buffer_len = msg_len;
-       return 0;
-}
-
-static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
-                                struct pfvf_message msg)
-{
-       switch (msg.type) {
-       case ADF_PF2VF_MSGTYPE_RESTARTING:
-               dev_dbg(&GET_DEV(accel_dev), "Restarting message received from PF\n");
-
-               adf_pf2vf_handle_pf_restarting(accel_dev);
-               return false;
-       case ADF_PF2VF_MSGTYPE_VERSION_RESP:
-       case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
-       case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Response Message received from PF (type 0x%.4x, data 0x%.4x)\n",
-                       msg.type, msg.data);
-               accel_dev->vf.response = msg;
-               complete(&accel_dev->vf.msg_received);
-               return true;
-       default:
-               dev_err(&GET_DEV(accel_dev),
-                       "Unknown message from PF (type 0x%.4x, data: 0x%.4x)\n",
-                       msg.type, msg.data);
-       }
-
-       return false;
-}
-
-bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev)
-{
-       struct pfvf_message msg;
-
-       msg = adf_recv_pf2vf_msg(accel_dev);
-       if (msg.type)  /* Zero type means invalid or no message */
-               return adf_handle_pf2vf_msg(accel_dev, msg);
-
-       /* No replies for PF->VF messages at present */
-
-       return true;
-}
-
-/**
- * adf_enable_vf2pf_comms() - Enable communication from VF to PF
- *
- * @accel_dev: Pointer to acceleration device virtual function.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
-{
-       int ret;
-
-       adf_pfvf_crc_init();
-       adf_enable_pf2vf_interrupts(accel_dev);
-
-       ret = adf_vf2pf_request_version(accel_dev);
-       if (ret)
-               return ret;
-
-       ret = adf_vf2pf_get_capabilities(accel_dev);
-       if (ret)
-               return ret;
-
-       ret = adf_vf2pf_get_ring_to_svc(accel_dev);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
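
Note the cost model implied by adf_send_vf2pf_blkmsg_req() above: the transport moves one byte per request/response exchange, so fetching a block message takes one round trip per header byte and payload byte, plus one more for the CRC. A small sketch of the arithmetic (editor's illustration, not driver code):

/* Round trips needed to fetch a block message with an n-byte payload:
 * 2 header bytes (version, length) + n payload bytes + 1 CRC exchange.
 */
static unsigned int example_blkmsg_round_trips(u8 payload_len)
{
	return ADF_PFVF_BLKMSG_HEADER_SIZE + payload_len + 1;
}

A 12-byte payload, for instance, costs 2 + 12 + 1 = 15 exchanges, which is why block messages are reserved for rarely-fetched data such as capabilities.
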
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h
deleted file mode 100644 (file)
index f6ee9b3..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_PFVF_VF_PROTO_H
-#define ADF_PFVF_VF_PROTO_H
-
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-
-int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg);
-int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                      struct pfvf_message *resp);
-int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
-                             u8 *buffer, unsigned int *buffer_len);
-
-int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
-
-#endif /* ADF_PFVF_VF_PROTO_H */
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
deleted file mode 100644 (file)
index d85a90c..0000000
+++ /dev/null
@@ -1,221 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <linux/workqueue.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include "adf_common_drv.h"
-#include "adf_cfg.h"
-#include "adf_pfvf_pf_msg.h"
-
-#define ADF_VF2PF_RATELIMIT_INTERVAL   8
-#define ADF_VF2PF_RATELIMIT_BURST      130
-
-static struct workqueue_struct *pf2vf_resp_wq;
-
-struct adf_pf2vf_resp {
-       struct work_struct pf2vf_resp_work;
-       struct adf_accel_vf_info *vf_info;
-};
-
-static void adf_iov_send_resp(struct work_struct *work)
-{
-       struct adf_pf2vf_resp *pf2vf_resp =
-               container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
-       struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
-       struct adf_accel_dev *accel_dev = vf_info->accel_dev;
-       u32 vf_nr = vf_info->vf_nr;
-       bool ret;
-
-       ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
-       if (ret)
-               /* re-enable interrupt on PF from this VF */
-               adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
-
-       kfree(pf2vf_resp);
-}
-
-void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
-{
-       struct adf_pf2vf_resp *pf2vf_resp;
-
-       pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
-       if (!pf2vf_resp)
-               return;
-
-       pf2vf_resp->vf_info = vf_info;
-       INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
-       queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
-}
-
-static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-       int totalvfs = pci_sriov_get_totalvfs(pdev);
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_accel_vf_info *vf_info;
-       int i;
-
-       for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
-            i++, vf_info++) {
-               /* This ptr will be populated when VFs are created */
-               vf_info->accel_dev = accel_dev;
-               vf_info->vf_nr = i;
-               vf_info->vf_compat_ver = 0;
-
-               mutex_init(&vf_info->pf2vf_lock);
-               ratelimit_state_init(&vf_info->vf2pf_ratelimit,
-                                    ADF_VF2PF_RATELIMIT_INTERVAL,
-                                    ADF_VF2PF_RATELIMIT_BURST);
-       }
-
-       /* Set Valid bits in AE Thread to PCIe Function Mapping */
-       if (hw_data->configure_iov_threads)
-               hw_data->configure_iov_threads(accel_dev, true);
-
-       /* Enable VF to PF interrupts for all VFs */
-       adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
-
-       /*
-        * Due to the hardware design, when SR-IOV and the ring arbiter
-        * are enabled all the VFs supported in hardware must be enabled in
-        * order for all the hardware resources (i.e. bundles) to be usable.
-        * When SR-IOV is enabled, each of the VFs will own one bundle.
-        */
-       return pci_enable_sriov(pdev, totalvfs);
-}
-
-/**
- * adf_disable_sriov() - Disable SRIOV for the device
- * @accel_dev:  Pointer to accel device.
- *
- * Function disables SRIOV for the accel device.
- *
- * Return: void
- */
-void adf_disable_sriov(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
-       struct adf_accel_vf_info *vf;
-       int i;
-
-       if (!accel_dev->pf.vf_info)
-               return;
-
-       adf_pf2vf_notify_restarting(accel_dev);
-       pci_disable_sriov(accel_to_pci_dev(accel_dev));
-
-       /* Disable VF to PF interrupts */
-       adf_disable_all_vf2pf_interrupts(accel_dev);
-
-       /* Clear Valid bits in AE Thread to PCIe Function Mapping */
-       if (hw_data->configure_iov_threads)
-               hw_data->configure_iov_threads(accel_dev, false);
-
-       for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
-               mutex_destroy(&vf->pf2vf_lock);
-
-       kfree(accel_dev->pf.vf_info);
-       accel_dev->pf.vf_info = NULL;
-}
-EXPORT_SYMBOL_GPL(adf_disable_sriov);
-
-/**
- * adf_sriov_configure() - Enable SRIOV for the device
- * @pdev:  Pointer to PCI device.
- * @numvfs: Number of virtual functions (VFs) to enable.
- *
- * Note that the @numvfs parameter is ignored and all VFs supported by the
- * device are enabled due to the design of the hardware.
- *
- * Function enables SRIOV for the PCI device.
- *
- * Return: number of VFs enabled on success, error code otherwise.
- */
-int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-       int totalvfs = pci_sriov_get_totalvfs(pdev);
-       unsigned long val;
-       int ret;
-
-       if (!accel_dev) {
-               dev_err(&pdev->dev, "Failed to find accel_dev\n");
-               return -EFAULT;
-       }
-
-       if (!device_iommu_mapped(&pdev->dev))
-               dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
-
-       if (accel_dev->pf.vf_info) {
-               dev_info(&pdev->dev, "Already enabled for this device\n");
-               return -EINVAL;
-       }
-
-       if (adf_dev_started(accel_dev)) {
-               if (adf_devmgr_in_reset(accel_dev) ||
-                   adf_dev_in_use(accel_dev)) {
-                       dev_err(&GET_DEV(accel_dev), "Device busy\n");
-                       return -EBUSY;
-               }
-
-               ret = adf_dev_shutdown_cache_cfg(accel_dev);
-               if (ret)
-                       return ret;
-       }
-
-       if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
-               return -EFAULT;
-       val = 0;
-       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                       ADF_NUM_CY, (void *)&val, ADF_DEC))
-               return -EFAULT;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
-                                         &val, ADF_DEC);
-       if (ret)
-               return ret;
-
-       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-
-       /* Allocate memory for VF info structs */
-       accel_dev->pf.vf_info = kcalloc(totalvfs,
-                                       sizeof(struct adf_accel_vf_info),
-                                       GFP_KERNEL);
-       if (!accel_dev->pf.vf_info)
-               return -ENOMEM;
-
-       if (adf_dev_init(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
-                       accel_dev->accel_id);
-               return -EFAULT;
-       }
-
-       if (adf_dev_start(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
-                       accel_dev->accel_id);
-               return -EFAULT;
-       }
-
-       ret = adf_enable_sriov(accel_dev);
-       if (ret)
-               return ret;
-
-       return numvfs;
-}
-EXPORT_SYMBOL_GPL(adf_sriov_configure);
-
-int __init adf_init_pf_wq(void)
-{
-       /* Workqueue for PF2VF responses */
-       pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
-
-       return !pf2vf_resp_wq ? -ENOMEM : 0;
-}
-
-void adf_exit_pf_wq(void)
-{
-       if (pf2vf_resp_wq) {
-               destroy_workqueue(pf2vf_resp_wq);
-               pf2vf_resp_wq = NULL;
-       }
-}
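
adf_schedule_vf2pf_handler() above is an instance of a common bottom-half pattern: the VF2PF interrupt handler cannot sleep, so it allocates a work item with GFP_ATOMIC and defers the actual message exchange to the dedicated pf2vf_resp_wq workqueue. Stripped of the driver specifics, the pattern looks like this (a generic sketch; all example_* names are invented):

/* Editor's sketch, not driver code */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_deferred {
	struct work_struct work;
	int payload;
};

static void example_worker(struct work_struct *work)
{
	struct example_deferred *d =
		container_of(work, struct example_deferred, work);

	/* Process d->payload; sleeping is allowed in process context */
	kfree(d);
}

static void example_schedule(struct workqueue_struct *wq, int payload)
{
	struct example_deferred *d = kzalloc(sizeof(*d), GFP_ATOMIC);

	if (!d)
		return;		/* drop on allocation failure, as above */

	d->payload = payload;
	INIT_WORK(&d->work, example_worker);
	queue_work(wq, &d->work);
}
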
diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/qat/qat_common/adf_sysfs.c
deleted file mode 100644 (file)
index e8b078e..0000000
+++ /dev/null
@@ -1,191 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2022 Intel Corporation */
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include "adf_accel_devices.h"
-#include "adf_cfg.h"
-#include "adf_common_drv.h"
-
-static const char * const state_operations[] = {
-       [DEV_DOWN] = "down",
-       [DEV_UP] = "up",
-};
-
-static ssize_t state_show(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct adf_accel_dev *accel_dev;
-       char *state;
-
-       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
-       if (!accel_dev)
-               return -EINVAL;
-
-       state = adf_dev_started(accel_dev) ? "up" : "down";
-       return sysfs_emit(buf, "%s\n", state);
-}
-
-static ssize_t state_store(struct device *dev, struct device_attribute *attr,
-                          const char *buf, size_t count)
-{
-       struct adf_accel_dev *accel_dev;
-       u32 accel_id;
-       int ret;
-
-       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
-       if (!accel_dev)
-               return -EINVAL;
-
-       accel_id = accel_dev->accel_id;
-
-       if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
-               dev_info(dev, "Device qat_dev%d is busy\n", accel_id);
-               return -EBUSY;
-       }
-
-       ret = sysfs_match_string(state_operations, buf);
-       if (ret < 0)
-               return ret;
-
-       switch (ret) {
-       case DEV_DOWN:
-               if (!adf_dev_started(accel_dev)) {
-                       dev_info(dev, "Device qat_dev%d already down\n",
-                                accel_id);
-                       return -EINVAL;
-               }
-
-               dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
-
-               ret = adf_dev_shutdown_cache_cfg(accel_dev);
-               if (ret < 0)
-                       return -EINVAL;
-
-               break;
-       case DEV_UP:
-               if (adf_dev_started(accel_dev)) {
-                       dev_info(dev, "Device qat_dev%d already up\n",
-                                accel_id);
-                       return -EINVAL;
-               }
-
-               dev_info(dev, "Starting device qat_dev%d\n", accel_id);
-
-               ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
-               if (!ret)
-                       ret = adf_dev_init(accel_dev);
-               if (!ret)
-                       ret = adf_dev_start(accel_dev);
-
-               if (ret < 0) {
-                       dev_err(dev, "Failed to start device qat_dev%d\n",
-                               accel_id);
-                       adf_dev_shutdown_cache_cfg(accel_dev);
-                       return ret;
-               }
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return count;
-}
-
-static const char * const services_operations[] = {
-       ADF_CFG_CY,
-       ADF_CFG_DC,
-};
-
-static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
-                                char *buf)
-{
-       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
-       struct adf_accel_dev *accel_dev;
-       int ret;
-
-       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
-       if (!accel_dev)
-               return -EINVAL;
-
-       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
-                                     ADF_SERVICES_ENABLED, services);
-       if (ret)
-               return ret;
-
-       return sysfs_emit(buf, "%s\n", services);
-}
-
-static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
-                                      const char *services)
-{
-       return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
-                                          ADF_SERVICES_ENABLED, services,
-                                          ADF_STR);
-}
-
-static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
-                                 const char *buf, size_t count)
-{
-       struct adf_hw_device_data *hw_data;
-       struct adf_accel_dev *accel_dev;
-       int ret;
-
-       ret = sysfs_match_string(services_operations, buf);
-       if (ret < 0)
-               return ret;
-
-       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
-       if (!accel_dev)
-               return -EINVAL;
-
-       if (adf_dev_started(accel_dev)) {
-               dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
-                        accel_dev->accel_id);
-               return -EINVAL;
-       }
-
-       ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]);
-       if (ret < 0)
-               return ret;
-
-       hw_data = GET_HW_DATA(accel_dev);
-
-       /* Update capabilities mask after change in configuration.
-        * A call to this function is required as capabilities are, at the
-        * moment, tied to the configuration.
-        */
-       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
-       if (!hw_data->accel_capabilities_mask)
-               return -EINVAL;
-
-       return count;
-}
-
-static DEVICE_ATTR_RW(state);
-static DEVICE_ATTR_RW(cfg_services);
-
-static struct attribute *qat_attrs[] = {
-       &dev_attr_state.attr,
-       &dev_attr_cfg_services.attr,
-       NULL,
-};
-
-static struct attribute_group qat_group = {
-       .attrs = qat_attrs,
-       .name = "qat",
-};
-
-int adf_sysfs_init(struct adf_accel_dev *accel_dev)
-{
-       int ret;
-
-       ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to create qat attribute group: %d\n", ret);
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_sysfs_init);
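
Both store handlers above rely on sysfs_match_string() to map the written string onto an index into the same table that names the values, keeping parsing and dispatch in sync. The idiom in isolation (editor's sketch; the example_* names are invented):

/* Editor's sketch, not driver code */
static const char * const example_ops[] = { "off", "on" };

static ssize_t example_store(const char *buf, size_t count)
{
	int idx = sysfs_match_string(example_ops, buf);

	if (idx < 0)
		return idx;	/* no match: propagate -EINVAL */

	/* idx == 0 for "off", idx == 1 for "on" */
	return count;
}

Because the group is registered with devm_device_add_group() under the name "qat", the attributes appear in a qat/ subdirectory of the PCI device's sysfs node and are removed automatically when the device goes away.
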
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
deleted file mode 100644 (file)
index 630d048..0000000
+++ /dev/null
@@ -1,577 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/delay.h>
-#include <linux/nospec.h>
-#include "adf_accel_devices.h"
-#include "adf_transport_internal.h"
-#include "adf_transport_access_macros.h"
-#include "adf_cfg.h"
-#include "adf_common_drv.h"
-
-#define ADF_MAX_RING_THRESHOLD         80
-#define ADF_PERCENT(tot, percent)      (((tot) * (percent)) / 100)
-
-static inline u32 adf_modulo(u32 data, u32 shift)
-{
-       u32 div = data >> shift;
-       u32 mult = div << shift;
-
-       return data - mult;
-}
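/* Editor's note (not part of the file): adf_modulo(data, shift) computes
 * data % (1 << shift) without a divide, which is safe here because ring
 * sizes are powers of two. Worked example for shift = 10 (modulo 1024):
 *   div  = 3000 >> 10 = 2
 *   mult = 2 << 10    = 2048
 *   3000 - 2048       = 952 == 3000 % 1024
 * It is equivalent to data & ((1 << shift) - 1).
 */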
-
-static inline int adf_check_ring_alignment(u64 addr, u64 size)
-{
-       if (((size - 1) & addr) != 0)
-               return -EFAULT;
-       return 0;
-}
-
-static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
-{
-       int i = ADF_MIN_RING_SIZE;
-
-       for (; i <= ADF_MAX_RING_SIZE; i++)
-               if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
-                       return i;
-
-       return ADF_DEFAULT_RING_SIZE;
-}
-
-static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
-{
-       spin_lock(&bank->lock);
-       if (bank->ring_mask & (1 << ring)) {
-               spin_unlock(&bank->lock);
-               return -EFAULT;
-       }
-       bank->ring_mask |= (1 << ring);
-       spin_unlock(&bank->lock);
-       return 0;
-}
-
-static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
-{
-       spin_lock(&bank->lock);
-       bank->ring_mask &= ~(1 << ring);
-       spin_unlock(&bank->lock);
-}
-
-static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       spin_lock_bh(&bank->lock);
-       bank->irq_mask |= (1 << ring);
-       spin_unlock_bh(&bank->lock);
-       csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
-                                     bank->irq_mask);
-       csr_ops->write_csr_int_col_ctl(bank->csr_addr, bank->bank_number,
-                                      bank->irq_coalesc_timer);
-}
-
-static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       spin_lock_bh(&bank->lock);
-       bank->irq_mask &= ~(1 << ring);
-       spin_unlock_bh(&bank->lock);
-       csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
-                                     bank->irq_mask);
-}
-
-bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
-{
-       return atomic_read(ring->inflights) > ring->threshold;
-}
-
-int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
-
-       if (atomic_add_return(1, ring->inflights) >
-           ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
-               atomic_dec(ring->inflights);
-               return -EAGAIN;
-       }
-       spin_lock_bh(&ring->lock);
-       memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
-              ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
-
-       ring->tail = adf_modulo(ring->tail +
-                               ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
-                               ADF_RING_SIZE_MODULO(ring->ring_size));
-       csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
-                                    ring->bank->bank_number, ring->ring_number,
-                                    ring->tail);
-       spin_unlock_bh(&ring->lock);
-
-       return 0;
-}
-
-static int adf_handle_response(struct adf_etr_ring_data *ring)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
-       u32 msg_counter = 0;
-       u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
-
-       while (*msg != ADF_RING_EMPTY_SIG) {
-               ring->callback((u32 *)msg);
-               atomic_dec(ring->inflights);
-               *msg = ADF_RING_EMPTY_SIG;
-               ring->head = adf_modulo(ring->head +
-                                       ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
-                                       ADF_RING_SIZE_MODULO(ring->ring_size));
-               msg_counter++;
-               msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
-       }
-       if (msg_counter > 0) {
-               csr_ops->write_csr_ring_head(ring->bank->csr_addr,
-                                            ring->bank->bank_number,
-                                            ring->ring_number, ring->head);
-       }
-       return 0;
-}
-
-static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
-       u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);
-
-       csr_ops->write_csr_ring_config(ring->bank->csr_addr,
-                                      ring->bank->bank_number,
-                                      ring->ring_number, ring_config);
-}
-
-static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
-       u32 ring_config =
-                       BUILD_RESP_RING_CONFIG(ring->ring_size,
-                                              ADF_RING_NEAR_WATERMARK_512,
-                                              ADF_RING_NEAR_WATERMARK_0);
-
-       csr_ops->write_csr_ring_config(ring->bank->csr_addr,
-                                      ring->bank->bank_number,
-                                      ring->ring_number, ring_config);
-}
-
-static int adf_init_ring(struct adf_etr_ring_data *ring)
-{
-       struct adf_etr_bank_data *bank = ring->bank;
-       struct adf_accel_dev *accel_dev = bank->accel_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
-       u64 ring_base;
-       u32 ring_size_bytes =
-                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
-
-       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
-       ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
-                                            ring_size_bytes, &ring->dma_addr,
-                                            GFP_KERNEL);
-       if (!ring->base_addr)
-               return -ENOMEM;
-
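-       /* 0x7F in every byte forms ADF_RING_EMPTY_SIG (0x7F7F7F7F) per slot */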
-       memset(ring->base_addr, 0x7F, ring_size_bytes);
-       /* The base_addr has to be aligned to the size of the buffer */
-       if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
-               dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
-               dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
-                                 ring->base_addr, ring->dma_addr);
-               ring->base_addr = NULL;
-               return -EFAULT;
-       }
-
-       if (hw_data->tx_rings_mask & (1 << ring->ring_number))
-               adf_configure_tx_ring(ring);
-       else
-               adf_configure_rx_ring(ring);
-
-       ring_base = csr_ops->build_csr_ring_base_addr(ring->dma_addr,
-                                                     ring->ring_size);
-
-       csr_ops->write_csr_ring_base(ring->bank->csr_addr,
-                                    ring->bank->bank_number, ring->ring_number,
-                                    ring_base);
-       spin_lock_init(&ring->lock);
-       return 0;
-}
-
-static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
-{
-       u32 ring_size_bytes =
-                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
-
-       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
-
-       if (ring->base_addr) {
-               memset(ring->base_addr, 0x7F, ring_size_bytes);
-               dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
-                                 ring_size_bytes, ring->base_addr,
-                                 ring->dma_addr);
-       }
-}
-
-int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
-                   u32 bank_num, u32 num_msgs,
-                   u32 msg_size, const char *ring_name,
-                   adf_callback_fn callback, int poll_mode,
-                   struct adf_etr_ring_data **ring_ptr)
-{
-       struct adf_etr_data *transport_data = accel_dev->transport;
-       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
-       struct adf_etr_bank_data *bank;
-       struct adf_etr_ring_data *ring;
-       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-       int max_inflights;
-       u32 ring_num;
-       int ret;
-
-       if (bank_num >= GET_MAX_BANKS(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
-               return -EFAULT;
-       }
-       if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
-               dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
-               return -EFAULT;
-       }
-       if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
-                             ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid ring size for given msg size\n");
-               return -EFAULT;
-       }
-       if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
-               dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
-                       section, ring_name);
-               return -EFAULT;
-       }
-       if (kstrtouint(val, 10, &ring_num)) {
-               dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
-               return -EFAULT;
-       }
-       if (ring_num >= num_rings_per_bank) {
-               dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
-               return -EFAULT;
-       }
-
-       ring_num = array_index_nospec(ring_num, num_rings_per_bank);
-       bank = &transport_data->banks[bank_num];
-       if (adf_reserve_ring(bank, ring_num)) {
-               dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
-                       ring_num, ring_name);
-               return -EFAULT;
-       }
-       ring = &bank->rings[ring_num];
-       ring->ring_number = ring_num;
-       ring->bank = bank;
-       ring->callback = callback;
-       ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
-       ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
-       ring->head = 0;
-       ring->tail = 0;
-       max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
-       ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
-       atomic_set(ring->inflights, 0);
-       ret = adf_init_ring(ring);
-       if (ret)
-               goto err;
-
-       /* Enable HW arbitration for the given ring */
-       adf_update_ring_arb(ring);
-
-       if (adf_ring_debugfs_add(ring, ring_name)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Couldn't add ring debugfs entry\n");
-               ret = -EFAULT;
-               goto err;
-       }
-
-       /* Enable interrupts if needed */
-       if (callback && (!poll_mode))
-               adf_enable_ring_irq(bank, ring->ring_number);
-       *ring_ptr = ring;
-       return 0;
-err:
-       adf_cleanup_ring(ring);
-       adf_unreserve_ring(bank, ring_num);
-       adf_update_ring_arb(ring);
-       return ret;
-}
-
-void adf_remove_ring(struct adf_etr_ring_data *ring)
-{
-       struct adf_etr_bank_data *bank = ring->bank;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       /* Disable interrupts for the given ring */
-       adf_disable_ring_irq(bank, ring->ring_number);
-
-       /* Clear the ring configuration and ring base CSRs */
-       csr_ops->write_csr_ring_config(bank->csr_addr, bank->bank_number,
-                                      ring->ring_number, 0);
-       csr_ops->write_csr_ring_base(bank->csr_addr, bank->bank_number,
-                                    ring->ring_number, 0);
-       adf_ring_debugfs_rm(ring);
-       adf_unreserve_ring(bank, ring->ring_number);
-       /* Disable HW arbitration for the given ring */
-       adf_update_ring_arb(ring);
-       adf_cleanup_ring(ring);
-}
-
-static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
-{
-       struct adf_accel_dev *accel_dev = bank->accel_dev;
-       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
-       unsigned long empty_rings;
-       int i;
-
-       empty_rings = csr_ops->read_csr_e_stat(bank->csr_addr,
-                                              bank->bank_number);
-       empty_rings = ~empty_rings & bank->irq_mask;
-
-       for_each_set_bit(i, &empty_rings, num_rings_per_bank)
-               adf_handle_response(&bank->rings[i]);
-}
-
-void adf_response_handler(uintptr_t bank_addr)
-{
-       struct adf_etr_bank_data *bank = (void *)bank_addr;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       /* Handle all the responses and reenable IRQs */
-       adf_ring_response_handler(bank);
-
-       csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
-                                           bank->irq_mask);
-}
-
-static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
-                                 const char *section, const char *format,
-                                 u32 key, u32 *value)
-{
-       char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-
-       snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
-
-       if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
-               return -EFAULT;
-
-       if (kstrtouint(val_buf, 10, value))
-               return -EFAULT;
-       return 0;
-}
-
-static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
-                                 const char *section,
-                                 u32 bank_num_in_accel)
-{
-       if (adf_get_cfg_int(bank->accel_dev, section,
-                           ADF_ETRMGR_COALESCE_TIMER_FORMAT,
-                           bank_num_in_accel, &bank->irq_coalesc_timer))
-               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
-
-       if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
-           ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
-               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
-}
-
-static int adf_init_bank(struct adf_accel_dev *accel_dev,
-                        struct adf_etr_bank_data *bank,
-                        u32 bank_num, void __iomem *csr_addr)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u8 num_rings_per_bank = hw_data->num_rings_per_bank;
-       struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
-       u32 irq_mask = BIT(num_rings_per_bank) - 1;
-       struct adf_etr_ring_data *ring;
-       struct adf_etr_ring_data *tx_ring;
-       u32 i, coalesc_enabled = 0;
-       unsigned long ring_mask;
-       int size;
-
-       memset(bank, 0, sizeof(*bank));
-       bank->bank_number = bank_num;
-       bank->csr_addr = csr_addr;
-       bank->accel_dev = accel_dev;
-       spin_lock_init(&bank->lock);
-
-       /* Allocate the rings in the bank */
-       size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
-       bank->rings = kzalloc_node(size, GFP_KERNEL,
-                                  dev_to_node(&GET_DEV(accel_dev)));
-       if (!bank->rings)
-               return -ENOMEM;
-
-       /* Always enable IRQ coalescing. This allows use of the optimised
-        * flag and coalescing register.
-        * If it is disabled in the config file, just use the min time value.
-        */
-       if ((adf_get_cfg_int(accel_dev, "Accelerator0",
-                            ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
-                            &coalesc_enabled) == 0) && coalesc_enabled)
-               adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
-       else
-               bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
-
-       for (i = 0; i < num_rings_per_bank; i++) {
-               csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
-               csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);
-
-               ring = &bank->rings[i];
-               if (hw_data->tx_rings_mask & (1 << i)) {
-                       ring->inflights =
-                               kzalloc_node(sizeof(atomic_t),
-                                            GFP_KERNEL,
-                                            dev_to_node(&GET_DEV(accel_dev)));
-                       if (!ring->inflights)
-                               goto err;
-               } else {
-                       if (i < hw_data->tx_rx_gap) {
-                               dev_err(&GET_DEV(accel_dev),
-                                       "Invalid tx rings mask config\n");
-                               goto err;
-                       }
-                       tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
-                       ring->inflights = tx_ring->inflights;
-               }
-       }
-       if (adf_bank_debugfs_add(bank)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to add bank debugfs entry\n");
-               goto err;
-       }
-
-       csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
-       csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
-
-       return 0;
-err:
-       ring_mask = hw_data->tx_rings_mask;
-       for_each_set_bit(i, &ring_mask, num_rings_per_bank) {
-               ring = &bank->rings[i];
-               kfree(ring->inflights);
-               ring->inflights = NULL;
-       }
-       kfree(bank->rings);
-       return -ENOMEM;
-}
-
-/**
- * adf_init_etr_data() - Initialize transport rings for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function initializes the communication channels (rings) to the
- * acceleration device accel_dev.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_init_etr_data(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *etr_data;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *csr_addr;
-       u32 size;
-       u32 num_banks = 0;
-       int i, ret;
-
-       etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
-                               dev_to_node(&GET_DEV(accel_dev)));
-       if (!etr_data)
-               return -ENOMEM;
-
-       num_banks = GET_MAX_BANKS(accel_dev);
-       size = num_banks * sizeof(struct adf_etr_bank_data);
-       etr_data->banks = kzalloc_node(size, GFP_KERNEL,
-                                      dev_to_node(&GET_DEV(accel_dev)));
-       if (!etr_data->banks) {
-               ret = -ENOMEM;
-               goto err_bank;
-       }
-
-       accel_dev->transport = etr_data;
-       i = hw_data->get_etr_bar_id(hw_data);
-       csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
-
-       /* accel_dev->debugfs_dir should always be non-NULL here */
-       etr_data->debug = debugfs_create_dir("transport",
-                                            accel_dev->debugfs_dir);
-
-       for (i = 0; i < num_banks; i++) {
-               ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
-                                   csr_addr);
-               if (ret)
-                       goto err_bank_all;
-       }
-
-       return 0;
-
-err_bank_all:
-       debugfs_remove(etr_data->debug);
-       kfree(etr_data->banks);
-err_bank:
-       kfree(etr_data);
-       accel_dev->transport = NULL;
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_init_etr_data);
-
-static void cleanup_bank(struct adf_etr_bank_data *bank)
-{
-       struct adf_accel_dev *accel_dev = bank->accel_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u8 num_rings_per_bank = hw_data->num_rings_per_bank;
-       u32 i;
-
-       for (i = 0; i < num_rings_per_bank; i++) {
-               struct adf_etr_ring_data *ring = &bank->rings[i];
-
-               if (bank->ring_mask & (1 << i))
-                       adf_cleanup_ring(ring);
-
-               if (hw_data->tx_rings_mask & (1 << i))
-                       kfree(ring->inflights);
-       }
-       kfree(bank->rings);
-       adf_bank_debugfs_rm(bank);
-       memset(bank, 0, sizeof(*bank));
-}
-
-static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *etr_data = accel_dev->transport;
-       u32 i, num_banks = GET_MAX_BANKS(accel_dev);
-
-       for (i = 0; i < num_banks; i++)
-               cleanup_bank(&etr_data->banks[i]);
-}
-
-/**
- * adf_cleanup_etr_data() - Clear transport rings for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function clears the communication channels (rings) of the
- * acceleration device accel_dev.
- * To be used by QAT device specific drivers.
- *
- * Return: void
- */
-void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *etr_data = accel_dev->transport;
-
-       if (etr_data) {
-               adf_cleanup_etr_handles(accel_dev);
-               debugfs_remove(etr_data->debug);
-               kfree(etr_data->banks->rings);
-               kfree(etr_data->banks);
-               kfree(etr_data);
-               accel_dev->transport = NULL;
-       }
-}
-EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
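-
-/*
- * Sketch of the expected pairing in a QAT device-specific driver
- * (illustrative only; error handling elided):
- *
- *      ret = adf_init_etr_data(accel_dev);
- *      if (ret)
- *              return ret;
- *      ... create rings with adf_create_ring(), send with adf_send_message() ...
- *      adf_cleanup_etr_data(accel_dev);
- */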
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
deleted file mode 100644 (file)
index e6ef6f9..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_TRANSPORT_H
-#define ADF_TRANSPORT_H
-
-#include "adf_accel_devices.h"
-
-struct adf_etr_ring_data;
-
-typedef void (*adf_callback_fn)(void *resp_msg);
-
-int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
-                   u32 bank_num, u32 num_msgs, u32 msg_size,
-                   const char *ring_name, adf_callback_fn callback,
-                   int poll_mode, struct adf_etr_ring_data **ring_ptr);
-
-bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
-int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
-void adf_remove_ring(struct adf_etr_ring_data *ring);
-#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
deleted file mode 100644 (file)
index d3667db..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
-#define ADF_TRANSPORT_ACCESS_MACROS_H
-
-#include "adf_accel_devices.h"
-#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
-#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
-#define ADF_COALESCING_MIN_TIME 0x1FF
-#define ADF_COALESCING_MAX_TIME 0xFFFFF
-#define ADF_COALESCING_DEF_TIME 0x27FF
-#define ADF_RING_NEAR_WATERMARK_512 0x08
-#define ADF_RING_NEAR_WATERMARK_0 0x00
-#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
-
-/* Valid internal ring size values */
-#define ADF_RING_SIZE_128 0x01
-#define ADF_RING_SIZE_256 0x02
-#define ADF_RING_SIZE_512 0x03
-#define ADF_RING_SIZE_4K 0x06
-#define ADF_RING_SIZE_16K 0x08
-#define ADF_RING_SIZE_4M 0x10
-#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
-#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
-#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
-
-/* Valid internal msg size values */
-#define ADF_MSG_SIZE_32 0x01
-#define ADF_MSG_SIZE_64 0x02
-#define ADF_MSG_SIZE_128 0x04
-#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
-#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
-
-/* Size to bytes conversion macros for ring and msg size values */
-#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
-#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
-#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
-#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
-
-/* Minimum ring buffer size for memory allocation */
-#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
-       ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
-               ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
-#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
-#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
-                               SIZE) & ~0x4)
-/* Max outstanding requests */
-#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
-       ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
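-/*
- * Worked example: RING_SIZE = ADF_RING_SIZE_16K (0x08) with
- * MSG_SIZE = ADF_MSG_SIZE_64 (0x02) gives:
- *   ADF_SIZE_TO_RING_SIZE_IN_BYTES(0x08) = (1 << 7) << 7 = 16384 bytes
- *   ADF_MSG_SIZE_TO_BYTES(0x02)          = 0x02 << 5     = 64 bytes
- *   ADF_RING_SIZE_MODULO(0x08)           = 14, and 1 << 14 = 16384
- *   ADF_MAX_INFLIGHTS(0x08, 0x02)        = (1024 >> 2) - 1 = 255
- * i.e. at most 255 of the 16384 / 64 = 256 message slots may be in
- * flight at once.
- */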
-#define BUILD_RING_CONFIG(size)        \
-       ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
-       | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
-       | size)
-#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
-       ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
-       | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
-       | size)
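-/*
- * Example: the response-ring configuration used by the transport code,
- * BUILD_RESP_RING_CONFIG(size, ADF_RING_NEAR_WATERMARK_512,
- * ADF_RING_NEAR_WATERMARK_0), evaluates to
- * (0x08 << 0x0A) | (0x00 << 0x05) | size = 0x2000 | size.
- */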
-#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
deleted file mode 100644 (file)
index 08bca1c..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#include "adf_accel_devices.h"
-#include "adf_transport_internal.h"
-#include "adf_transport_access_macros.h"
-
-static DEFINE_MUTEX(ring_read_lock);
-static DEFINE_MUTEX(bank_read_lock);
-
-static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
-{
-       struct adf_etr_ring_data *ring = sfile->private;
-
-       mutex_lock(&ring_read_lock);
-       if (*pos == 0)
-               return SEQ_START_TOKEN;
-
-       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
-                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
-               return NULL;
-
-       return ring->base_addr +
-               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
-}
-
-static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
-{
-       struct adf_etr_ring_data *ring = sfile->private;
-
-       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
-                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
-               return NULL;
-
-       return ring->base_addr +
-               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
-}
-
-static int adf_ring_show(struct seq_file *sfile, void *v)
-{
-       struct adf_etr_ring_data *ring = sfile->private;
-       struct adf_etr_bank_data *bank = ring->bank;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-       void __iomem *csr = ring->bank->csr_addr;
-
-       if (v == SEQ_START_TOKEN) {
-               int head, tail, empty;
-
-               head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
-                                                  ring->ring_number);
-               tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
-                                                  ring->ring_number);
-               empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
-
-               seq_puts(sfile, "------- Ring configuration -------\n");
-               seq_printf(sfile, "ring name: %s\n",
-                          ring->ring_debug->ring_name);
-               seq_printf(sfile, "ring num %d, bank num %d\n",
-                          ring->ring_number, ring->bank->bank_number);
-               seq_printf(sfile, "head %x, tail %x, empty: %d\n",
-                          head, tail, (empty & 1 << ring->ring_number)
-                          >> ring->ring_number);
-               seq_printf(sfile, "ring size %lld, msg size %d\n",
-                          (long long)ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
-                          ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
-               seq_puts(sfile, "----------- Ring data ------------\n");
-               return 0;
-       }
-       seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4,
-                    v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
-       return 0;
-}
-
-static void adf_ring_stop(struct seq_file *sfile, void *v)
-{
-       mutex_unlock(&ring_read_lock);
-}
-
-static const struct seq_operations adf_ring_debug_sops = {
-       .start = adf_ring_start,
-       .next = adf_ring_next,
-       .stop = adf_ring_stop,
-       .show = adf_ring_show
-};
-
-DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
-
-int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
-{
-       struct adf_etr_ring_debug_entry *ring_debug;
-       char entry_name[8];
-
-       ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
-       if (!ring_debug)
-               return -ENOMEM;
-
-       strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
-       snprintf(entry_name, sizeof(entry_name), "ring_%02d",
-                ring->ring_number);
-
-       ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
-                                               ring->bank->bank_debug_dir,
-                                               ring, &adf_ring_debug_fops);
-       ring->ring_debug = ring_debug;
-       return 0;
-}
-
-void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
-{
-       if (ring->ring_debug) {
-               debugfs_remove(ring->ring_debug->debug);
-               kfree(ring->ring_debug);
-               ring->ring_debug = NULL;
-       }
-}
-
-static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
-{
-       struct adf_etr_bank_data *bank = sfile->private;
-       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
-
-       mutex_lock(&bank_read_lock);
-       if (*pos == 0)
-               return SEQ_START_TOKEN;
-
-       if (*pos >= num_rings_per_bank)
-               return NULL;
-
-       return pos;
-}
-
-static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
-{
-       struct adf_etr_bank_data *bank = sfile->private;
-       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
-
-       if (++(*pos) >= num_rings_per_bank)
-               return NULL;
-
-       return pos;
-}
-
-static int adf_bank_show(struct seq_file *sfile, void *v)
-{
-       struct adf_etr_bank_data *bank = sfile->private;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       if (v == SEQ_START_TOKEN) {
-               seq_printf(sfile, "------- Bank %d configuration -------\n",
-                          bank->bank_number);
-       } else {
-               int ring_id = *((int *)v) - 1;
-               struct adf_etr_ring_data *ring = &bank->rings[ring_id];
-               void __iomem *csr = bank->csr_addr;
-               int head, tail, empty;
-
-               if (!(bank->ring_mask & 1 << ring_id))
-                       return 0;
-
-               head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
-                                                  ring->ring_number);
-               tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
-                                                  ring->ring_number);
-               empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
-
-               seq_printf(sfile,
-                          "ring num %02d, head %04x, tail %04x, empty: %d\n",
-                          ring->ring_number, head, tail,
-                          (empty & 1 << ring->ring_number) >>
-                          ring->ring_number);
-       }
-       return 0;
-}
-
-static void adf_bank_stop(struct seq_file *sfile, void *v)
-{
-       mutex_unlock(&bank_read_lock);
-}
-
-static const struct seq_operations adf_bank_debug_sops = {
-       .start = adf_bank_start,
-       .next = adf_bank_next,
-       .stop = adf_bank_stop,
-       .show = adf_bank_show
-};
-
-DEFINE_SEQ_ATTRIBUTE(adf_bank_debug);
-
-int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
-{
-       struct adf_accel_dev *accel_dev = bank->accel_dev;
-       struct dentry *parent = accel_dev->transport->debug;
-       char name[8];
-
-       snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
-       bank->bank_debug_dir = debugfs_create_dir(name, parent);
-       bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
-                                                  bank->bank_debug_dir, bank,
-                                                  &adf_bank_debug_fops);
-       return 0;
-}
-
-void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
-{
-       debugfs_remove(bank->bank_debug_cfg);
-       debugfs_remove(bank->bank_debug_dir);
-}
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
deleted file mode 100644 (file)
index 8b2c92b..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_TRANSPORT_INTRN_H
-#define ADF_TRANSPORT_INTRN_H
-
-#include <linux/interrupt.h>
-#include <linux/spinlock_types.h>
-#include "adf_transport.h"
-
-struct adf_etr_ring_debug_entry {
-       char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       struct dentry *debug;
-};
-
-struct adf_etr_ring_data {
-       void *base_addr;
-       atomic_t *inflights;
-       adf_callback_fn callback;
-       struct adf_etr_bank_data *bank;
-       dma_addr_t dma_addr;
-       struct adf_etr_ring_debug_entry *ring_debug;
-       spinlock_t lock;        /* protects ring data struct */
-       u16 head;
-       u16 tail;
-       u32 threshold;
-       u8 ring_number;
-       u8 ring_size;
-       u8 msg_size;
-};
-
-struct adf_etr_bank_data {
-       struct adf_etr_ring_data *rings;
-       struct tasklet_struct resp_handler;
-       void __iomem *csr_addr;
-       u32 irq_coalesc_timer;
-       u32 bank_number;
-       u16 ring_mask;
-       u16 irq_mask;
-       spinlock_t lock;        /* protects bank data struct */
-       struct adf_accel_dev *accel_dev;
-       struct dentry *bank_debug_dir;
-       struct dentry *bank_debug_cfg;
-};
-
-struct adf_etr_data {
-       struct adf_etr_bank_data *banks;
-       struct dentry *debug;
-};
-
-void adf_response_handler(uintptr_t bank_addr);
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
-void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
-int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
-void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
-#else
-static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
-{
-       return 0;
-}
-
-#define adf_bank_debugfs_rm(bank) do {} while (0)
-
-static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
-                                      const char *name)
-{
-       return 0;
-}
-
-#define adf_ring_debugfs_rm(ring) do {} while (0)
-#endif
-#endif
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
deleted file mode 100644 (file)
index 8c95fcd..0000000
+++ /dev/null
@@ -1,314 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_cfg.h"
-#include "adf_cfg_strings.h"
-#include "adf_cfg_common.h"
-#include "adf_transport_access_macros.h"
-#include "adf_transport_internal.h"
-
-#define ADF_VINTSOU_OFFSET     0x204
-#define ADF_VINTMSK_OFFSET     0x208
-#define ADF_VINTSOU_BUN                BIT(0)
-#define ADF_VINTSOU_PF2VF      BIT(1)
-
-static struct workqueue_struct *adf_vf_stop_wq;
-
-struct adf_vf_stop_data {
-       struct adf_accel_dev *accel_dev;
-       struct work_struct work;
-};
-
-void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-
-       ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x0);
-}
-
-void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-
-       ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x2);
-}
-EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
-
-static int adf_enable_msi(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
-       int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
-                                        PCI_IRQ_MSI);
-       if (unlikely(stat < 0)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to enable MSI interrupt: %d\n", stat);
-               return stat;
-       }
-
-       return 0;
-}
-
-static void adf_disable_msi(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
-       pci_free_irq_vectors(pdev);
-}
-
-static void adf_dev_stop_async(struct work_struct *work)
-{
-       struct adf_vf_stop_data *stop_data =
-               container_of(work, struct adf_vf_stop_data, work);
-       struct adf_accel_dev *accel_dev = stop_data->accel_dev;
-
-       adf_dev_restarting_notify(accel_dev);
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-
-       /* Re-enable PF2VF interrupts */
-       adf_enable_pf2vf_interrupts(accel_dev);
-       kfree(stop_data);
-}
-
-int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
-{
-       struct adf_vf_stop_data *stop_data;
-
-       clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-       stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
-       if (!stop_data) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Couldn't schedule stop for vf_%d\n",
-                       accel_dev->accel_id);
-               return -ENOMEM;
-       }
-       stop_data->accel_dev = accel_dev;
-       INIT_WORK(&stop_data->work, adf_dev_stop_async);
-       queue_work(adf_vf_stop_wq, &stop_data->work);
-
-       return 0;
-}
-
-static void adf_pf2vf_bh_handler(void *data)
-{
-       struct adf_accel_dev *accel_dev = data;
-       bool ret;
-
-       ret = adf_recv_and_handle_pf2vf_msg(accel_dev);
-       if (ret)
-               /* Re-enable PF2VF interrupts */
-               adf_enable_pf2vf_interrupts(accel_dev);
-}
-
-static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
-{
-       tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
-                    (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
-
-       mutex_init(&accel_dev->vf.vf2pf_lock);
-       return 0;
-}
-
-static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
-{
-       tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
-       tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
-       mutex_destroy(&accel_dev->vf.vf2pf_lock);
-}
-
-static irqreturn_t adf_isr(int irq, void *privdata)
-{
-       struct adf_accel_dev *accel_dev = privdata;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
-       struct adf_bar *pmisc =
-                       &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
-       void __iomem *pmisc_bar_addr = pmisc->virt_addr;
-       bool handled = false;
-       u32 v_int, v_mask;
-
-       /* Read VF INT source CSR to determine the source of VF interrupt */
-       v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
-
-       /* Read VF INT mask CSR to determine which sources are masked */
-       v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
-
-       /*
-        * Recompute v_int ignoring sources that are masked. This is to
-        * avoid rescheduling the tasklet for interrupts already handled
-        */
-       v_int &= ~v_mask;
-
-       /* Check for PF2VF interrupt */
-       if (v_int & ADF_VINTSOU_PF2VF) {
-               /* Disable PF to VF interrupt */
-               adf_disable_pf2vf_interrupts(accel_dev);
-
-               /* Schedule tasklet to handle interrupt BH */
-               tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
-               handled = true;
-       }
-
-       /* Check bundle interrupt */
-       if (v_int & ADF_VINTSOU_BUN) {
-               struct adf_etr_data *etr_data = accel_dev->transport;
-               struct adf_etr_bank_data *bank = &etr_data->banks[0];
-
-               /* Disable Flag and Coalesce Ring Interrupts */
-               csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
-                                                   bank->bank_number, 0);
-               tasklet_hi_schedule(&bank->resp_handler);
-               handled = true;
-       }
-
-       return handled ? IRQ_HANDLED : IRQ_NONE;
-}
-
-static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-       unsigned int cpu;
-       int ret;
-
-       snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
-                "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
-                PCI_FUNC(pdev->devfn));
-       ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
-                         (void *)accel_dev);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
-                       accel_dev->vf.irq_name);
-               return ret;
-       }
-       cpu = accel_dev->accel_id % num_online_cpus();
-       irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
-       accel_dev->vf.irq_enabled = true;
-
-       return ret;
-}
-
-static int adf_setup_bh(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *priv_data = accel_dev->transport;
-
-       tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
-                    (unsigned long)priv_data->banks);
-       return 0;
-}
-
-static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *priv_data = accel_dev->transport;
-
-       tasklet_disable(&priv_data->banks[0].resp_handler);
-       tasklet_kill(&priv_data->banks[0].resp_handler);
-}
-
-/**
- * adf_vf_isr_resource_free() - Free IRQ for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function frees the interrupts for the acceleration device virtual function.
- */
-void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
-       if (accel_dev->vf.irq_enabled) {
-               irq_set_affinity_hint(pdev->irq, NULL);
-               free_irq(pdev->irq, accel_dev);
-       }
-       adf_cleanup_bh(accel_dev);
-       adf_cleanup_pf2vf_bh(accel_dev);
-       adf_disable_msi(accel_dev);
-}
-EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);
-
-/**
- * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function allocates the interrupts for the acceleration device virtual function.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
-{
-       if (adf_enable_msi(accel_dev))
-               goto err_out;
-
-       if (adf_setup_pf2vf_bh(accel_dev))
-               goto err_disable_msi;
-
-       if (adf_setup_bh(accel_dev))
-               goto err_cleanup_pf2vf_bh;
-
-       if (adf_request_msi_irq(accel_dev))
-               goto err_cleanup_bh;
-
-       return 0;
-
-err_cleanup_bh:
-       adf_cleanup_bh(accel_dev);
-
-err_cleanup_pf2vf_bh:
-       adf_cleanup_pf2vf_bh(accel_dev);
-
-err_disable_msi:
-       adf_disable_msi(accel_dev);
-
-err_out:
-       return -EFAULT;
-}
-EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
-
-/**
- * adf_flush_vf_wq() - Flush workqueue for VF
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function disables the PF/VF interrupts on the VF so that no new messages
- * are received and flushes the workqueue 'adf_vf_stop_wq'.
- *
- * Return: void.
- */
-void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
-{
-       adf_disable_pf2vf_interrupts(accel_dev);
-
-       flush_workqueue(adf_vf_stop_wq);
-}
-EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
-
-/**
- * adf_init_vf_wq() - Init workqueue for VF
- *
- * Function initializes the workqueue 'adf_vf_stop_wq' for the VF.
- *
- * Return: 0 on success, error code otherwise.
- */
-int __init adf_init_vf_wq(void)
-{
-       adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
-
-       return !adf_vf_stop_wq ? -EFAULT : 0;
-}
-
-void adf_exit_vf_wq(void)
-{
-       if (adf_vf_stop_wq)
-               destroy_workqueue(adf_vf_stop_wq);
-
-       adf_vf_stop_wq = NULL;
-}
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
deleted file mode 100644 (file)
index c141160..0000000
+++ /dev/null
@@ -1,298 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _ICP_QAT_FW_H_
-#define _ICP_QAT_FW_H_
-#include <linux/types.h>
-#include "icp_qat_hw.h"
-
-#define QAT_FIELD_SET(flags, val, bitpos, mask) \
-{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
-               (((val) & (mask)) << (bitpos))) ; }
-
-#define QAT_FIELD_GET(flags, bitpos, mask) \
-       (((flags) >> (bitpos)) & (mask))
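-
-/*
- * Example: with the valid-flag definitions below (bitpos 7, mask 0x1),
- * QAT_FIELD_SET(flags, 1, 7, 0x1) on flags == 0x00 leaves flags == 0x80,
- * and QAT_FIELD_GET(0x80, 7, 0x1) returns 1.
- */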
-
-#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
-#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
-#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
-#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
-#define ICP_QAT_FW_NUM_LONGWORDS_1 1
-#define ICP_QAT_FW_NUM_LONGWORDS_2 2
-#define ICP_QAT_FW_NUM_LONGWORDS_3 3
-#define ICP_QAT_FW_NUM_LONGWORDS_4 4
-#define ICP_QAT_FW_NUM_LONGWORDS_5 5
-#define ICP_QAT_FW_NUM_LONGWORDS_6 6
-#define ICP_QAT_FW_NUM_LONGWORDS_7 7
-#define ICP_QAT_FW_NUM_LONGWORDS_10 10
-#define ICP_QAT_FW_NUM_LONGWORDS_13 13
-#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
-
-enum icp_qat_fw_comn_resp_serv_id {
-       ICP_QAT_FW_COMN_RESP_SERV_NULL,
-       ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
-       ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
-};
-
-enum icp_qat_fw_comn_request_id {
-       ICP_QAT_FW_COMN_REQ_NULL = 0,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
-       ICP_QAT_FW_COMN_REQ_DELIMITER
-};
-
-struct icp_qat_fw_comn_req_hdr_cd_pars {
-       union {
-               struct {
-                       __u64 content_desc_addr;
-                       __u16 content_desc_resrvd1;
-                       __u8 content_desc_params_sz;
-                       __u8 content_desc_hdr_resrvd2;
-                       __u32 content_desc_resrvd3;
-               } s;
-               struct {
-                       __u32 serv_specif_fields[4];
-               } s1;
-       } u;
-};
-
-struct icp_qat_fw_comn_req_mid {
-       __u64 opaque_data;
-       __u64 src_data_addr;
-       __u64 dest_data_addr;
-       __u32 src_length;
-       __u32 dst_length;
-};
-
-struct icp_qat_fw_comn_req_cd_ctrl {
-       __u32 content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
-};
-
-struct icp_qat_fw_comn_req_hdr {
-       __u8 resrvd1;
-       __u8 service_cmd_id;
-       __u8 service_type;
-       __u8 hdr_flags;
-       __u16 serv_specif_flags;
-       __u16 comn_req_flags;
-};
-
-struct icp_qat_fw_comn_req_rqpars {
-       __u32 serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
-};
-
-struct icp_qat_fw_comn_req {
-       struct icp_qat_fw_comn_req_hdr comn_hdr;
-       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
-       struct icp_qat_fw_comn_req_mid comn_mid;
-       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
-       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
-};
-
-struct icp_qat_fw_comn_error {
-       __u8 xlat_err_code;
-       __u8 cmp_err_code;
-};
-
-struct icp_qat_fw_comn_resp_hdr {
-       __u8 resrvd1;
-       __u8 service_id;
-       __u8 response_type;
-       __u8 hdr_flags;
-       struct icp_qat_fw_comn_error comn_error;
-       __u8 comn_status;
-       __u8 cmd_id;
-};
-
-struct icp_qat_fw_comn_resp {
-       struct icp_qat_fw_comn_resp_hdr comn_hdr;
-       __u64 opaque_data;
-       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
-};
-
-#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
-#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
-#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
-#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
-#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6
-#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
-#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
-
-#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
-       icp_qat_fw_comn_req_hdr_t.service_type
-
-#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
-       icp_qat_fw_comn_req_hdr_t.service_type = val
-
-#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
-       icp_qat_fw_comn_req_hdr_t.service_cmd_id
-
-#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
-       icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
-
-#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
-       ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
-
-#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
-       QAT_FIELD_GET(hdr_flags, \
-       ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_SET(hdr_t, val) \
-       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
-       ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
-       QAT_FIELD_GET(hdr_flags, \
-       ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_CNV_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_SET(hdr_t, val) \
-       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
-       ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_CNV_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
-       ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
-
-#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
-       QAT_FIELD_GET(hdr_flags, \
-       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
-       (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
-
-#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
-       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
-       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
-       (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
-        ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
-
-#define QAT_COMN_PTR_TYPE_BITPOS 0
-#define QAT_COMN_PTR_TYPE_MASK 0x1
-#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
-#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
-#define QAT_COMN_PTR_TYPE_FLAT 0x0
-#define QAT_COMN_PTR_TYPE_SGL 0x1
-#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
-#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
-
-#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
-       ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
-        | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
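-
-/*
- * Example: ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
- * QAT_COMN_PTR_TYPE_SGL) = ((0x0 & 0x1) << 1) | ((0x1 & 0x1) << 0) = 0x1,
- * i.e. a 64-bit content-descriptor address with SGL data pointers.
- */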
-
-#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
-                       QAT_COMN_CD_FLD_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
-                       QAT_COMN_PTR_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
-                       QAT_COMN_CD_FLD_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
-#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
-#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
-#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
-
-#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
-       ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
-       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
-       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
-        & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
-
-#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
-       (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
-       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
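-
-/*
- * Example: next_curr_id == 0x21 packs next id 0x2 (high nibble) and
- * current id 0x1 (low nibble): ICP_QAT_FW_COMN_NEXT_ID_GET() returns 0x2
- * and ICP_QAT_FW_COMN_CURR_ID_GET() returns 0x1.
- */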
-
-#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
-#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
-#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
-#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
-#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
-#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
-#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
-#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
-#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
-#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
-
-#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
-       ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
-       QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
-       (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
-       QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
-       (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
-       QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
-       (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
-       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
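-
-/*
- * Example: a crypto-only failure,
- * ICP_QAT_FW_COMN_RESP_STATUS_BUILD(ICP_QAT_FW_COMN_STATUS_FLAG_ERROR,
- * ICP_QAT_FW_COMN_STATUS_FLAG_OK, ICP_QAT_FW_COMN_STATUS_FLAG_OK,
- * ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR),
- * evaluates to 1 << 7 = 0x80: only the crypto status bit is set.
- */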
-
-#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
-       QAT_COMN_RESP_CRYPTO_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
-       QAT_COMN_RESP_CMP_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
-       QAT_COMN_RESP_XLAT_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
-       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
-
-#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
-#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
-#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
-#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
-#define ERR_CODE_NO_ERROR 0
-#define ERR_CODE_INVALID_BLOCK_TYPE -1
-#define ERR_CODE_NO_MATCH_ONES_COMP -2
-#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
-#define ERR_CODE_INCOMPLETE_LEN -4
-#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
-#define ERR_CODE_RPT_GT_SPEC_LEN -6
-#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
-#define ERR_CODE_INV_DIS_CODE_LEN -8
-#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
-#define ERR_CODE_DIS_TOO_FAR_BACK -10
-#define ERR_CODE_OVERFLOW_ERROR -11
-#define ERR_CODE_SOFT_ERROR -12
-#define ERR_CODE_FATAL_ERROR -13
-#define ERR_CODE_SSM_ERROR -14
-#define ERR_CODE_ENDPOINT_ERROR -15
-
-enum icp_qat_fw_slice {
-       ICP_QAT_FW_SLICE_NULL = 0,
-       ICP_QAT_FW_SLICE_CIPHER = 1,
-       ICP_QAT_FW_SLICE_AUTH = 2,
-       ICP_QAT_FW_SLICE_DRAM_RD = 3,
-       ICP_QAT_FW_SLICE_DRAM_WR = 4,
-       ICP_QAT_FW_SLICE_COMP = 5,
-       ICP_QAT_FW_SLICE_XLAT = 6,
-       ICP_QAT_FW_SLICE_DELIMITER
-};
-#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h
deleted file mode 100644 (file)
index a03d43f..0000000
+++ /dev/null
@@ -1,404 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef _ICP_QAT_FW_COMP_H_
-#define _ICP_QAT_FW_COMP_H_
-#include "icp_qat_fw.h"
-
-enum icp_qat_fw_comp_cmd_id {
-       ICP_QAT_FW_COMP_CMD_STATIC = 0,
-       ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
-       ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
-       ICP_QAT_FW_COMP_CMD_DELIMITER
-};
-
-enum icp_qat_fw_comp_20_cmd_id {
-       ICP_QAT_FW_COMP_20_CMD_LZ4_COMPRESS = 3,
-       ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4,
-       ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5,
-       ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6,
-       ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7,
-       ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8,
-       ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9,
-       ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10,
-       ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11,
-       ICP_QAT_FW_COMP_20_CMD_DELIMITER
-};
-
-#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
-#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
-#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
-#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
-#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
-#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
-#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
-#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
-#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
-#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
-#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
-#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
-#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
-#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
-#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
-#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
-#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
-#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
-#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
-#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
-
-#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
-       ret_uncomp, secure_ram) \
-       ((((sesstype) & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) << \
-       ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
-       (((autoselect) & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) << \
-       ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
-       (((enhanced_asb) & ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) << \
-       ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
-       (((ret_uncomp) & ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) << \
-       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
-       (((secure_ram) & ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) << \
-       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
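-
-/*
- * Example: a stateless session with auto-select-best enabled and the
- * remaining inputs zero,
- * ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
- * ICP_QAT_FW_COMP_AUTO_SELECT_BEST, 0, 0, 0),
- * evaluates to (0 << 2) | (1 << 3) = 0x8.
- */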
-
-#define ICP_QAT_FW_COMP_SESSION_TYPE_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
-       ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
-
-#define ICP_QAT_FW_COMP_SESSION_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
-       ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
-
-#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS, \
-       ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK)
-
-#define ICP_QAT_FW_COMP_EN_ASB_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS, \
-       ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK)
-
-#define ICP_QAT_FW_COMP_RET_UNCOMP_GET(flags) \
-       QAT_FIELD_GET(flags, \
-       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS, \
-       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK)
-
-#define ICP_QAT_FW_COMP_SECURE_RAM_USE_GET(flags) \
-       QAT_FIELD_GET(flags, \
-       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS, \
-       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK)
-
-struct icp_qat_fw_comp_req_hdr_cd_pars {
-       union {
-               struct {
-                       __u64 content_desc_addr;
-                       __u16 content_desc_resrvd1;
-                       __u8 content_desc_params_sz;
-                       __u8 content_desc_hdr_resrvd2;
-                       __u32 content_desc_resrvd3;
-               } s;
-               struct {
-                       __u32 comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
-                       __u32 content_desc_resrvd4;
-               } sl;
-       } u;
-};
-
-struct icp_qat_fw_comp_req_params {
-       __u32 comp_len;
-       __u32 out_buffer_sz;
-       union {
-               struct {
-                       __u32 initial_crc32;
-                       __u32 initial_adler;
-               } legacy;
-               __u64 crc_data_addr;
-       } crc;
-       __u32 req_par_flags;
-       __u32 rsrvd;
-};
-
-#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \
-                                             cnvdfx, crc, xxhash_acc, \
-                                             cnv_error_type, append_crc, \
-                                             drop_data) \
-       ((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \
-       ICP_QAT_FW_COMP_SOP_BITPOS) | \
-       (((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \
-       ICP_QAT_FW_COMP_EOP_BITPOS) | \
-       (((bfinal) & ICP_QAT_FW_COMP_BFINAL_MASK) \
-       << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
-       (((cnv) & ICP_QAT_FW_COMP_CNV_MASK) << \
-       ICP_QAT_FW_COMP_CNV_BITPOS) | \
-       (((cnvnr) & ICP_QAT_FW_COMP_CNVNR_MASK) \
-       << ICP_QAT_FW_COMP_CNVNR_BITPOS) | \
-       (((cnvdfx) & ICP_QAT_FW_COMP_CNV_DFX_MASK) \
-       << ICP_QAT_FW_COMP_CNV_DFX_BITPOS) | \
-       (((crc) & ICP_QAT_FW_COMP_CRC_MODE_MASK) \
-       << ICP_QAT_FW_COMP_CRC_MODE_BITPOS) | \
-       (((xxhash_acc) & ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) \
-       << ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS) | \
-       (((cnv_error_type) & ICP_QAT_FW_COMP_CNV_ERROR_MASK) \
-       << ICP_QAT_FW_COMP_CNV_ERROR_BITPOS) | \
-       (((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \
-       << ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \
-       (((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \
-       << ICP_QAT_FW_COMP_DROP_DATA_BITPOS))
-
-#define ICP_QAT_FW_COMP_NOT_SOP 0
-#define ICP_QAT_FW_COMP_SOP 1
-#define ICP_QAT_FW_COMP_NOT_EOP 0
-#define ICP_QAT_FW_COMP_EOP 1
-#define ICP_QAT_FW_COMP_NOT_BFINAL 0
-#define ICP_QAT_FW_COMP_BFINAL 1
-#define ICP_QAT_FW_COMP_NO_CNV 0
-#define ICP_QAT_FW_COMP_CNV 1
-#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
-#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
-#define ICP_QAT_FW_COMP_NO_CNV_DFX 0
-#define ICP_QAT_FW_COMP_CNV_DFX 1
-#define ICP_QAT_FW_COMP_CRC_MODE_LEGACY 0
-#define ICP_QAT_FW_COMP_CRC_MODE_E2E 1
-#define ICP_QAT_FW_COMP_NO_XXHASH_ACC 0
-#define ICP_QAT_FW_COMP_XXHASH_ACC 1
-#define ICP_QAT_FW_COMP_APPEND_CRC 1
-#define ICP_QAT_FW_COMP_NO_APPEND_CRC 0
-#define ICP_QAT_FW_COMP_DROP_DATA 1
-#define ICP_QAT_FW_COMP_NO_DROP_DATA 0
-#define ICP_QAT_FW_COMP_SOP_BITPOS 0
-#define ICP_QAT_FW_COMP_SOP_MASK 0x1
-#define ICP_QAT_FW_COMP_EOP_BITPOS 1
-#define ICP_QAT_FW_COMP_EOP_MASK 0x1
-#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
-#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
-#define ICP_QAT_FW_COMP_CNV_BITPOS 16
-#define ICP_QAT_FW_COMP_CNV_MASK 0x1
-#define ICP_QAT_FW_COMP_CNVNR_BITPOS 17
-#define ICP_QAT_FW_COMP_CNVNR_MASK 0x1
-#define ICP_QAT_FW_COMP_CNV_DFX_BITPOS 18
-#define ICP_QAT_FW_COMP_CNV_DFX_MASK 0x1
-#define ICP_QAT_FW_COMP_CRC_MODE_BITPOS 19
-#define ICP_QAT_FW_COMP_CRC_MODE_MASK 0x1
-#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS 20
-#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK 0x1
-#define ICP_QAT_FW_COMP_CNV_ERROR_BITPOS 21
-#define ICP_QAT_FW_COMP_CNV_ERROR_MASK 0b111
-#define ICP_QAT_FW_COMP_CNV_ERROR_NONE 0b000
-#define ICP_QAT_FW_COMP_CNV_ERROR_CHECKSUM 0b001
-#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_OBC_DIFF 0b010
-#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR 0b011
-#define ICP_QAT_FW_COMP_CNV_ERROR_XLT 0b100
-#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_IBC_DIFF 0b101
-#define ICP_QAT_FW_COMP_APPEND_CRC_BITPOS 24
-#define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1
-#define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25
-#define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1
-
-#define ICP_QAT_FW_COMP_SOP_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \
-       ICP_QAT_FW_COMP_SOP_MASK)
-
-#define ICP_QAT_FW_COMP_SOP_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SOP_BITPOS, \
-       ICP_QAT_FW_COMP_SOP_MASK)
-
-#define ICP_QAT_FW_COMP_EOP_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_EOP_BITPOS, \
-       ICP_QAT_FW_COMP_EOP_MASK)
-
-#define ICP_QAT_FW_COMP_EOP_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_EOP_BITPOS, \
-       ICP_QAT_FW_COMP_EOP_MASK)
-
-#define ICP_QAT_FW_COMP_BFINAL_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
-       ICP_QAT_FW_COMP_BFINAL_MASK)
-
-#define ICP_QAT_FW_COMP_BFINAL_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
-       ICP_QAT_FW_COMP_BFINAL_MASK)
-
-#define ICP_QAT_FW_COMP_CNV_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_BITPOS, \
-       ICP_QAT_FW_COMP_CNV_MASK)
-
-#define ICP_QAT_FW_COMP_CNVNR_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNVNR_BITPOS, \
-       ICP_QAT_FW_COMP_CNVNR_MASK)
-
-#define ICP_QAT_FW_COMP_CNV_DFX_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
-       ICP_QAT_FW_COMP_CNV_DFX_MASK)
-
-#define ICP_QAT_FW_COMP_CNV_DFX_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
-       ICP_QAT_FW_COMP_CNV_DFX_MASK)
-
-#define ICP_QAT_FW_COMP_CRC_MODE_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CRC_MODE_BITPOS, \
-       ICP_QAT_FW_COMP_CRC_MODE_MASK)
-
-#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
-       ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
-
-#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
-       ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
-
-#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
-       ICP_QAT_FW_COMP_CNV_ERROR_MASK)
-
-#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
-       ICP_QAT_FW_COMP_CNV_ERROR_MASK)
-
-struct icp_qat_fw_xlt_req_params {
-       __u64 inter_buff_ptr;
-};
-
-struct icp_qat_fw_comp_cd_hdr {
-       __u16 ram_bank_flags;
-       __u8 comp_cfg_offset;
-       __u8 next_curr_id;
-       __u32 resrvd;
-       __u64 comp_state_addr;
-       __u64 ram_banks_addr;
-};
-
-#define COMP_CPR_INITIAL_CRC 0
-#define COMP_CPR_INITIAL_ADLER 1
-
-struct icp_qat_fw_xlt_cd_hdr {
-       __u16 resrvd1;
-       __u8 resrvd2;
-       __u8 next_curr_id;
-       __u32 resrvd3;
-};
-
-struct icp_qat_fw_comp_req {
-       struct icp_qat_fw_comn_req_hdr comn_hdr;
-       struct icp_qat_fw_comp_req_hdr_cd_pars cd_pars;
-       struct icp_qat_fw_comn_req_mid comn_mid;
-       struct icp_qat_fw_comp_req_params comp_pars;
-       union {
-               struct icp_qat_fw_xlt_req_params xlt_pars;
-               __u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
-       } u1;
-       __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
-       struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
-       union {
-               struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
-               __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];
-       } u2;
-};
-
-struct icp_qat_fw_resp_comp_pars {
-       __u32 input_byte_counter;
-       __u32 output_byte_counter;
-       union {
-               struct {
-                       __u32 curr_crc32;
-                       __u32 curr_adler_32;
-               } legacy;
-               __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_2];
-       } crc;
-};
-
-struct icp_qat_fw_comp_state {
-       __u32 rd8_counter;
-       __u32 status_flags;
-       __u32 in_counter;
-       __u32 out_counter;
-       __u64 intermediate_state;
-       __u32 lobc;
-       __u32 replaybc;
-       __u64 pcrc64_poly;
-       __u32 crc32;
-       __u32 adler_xxhash32;
-       __u64 pcrc64_xorout;
-       __u32 out_buf_size;
-       __u32 in_buf_size;
-       __u64 in_pcrc64;
-       __u64 out_pcrc64;
-       __u32 lobs;
-       __u32 libc;
-       __u64 reserved;
-       __u32 xxhash_state[4];
-       __u32 cleartext[4];
-};
-
-struct icp_qat_fw_comp_resp {
-       struct icp_qat_fw_comn_resp_hdr comn_resp;
-       __u64 opaque_data;
-       struct icp_qat_fw_resp_comp_pars comp_resp_pars;
-};
-
-#define QAT_FW_COMP_BANK_FLAG_MASK 0x1
-#define QAT_FW_COMP_BANK_I_BITPOS 8
-#define QAT_FW_COMP_BANK_H_BITPOS 7
-#define QAT_FW_COMP_BANK_G_BITPOS 6
-#define QAT_FW_COMP_BANK_F_BITPOS 5
-#define QAT_FW_COMP_BANK_E_BITPOS 4
-#define QAT_FW_COMP_BANK_D_BITPOS 3
-#define QAT_FW_COMP_BANK_C_BITPOS 2
-#define QAT_FW_COMP_BANK_B_BITPOS 1
-#define QAT_FW_COMP_BANK_A_BITPOS 0
-
-enum icp_qat_fw_comp_bank_enabled {
-       ICP_QAT_FW_COMP_BANK_DISABLED = 0,
-       ICP_QAT_FW_COMP_BANK_ENABLED = 1,
-       ICP_QAT_FW_COMP_BANK_DELIMITER = 2
-};
-
-#define ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(bank_i_enable, bank_h_enable, \
-                                       bank_g_enable, bank_f_enable, \
-                                       bank_e_enable, bank_d_enable, \
-                                       bank_c_enable, bank_b_enable, \
-                                       bank_a_enable) \
-       ((((bank_i_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_I_BITPOS) | \
-       (((bank_h_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_H_BITPOS) | \
-       (((bank_g_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_G_BITPOS) | \
-       (((bank_f_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_F_BITPOS) | \
-       (((bank_e_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_E_BITPOS) | \
-       (((bank_d_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_D_BITPOS) | \
-       (((bank_c_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_C_BITPOS) | \
-       (((bank_b_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_B_BITPOS) | \
-       (((bank_a_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_A_BITPOS))
-
-struct icp_qat_fw_comp_crc_data_struct {
-       __u32 crc32;
-       union {
-               __u32 adler;
-               __u32 xxhash;
-       } adler_xxhash_u;
-       __u32 cpr_in_crc_lo;
-       __u32 cpr_in_crc_hi;
-       __u32 cpr_out_crc_lo;
-       __u32 cpr_out_crc_hi;
-       __u32 xlt_in_crc_lo;
-       __u32 xlt_in_crc_hi;
-       __u32 xlt_out_crc_lo;
-       __u32 xlt_out_crc_hi;
-       __u32 prog_crc_poly_lo;
-       __u32 prog_crc_poly_hi;
-       __u32 xor_out_lo;
-       __u32 xor_out_hi;
-       __u32 append_crc_lo;
-       __u32 append_crc_hi;
-};
-
-struct xxhash_acc_state_buff {
-       __u32 in_counter;
-       __u32 out_counter;
-       __u32 xxhash_state[4];
-       __u32 clear_txt[4];
-};
-
-#endif
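The ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD macro above packs eleven per-request fields into a single 32-bit word using the BITPOS/MASK pairs that follow it. As an editor's illustration only (not part of the diff), here is a minimal standalone C sketch that reproduces the packing for just the SOP, EOP, BFINAL and CNV fields, with constants copied from the deleted header:

/*
 * Illustrative user-space sketch of the request-parameter packing done
 * by ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD. Constants are copied from
 * the deleted icp_qat_fw_comp.h above; only four of the eleven fields
 * are shown.
 */
#include <stdint.h>
#include <stdio.h>

#define SOP_BITPOS      0       /* ICP_QAT_FW_COMP_SOP_BITPOS */
#define EOP_BITPOS      1       /* ICP_QAT_FW_COMP_EOP_BITPOS */
#define BFINAL_BITPOS   6       /* ICP_QAT_FW_COMP_BFINAL_BITPOS */
#define CNV_BITPOS      16      /* ICP_QAT_FW_COMP_CNV_BITPOS */
#define FLAG_MASK       0x1     /* all four are single-bit fields */

static uint32_t build_req_par_flags(unsigned int sop, unsigned int eop,
                                    unsigned int bfinal, unsigned int cnv)
{
        return ((sop & FLAG_MASK) << SOP_BITPOS) |
               ((eop & FLAG_MASK) << EOP_BITPOS) |
               ((bfinal & FLAG_MASK) << BFINAL_BITPOS) |
               ((cnv & FLAG_MASK) << CNV_BITPOS);
}

int main(void)
{
        /* One-shot request: start, end and final block, CNV enabled. */
        uint32_t flags = build_req_par_flags(1, 1, 1, 1);

        printf("req_par_flags = 0x%08x\n", flags);      /* 0x00010043 */
        return 0;
}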
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
deleted file mode 100644
index 56cb827..0000000
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
-#define _ICP_QAT_FW_INIT_ADMIN_H_
-
-#include "icp_qat_fw.h"
-
-enum icp_qat_fw_init_admin_cmd_id {
-       ICP_QAT_FW_INIT_AE = 0,
-       ICP_QAT_FW_TRNG_ENABLE = 1,
-       ICP_QAT_FW_TRNG_DISABLE = 2,
-       ICP_QAT_FW_CONSTANTS_CFG = 3,
-       ICP_QAT_FW_STATUS_GET = 4,
-       ICP_QAT_FW_COUNTERS_GET = 5,
-       ICP_QAT_FW_LOOPBACK = 6,
-       ICP_QAT_FW_HEARTBEAT_SYNC = 7,
-       ICP_QAT_FW_HEARTBEAT_GET = 8,
-       ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
-       ICP_QAT_FW_PM_STATE_CONFIG = 128,
-};
-
-enum icp_qat_fw_init_admin_resp_status {
-       ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
-       ICP_QAT_FW_INIT_RESP_STATUS_FAIL
-};
-
-struct icp_qat_fw_init_admin_req {
-       __u16 init_cfg_sz;
-       __u8 resrvd1;
-       __u8 cmd_id;
-       __u32 resrvd2;
-       __u64 opaque_data;
-       __u64 init_cfg_ptr;
-
-       union {
-               struct {
-                       __u16 ibuf_size_in_kb;
-                       __u16 resrvd3;
-               };
-               __u32 idle_filter;
-       };
-
-       __u32 resrvd4;
-} __packed;
-
-struct icp_qat_fw_init_admin_resp {
-       __u8 flags;
-       __u8 resrvd1;
-       __u8 status;
-       __u8 cmd_id;
-       union {
-               __u32 resrvd2;
-               struct {
-                       __u16 version_minor_num;
-                       __u16 version_major_num;
-               };
-               __u32 extended_features;
-       };
-       __u64 opaque_data;
-       union {
-               __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_4];
-               struct {
-                       __u32 version_patch_num;
-                       __u8 context_id;
-                       __u8 ae_id;
-                       __u16 resrvd4;
-                       __u64 resrvd5;
-               };
-               struct {
-                       __u64 req_rec_count;
-                       __u64 resp_sent_count;
-               };
-               struct {
-                       __u16 compression_algos;
-                       __u16 checksum_algos;
-                       __u32 deflate_capabilities;
-                       __u32 resrvd6;
-                       __u32 lzs_capabilities;
-               };
-               struct {
-                       __u32 cipher_algos;
-                       __u32 hash_algos;
-                       __u16 keygen_algos;
-                       __u16 other;
-                       __u16 public_key_algos;
-                       __u16 prime_algos;
-               };
-               struct {
-                       __u64 timestamp;
-                       __u64 resrvd7;
-               };
-               struct {
-                       __u32 successful_count;
-                       __u32 unsuccessful_count;
-                       __u64 resrvd8;
-               };
-       };
-} __packed;
-
-#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
-#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
-#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
-       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
-
-#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
-       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
-
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, \
-                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
-                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
-#endif
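Every heartbeat accessor above reduces to a one-bit field at bit 0 of the response flags byte. QAT_FIELD_GET itself lives in icp_qat_fw.h, which is not part of this hunk, so the shift-and-mask in the illustrative sketch below (not part of the diff) is an assumed equivalent:

/*
 * Illustrative sketch: extracting the heartbeat flag from an
 * init/admin response 'flags' byte. The shift-and-mask stands in for
 * QAT_FIELD_GET, which is defined in icp_qat_fw.h (not shown above).
 */
#include <stdio.h>

#define HEARTBEAT_FLAG_BITPOS 0   /* ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS */
#define HEARTBEAT_FLAG_MASK   0x1 /* ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK */
#define HEARTBEAT_OK          0   /* ICP_QAT_FW_COMN_HEARTBEAT_OK */

int main(void)
{
        unsigned char resp_flags = 0x01; /* pretend firmware response */
        unsigned int hb = (resp_flags >> HEARTBEAT_FLAG_BITPOS) &
                          HEARTBEAT_FLAG_MASK;

        printf("heartbeat %s\n", hb == HEARTBEAT_OK ? "ok" : "blocked");
        return 0;
}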
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
deleted file mode 100644
index 28fa17f..0000000
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
+++ /dev/null
@@ -1,367 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _ICP_QAT_FW_LA_H_
-#define _ICP_QAT_FW_LA_H_
-#include "icp_qat_fw.h"
-
-enum icp_qat_fw_la_cmd_id {
-       ICP_QAT_FW_LA_CMD_CIPHER = 0,
-       ICP_QAT_FW_LA_CMD_AUTH = 1,
-       ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
-       ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
-       ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
-       ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
-       ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
-       ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
-       ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
-       ICP_QAT_FW_LA_CMD_MGF1 = 9,
-       ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
-       ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
-       ICP_QAT_FW_LA_CMD_DELIMITER = 12
-};
-
-#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
-#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
-#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
-#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
-
-struct icp_qat_fw_la_bulk_req {
-       struct icp_qat_fw_comn_req_hdr comn_hdr;
-       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
-       struct icp_qat_fw_comn_req_mid comn_mid;
-       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
-       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
-};
-
-#define ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE 1
-#define QAT_LA_SLICE_TYPE_BITPOS 14
-#define QAT_LA_SLICE_TYPE_MASK 0x3
-#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
-#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
-#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
-#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
-#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
-#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
-#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
-#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
-#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
-#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
-#define ICP_QAT_FW_LA_GCM_PROTO        2
-#define ICP_QAT_FW_LA_CCM_PROTO        1
-#define ICP_QAT_FW_LA_NO_PROTO 0
-#define QAT_LA_PROTO_BITPOS 7
-#define QAT_LA_PROTO_MASK 0x7
-#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
-#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
-#define QAT_LA_CMP_AUTH_RES_BITPOS 6
-#define QAT_LA_CMP_AUTH_RES_MASK 0x1
-#define ICP_QAT_FW_LA_RET_AUTH_RES 1
-#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
-#define QAT_LA_RET_AUTH_RES_BITPOS 5
-#define QAT_LA_RET_AUTH_RES_MASK 0x1
-#define ICP_QAT_FW_LA_UPDATE_STATE 1
-#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
-#define QAT_LA_UPDATE_STATE_BITPOS 4
-#define QAT_LA_UPDATE_STATE_MASK 0x1
-#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
-#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
-#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
-#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
-#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
-#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
-#define QAT_LA_CIPH_IV_FLD_BITPOS 2
-#define QAT_LA_CIPH_IV_FLD_MASK   0x1
-#define ICP_QAT_FW_LA_PARTIAL_NONE 0
-#define ICP_QAT_FW_LA_PARTIAL_START 1
-#define ICP_QAT_FW_LA_PARTIAL_MID 3
-#define ICP_QAT_FW_LA_PARTIAL_END 2
-#define QAT_LA_PARTIAL_BITPOS 0
-#define QAT_LA_PARTIAL_MASK 0x3
-#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
-       cmp_auth, ret_auth, update_state, \
-       ciph_iv, ciphcfg, partial) \
-       (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
-       QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
-       ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
-       QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
-       ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
-       QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
-       ((proto & QAT_LA_PROTO_MASK) << \
-       QAT_LA_PROTO_BITPOS)    | \
-       ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
-       QAT_LA_CMP_AUTH_RES_BITPOS) | \
-       ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
-       QAT_LA_RET_AUTH_RES_BITPOS) | \
-       ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
-       QAT_LA_UPDATE_STATE_BITPOS) | \
-       ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
-       QAT_LA_CIPH_IV_FLD_BITPOS) | \
-       ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
-       QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
-       ((partial & QAT_LA_PARTIAL_MASK) << \
-       QAT_LA_PARTIAL_BITPOS))
-
-#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
-       QAT_LA_CIPH_IV_FLD_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
-       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
-
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
-       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
-       QAT_LA_GCM_IV_LEN_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_PROTO_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
-
-#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
-       QAT_LA_CMP_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
-       QAT_LA_RET_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
-       QAT_LA_DIGEST_IN_BUFFER_MASK)
-
-#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
-       QAT_LA_UPDATE_STATE_MASK)
-
-#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
-       QAT_LA_PARTIAL_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
-       QAT_LA_CIPH_IV_FLD_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
-       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
-
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
-       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
-       QAT_LA_GCM_IV_LEN_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
-       QAT_LA_PROTO_MASK)
-
-#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
-       QAT_LA_CMP_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
-       QAT_LA_RET_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
-       QAT_LA_DIGEST_IN_BUFFER_MASK)
-
-#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
-       QAT_LA_UPDATE_STATE_MASK)
-
-#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
-       QAT_LA_PARTIAL_MASK)
-
-#define ICP_QAT_FW_LA_SLICE_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_SLICE_TYPE_BITPOS, \
-       QAT_LA_SLICE_TYPE_MASK)
-
-struct icp_qat_fw_cipher_req_hdr_cd_pars {
-       union {
-               struct {
-                       __u64 content_desc_addr;
-                       __u16 content_desc_resrvd1;
-                       __u8 content_desc_params_sz;
-                       __u8 content_desc_hdr_resrvd2;
-                       __u32 content_desc_resrvd3;
-               } s;
-               struct {
-                       __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
-               } s1;
-       } u;
-};
-
-struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
-       union {
-               struct {
-                       __u64 content_desc_addr;
-                       __u16 content_desc_resrvd1;
-                       __u8 content_desc_params_sz;
-                       __u8 content_desc_hdr_resrvd2;
-                       __u32 content_desc_resrvd3;
-               } s;
-               struct {
-                       __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
-               } sl;
-       } u;
-};
-
-struct icp_qat_fw_cipher_cd_ctrl_hdr {
-       __u8 cipher_state_sz;
-       __u8 cipher_key_sz;
-       __u8 cipher_cfg_offset;
-       __u8 next_curr_id;
-       __u8 cipher_padding_sz;
-       __u8 resrvd1;
-       __u16 resrvd2;
-       __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
-};
-
-struct icp_qat_fw_auth_cd_ctrl_hdr {
-       __u32 resrvd1;
-       __u8 resrvd2;
-       __u8 hash_flags;
-       __u8 hash_cfg_offset;
-       __u8 next_curr_id;
-       __u8 resrvd3;
-       __u8 outer_prefix_sz;
-       __u8 final_sz;
-       __u8 inner_res_sz;
-       __u8 resrvd4;
-       __u8 inner_state1_sz;
-       __u8 inner_state2_offset;
-       __u8 inner_state2_sz;
-       __u8 outer_config_offset;
-       __u8 outer_state1_sz;
-       __u8 outer_res_sz;
-       __u8 outer_prefix_offset;
-};
-
-struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
-       __u8 cipher_state_sz;
-       __u8 cipher_key_sz;
-       __u8 cipher_cfg_offset;
-       __u8 next_curr_id_cipher;
-       __u8 cipher_padding_sz;
-       __u8 hash_flags;
-       __u8 hash_cfg_offset;
-       __u8 next_curr_id_auth;
-       __u8 resrvd1;
-       __u8 outer_prefix_sz;
-       __u8 final_sz;
-       __u8 inner_res_sz;
-       __u8 resrvd2;
-       __u8 inner_state1_sz;
-       __u8 inner_state2_offset;
-       __u8 inner_state2_sz;
-       __u8 outer_config_offset;
-       __u8 outer_state1_sz;
-       __u8 outer_res_sz;
-       __u8 outer_prefix_offset;
-};
-
-#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
-#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
-#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX  240
-#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
-       (sizeof(struct icp_qat_fw_la_cipher_req_params_t))
-#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
-
-struct icp_qat_fw_la_cipher_req_params {
-       __u32 cipher_offset;
-       __u32 cipher_length;
-       union {
-               __u32 cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
-               struct {
-                       __u64 cipher_IV_ptr;
-                       __u64 resrvd1;
-               } s;
-       } u;
-};
-
-struct icp_qat_fw_la_auth_req_params {
-       __u32 auth_off;
-       __u32 auth_len;
-       union {
-               __u64 auth_partial_st_prefix;
-               __u64 aad_adr;
-       } u1;
-       __u64 auth_res_addr;
-       union {
-               __u8 inner_prefix_sz;
-               __u8 aad_sz;
-       } u2;
-       __u8 resrvd1;
-       __u8 hash_state_sz;
-       __u8 auth_res_sz;
-} __packed;
-
-struct icp_qat_fw_la_auth_req_params_resrvd_flds {
-       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
-       union {
-               __u8 inner_prefix_sz;
-               __u8 aad_sz;
-       } u2;
-       __u8 resrvd1;
-       __u16 resrvd2;
-};
-
-struct icp_qat_fw_la_resp {
-       struct icp_qat_fw_comn_resp_hdr comn_resp;
-       __u64 opaque_data;
-       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
-};
-
-#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
-       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
-         ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
-       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
-
-#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
-       (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
-       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
-
-#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
-       ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
-       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
-       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
-
-#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
-       (((cd_ctrl_hdr_t)->next_curr_id_auth) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
-       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
-
-#endif
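ICP_QAT_FW_LA_FLAGS_BUILD above ORs ten masked-and-shifted fields into the lookaside request's service-specific flags word. The standalone sketch below (illustrative only, not part of the diff) composes a plausible AEAD-style value from three of those fields, with constants copied from the deleted header:

/*
 * Illustrative sketch: composing an LA flags word the way
 * ICP_QAT_FW_LA_FLAGS_BUILD does, for a GCM request that returns the
 * auth result and keeps the digest in the buffer.
 */
#include <stdint.h>
#include <stdio.h>

#define PROTO_BITPOS            7   /* QAT_LA_PROTO_BITPOS */
#define PROTO_MASK              0x7 /* QAT_LA_PROTO_MASK */
#define GCM_PROTO               2   /* ICP_QAT_FW_LA_GCM_PROTO */
#define RET_AUTH_RES_BITPOS     5   /* QAT_LA_RET_AUTH_RES_BITPOS */
#define DIGEST_IN_BUFFER_BITPOS 10  /* QAT_LA_DIGEST_IN_BUFFER_BITPOS */

int main(void)
{
        uint16_t flags = 0;

        flags |= (GCM_PROTO & PROTO_MASK) << PROTO_BITPOS;
        flags |= 1 << RET_AUTH_RES_BITPOS;     /* return the auth result */
        flags |= 1 << DIGEST_IN_BUFFER_BITPOS; /* digest kept in buffer */

        printf("la_flags = 0x%04x\n", flags);  /* 0x0520 */
        return 0;
}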
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
deleted file mode 100644
index 7eb5dae..0000000
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
-#define __ICP_QAT_FW_LOADER_HANDLE_H__
-#include "icp_qat_uclo.h"
-
-struct icp_qat_fw_loader_ae_data {
-       unsigned int state;
-       unsigned int ustore_size;
-       unsigned int free_addr;
-       unsigned int free_size;
-       unsigned int live_ctx_mask;
-};
-
-struct icp_qat_fw_loader_hal_handle {
-       struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
-       unsigned int ae_mask;
-       unsigned int admin_ae_mask;
-       unsigned int slice_mask;
-       unsigned int revision_id;
-       unsigned int ae_max_num;
-       unsigned int upc_mask;
-       unsigned int max_ustore;
-};
-
-struct icp_qat_fw_loader_chip_info {
-       int mmp_sram_size;
-       bool nn;
-       bool lm2lm3;
-       u32 lm_size;
-       u32 icp_rst_csr;
-       u32 icp_rst_mask;
-       u32 glb_clk_enable_csr;
-       u32 misc_ctl_csr;
-       u32 wakeup_event_val;
-       bool fw_auth;
-       bool css_3k;
-       bool tgroup_share_ustore;
-       u32 fcu_ctl_csr;
-       u32 fcu_sts_csr;
-       u32 fcu_dram_addr_hi;
-       u32 fcu_dram_addr_lo;
-       u32 fcu_loaded_ae_csr;
-       u8 fcu_loaded_ae_pos;
-};
-
-struct icp_qat_fw_loader_handle {
-       struct icp_qat_fw_loader_hal_handle *hal_handle;
-       struct icp_qat_fw_loader_chip_info *chip_info;
-       struct pci_dev *pci_dev;
-       void *obj_handle;
-       void *sobj_handle;
-       void *mobj_handle;
-       unsigned int cfg_ae_mask;
-       void __iomem *hal_sram_addr_v;
-       void __iomem *hal_cap_g_ctl_csr_addr_v;
-       void __iomem *hal_cap_ae_xfer_csr_addr_v;
-       void __iomem *hal_cap_ae_local_csr_addr_v;
-       void __iomem *hal_ep_csr_addr_v;
-};
-
-struct icp_firml_dram_desc {
-       void __iomem *dram_base_addr;
-       void *dram_base_addr_v;
-       dma_addr_t dram_bus_addr;
-       u64 dram_size;
-};
-#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
deleted file mode 100644
index 9dddae0..0000000
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _ICP_QAT_FW_PKE_
-#define _ICP_QAT_FW_PKE_
-
-#include "icp_qat_fw.h"
-
-struct icp_qat_fw_req_hdr_pke_cd_pars {
-       __u64 content_desc_addr;
-       __u32 content_desc_resrvd;
-       __u32 func_id;
-};
-
-struct icp_qat_fw_req_pke_mid {
-       __u64 opaque;
-       __u64 src_data_addr;
-       __u64 dest_data_addr;
-};
-
-struct icp_qat_fw_req_pke_hdr {
-       __u8 resrvd1;
-       __u8 resrvd2;
-       __u8 service_type;
-       __u8 hdr_flags;
-       __u16 comn_req_flags;
-       __u16 resrvd4;
-       struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
-};
-
-struct icp_qat_fw_pke_request {
-       struct icp_qat_fw_req_pke_hdr pke_hdr;
-       struct icp_qat_fw_req_pke_mid pke_mid;
-       __u8 output_param_count;
-       __u8 input_param_count;
-       __u16 resrvd1;
-       __u32 resrvd2;
-       __u64 next_req_adr;
-};
-
-struct icp_qat_fw_resp_pke_hdr {
-       __u8 resrvd1;
-       __u8 resrvd2;
-       __u8 response_type;
-       __u8 hdr_flags;
-       __u16 comn_resp_flags;
-       __u16 resrvd4;
-};
-
-struct icp_qat_fw_pke_resp {
-       struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
-       __u64 opaque;
-       __u64 src_data_addr;
-       __u64 dest_data_addr;
-};
-
-#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS              7
-#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK                0x1
-#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
-       QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
-               ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
-               QAT_COMN_RESP_PKE_STATUS_BITPOS, \
-               QAT_COMN_RESP_PKE_STATUS_MASK)
-
-#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
-       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
-               ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
-               ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
-#endif
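ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET above marks a PKE request header valid through QAT_FIELD_SET, which is defined in icp_qat_fw.h and not shown in this hunk. The sketch below (illustrative only) uses an assumed-equivalent read-modify-write helper:

/*
 * Illustrative sketch: setting the PKE header valid flag. The helper
 * stands in for QAT_FIELD_SET from icp_qat_fw.h (not shown above).
 */
#include <stdio.h>

#define VALID_FLAG_BITPOS 7   /* ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS */
#define VALID_FLAG_MASK   0x1 /* ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK */

static void set_valid_flag(unsigned char *hdr_flags, unsigned int val)
{
        *hdr_flags = (*hdr_flags & ~(VALID_FLAG_MASK << VALID_FLAG_BITPOS)) |
                     ((val & VALID_FLAG_MASK) << VALID_FLAG_BITPOS);
}

int main(void)
{
        unsigned char hdr_flags = 0;

        set_valid_flag(&hdr_flags, 1);
        printf("hdr_flags = 0x%02x\n", hdr_flags); /* 0x80 */
        return 0;
}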
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h
deleted file mode 100644
index 20b2ee1..0000000
--- a/drivers/crypto/qat/qat_common/icp_qat_hal.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef __ICP_QAT_HAL_H
-#define __ICP_QAT_HAL_H
-#include "icp_qat_fw_loader_handle.h"
-
-enum hal_global_csr {
-       MISC_CONTROL = 0xA04,
-       ICP_RESET = 0xA0c,
-       ICP_GLOBAL_CLK_ENABLE = 0xA50
-};
-
-enum {
-       MISC_CONTROL_C4XXX = 0xAA0,
-       ICP_RESET_CPP0 = 0x938,
-       ICP_RESET_CPP1 = 0x93c,
-       ICP_GLOBAL_CLK_ENABLE_CPP0 = 0x964,
-       ICP_GLOBAL_CLK_ENABLE_CPP1 = 0x968
-};
-
-enum hal_ae_csr {
-       USTORE_ADDRESS = 0x000,
-       USTORE_DATA_LOWER = 0x004,
-       USTORE_DATA_UPPER = 0x008,
-       ALU_OUT = 0x010,
-       CTX_ARB_CNTL = 0x014,
-       CTX_ENABLES = 0x018,
-       CC_ENABLE = 0x01c,
-       CSR_CTX_POINTER = 0x020,
-       CTX_STS_INDIRECT = 0x040,
-       ACTIVE_CTX_STATUS = 0x044,
-       CTX_SIG_EVENTS_INDIRECT = 0x048,
-       CTX_SIG_EVENTS_ACTIVE = 0x04c,
-       CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
-       LM_ADDR_0_INDIRECT = 0x060,
-       LM_ADDR_1_INDIRECT = 0x068,
-       LM_ADDR_2_INDIRECT = 0x0cc,
-       LM_ADDR_3_INDIRECT = 0x0d4,
-       INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
-       INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
-       INDIRECT_LM_ADDR_2_BYTE_INDEX = 0x10c,
-       INDIRECT_LM_ADDR_3_BYTE_INDEX = 0x114,
-       INDIRECT_T_INDEX = 0x0f8,
-       INDIRECT_T_INDEX_BYTE_INDEX = 0x0fc,
-       FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
-       TIMESTAMP_LOW = 0x0c0,
-       TIMESTAMP_HIGH = 0x0c4,
-       PROFILE_COUNT = 0x144,
-       SIGNATURE_ENABLE = 0x150,
-       AE_MISC_CONTROL = 0x160,
-       LOCAL_CSR_STATUS = 0x180,
-};
-
-enum fcu_csr {
-       FCU_CONTROL           = 0x8c0,
-       FCU_STATUS            = 0x8c4,
-       FCU_STATUS1           = 0x8c8,
-       FCU_DRAM_ADDR_LO      = 0x8cc,
-       FCU_DRAM_ADDR_HI      = 0x8d0,
-       FCU_RAMBASE_ADDR_HI   = 0x8d4,
-       FCU_RAMBASE_ADDR_LO   = 0x8d8
-};
-
-enum fcu_csr_4xxx {
-       FCU_CONTROL_4XXX           = 0x1000,
-       FCU_STATUS_4XXX            = 0x1004,
-       FCU_ME_BROADCAST_MASK_TYPE = 0x1008,
-       FCU_AE_LOADED_4XXX         = 0x1010,
-       FCU_DRAM_ADDR_LO_4XXX      = 0x1014,
-       FCU_DRAM_ADDR_HI_4XXX      = 0x1018,
-};
-
-enum fcu_cmd {
-       FCU_CTRL_CMD_NOOP  = 0,
-       FCU_CTRL_CMD_AUTH  = 1,
-       FCU_CTRL_CMD_LOAD  = 2,
-       FCU_CTRL_CMD_START = 3
-};
-
-enum fcu_sts {
-       FCU_STS_NO_STS    = 0,
-       FCU_STS_VERI_DONE = 1,
-       FCU_STS_LOAD_DONE = 2,
-       FCU_STS_VERI_FAIL = 3,
-       FCU_STS_LOAD_FAIL = 4,
-       FCU_STS_BUSY      = 5
-};
-
-#define ALL_AE_MASK                 0xFFFFFFFF
-#define UA_ECS                      (0x1 << 31)
-#define ACS_ABO_BITPOS              31
-#define ACS_ACNO                    0x7
-#define CE_ENABLE_BITPOS            0x8
-#define CE_LMADDR_0_GLOBAL_BITPOS   16
-#define CE_LMADDR_1_GLOBAL_BITPOS   17
-#define CE_LMADDR_2_GLOBAL_BITPOS   22
-#define CE_LMADDR_3_GLOBAL_BITPOS   23
-#define CE_T_INDEX_GLOBAL_BITPOS    21
-#define CE_NN_MODE_BITPOS           20
-#define CE_REG_PAR_ERR_BITPOS       25
-#define CE_BREAKPOINT_BITPOS        27
-#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
-#define CE_INUSE_CONTEXTS_BITPOS    31
-#define CE_NN_MODE                  (0x1 << CE_NN_MODE_BITPOS)
-#define CE_INUSE_CONTEXTS           (0x1 << CE_INUSE_CONTEXTS_BITPOS)
-#define XCWE_VOLUNTARY              (0x1)
-#define LCS_STATUS          (0x1)
-#define MMC_SHARE_CS_BITPOS         2
-#define WAKEUP_EVENT 0x10000
-#define FCU_CTRL_BROADCAST_POS   0x4
-#define FCU_CTRL_AE_POS     0x8
-#define FCU_AUTH_STS_MASK   0x7
-#define FCU_STS_DONE_POS    0x9
-#define FCU_STS_AUTHFWLD_POS 0X8
-#define FCU_LOADED_AE_POS   0x16
-#define FW_AUTH_WAIT_PERIOD 10
-#define FW_AUTH_MAX_RETRY   300
-#define ICP_QAT_AE_OFFSET 0x20000
-#define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000)
-#define LOCAL_TO_XFER_REG_OFFSET 0x800
-#define ICP_QAT_EP_OFFSET 0x3a000
-#define ICP_QAT_EP_OFFSET_4XXX   0x200000 /* HI MMIO CSRs */
-#define ICP_QAT_AE_OFFSET_4XXX   0x600000
-#define ICP_QAT_CAP_OFFSET_4XXX  0x640000
-#define SET_CAP_CSR(handle, csr, val) \
-       ADF_CSR_WR((handle)->hal_cap_g_ctl_csr_addr_v, csr, val)
-#define GET_CAP_CSR(handle, csr) \
-       ADF_CSR_RD((handle)->hal_cap_g_ctl_csr_addr_v, csr)
-#define AE_CSR(handle, ae) \
-       ((char __iomem *)(handle)->hal_cap_ae_local_csr_addr_v + ((ae) << 12))
-#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & (csr)))
-#define SET_AE_CSR(handle, ae, csr, val) \
-       ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
-#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
-#define AE_XFER(handle, ae) \
-       ((char __iomem *)(handle)->hal_cap_ae_xfer_csr_addr_v + ((ae) << 12))
-#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
-       (((reg) & 0xff) << 2))
-#define SET_AE_XFER(handle, ae, reg, val) \
-       ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
-#define SRAM_WRITE(handle, addr, val) \
-       ADF_CSR_WR((handle)->hal_sram_addr_v, addr, val)
-#endif
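The AE_CSR()/AE_CSR_ADDR() macros above locate a per-engine CSR: each accel engine owns a 4 KB window ((ae) << 12) and the CSR offset is confined to it (0x3ff & (csr)). The sketch below (illustrative only, pure arithmetic, no MMIO) shows that address computation:

/*
 * Illustrative sketch of the per-AE CSR offset arithmetic used by
 * AE_CSR()/AE_CSR_ADDR() above. No device access is performed.
 */
#include <stdio.h>

static unsigned long ae_csr_offset(unsigned int ae, unsigned int csr)
{
        return ((unsigned long)ae << 12) + (csr & 0x3ff);
}

int main(void)
{
        /* CTX_ENABLES (0x018) of accel engine 3, per the enum above. */
        printf("offset = 0x%lx\n", ae_csr_offset(3, 0x018)); /* 0x3018 */
        return 0;
}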
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
deleted file mode 100644
index 4042739..0000000
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ /dev/null
@@ -1,376 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _ICP_QAT_HW_H_
-#define _ICP_QAT_HW_H_
-
-enum icp_qat_hw_ae_id {
-       ICP_QAT_HW_AE_0 = 0,
-       ICP_QAT_HW_AE_1 = 1,
-       ICP_QAT_HW_AE_2 = 2,
-       ICP_QAT_HW_AE_3 = 3,
-       ICP_QAT_HW_AE_4 = 4,
-       ICP_QAT_HW_AE_5 = 5,
-       ICP_QAT_HW_AE_6 = 6,
-       ICP_QAT_HW_AE_7 = 7,
-       ICP_QAT_HW_AE_8 = 8,
-       ICP_QAT_HW_AE_9 = 9,
-       ICP_QAT_HW_AE_10 = 10,
-       ICP_QAT_HW_AE_11 = 11,
-       ICP_QAT_HW_AE_DELIMITER = 12
-};
-
-enum icp_qat_hw_qat_id {
-       ICP_QAT_HW_QAT_0 = 0,
-       ICP_QAT_HW_QAT_1 = 1,
-       ICP_QAT_HW_QAT_2 = 2,
-       ICP_QAT_HW_QAT_3 = 3,
-       ICP_QAT_HW_QAT_4 = 4,
-       ICP_QAT_HW_QAT_5 = 5,
-       ICP_QAT_HW_QAT_DELIMITER = 6
-};
-
-enum icp_qat_hw_auth_algo {
-       ICP_QAT_HW_AUTH_ALGO_NULL = 0,
-       ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
-       ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
-       ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
-       ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
-       ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
-       ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
-       ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
-       ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
-       ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
-       ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
-       ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
-       ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
-       ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
-       ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
-       ICP_QAT_HW_AUTH_RESERVED_1 = 15,
-       ICP_QAT_HW_AUTH_RESERVED_2 = 16,
-       ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
-       ICP_QAT_HW_AUTH_RESERVED_3 = 18,
-       ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
-       ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
-};
-
-enum icp_qat_hw_auth_mode {
-       ICP_QAT_HW_AUTH_MODE0 = 0,
-       ICP_QAT_HW_AUTH_MODE1 = 1,
-       ICP_QAT_HW_AUTH_MODE2 = 2,
-       ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
-};
-
-struct icp_qat_hw_auth_config {
-       __u32 config;
-       __u32 reserved;
-};
-
-struct icp_qat_hw_ucs_cipher_config {
-       __u32 val;
-       __u32 reserved[3];
-};
-
-enum icp_qat_slice_mask {
-       ICP_ACCEL_MASK_CIPHER_SLICE = BIT(0),
-       ICP_ACCEL_MASK_AUTH_SLICE = BIT(1),
-       ICP_ACCEL_MASK_PKE_SLICE = BIT(2),
-       ICP_ACCEL_MASK_COMPRESS_SLICE = BIT(3),
-       ICP_ACCEL_MASK_LZS_SLICE = BIT(4),
-       ICP_ACCEL_MASK_EIA3_SLICE = BIT(5),
-       ICP_ACCEL_MASK_SHA3_SLICE = BIT(6),
-};
-
-enum icp_qat_capabilities_mask {
-       ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = BIT(0),
-       ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = BIT(1),
-       ICP_ACCEL_CAPABILITIES_CIPHER = BIT(2),
-       ICP_ACCEL_CAPABILITIES_AUTHENTICATION = BIT(3),
-       ICP_ACCEL_CAPABILITIES_RESERVED_1 = BIT(4),
-       ICP_ACCEL_CAPABILITIES_COMPRESSION = BIT(5),
-       ICP_ACCEL_CAPABILITIES_LZS_COMPRESSION = BIT(6),
-       ICP_ACCEL_CAPABILITIES_RAND = BIT(7),
-       ICP_ACCEL_CAPABILITIES_ZUC = BIT(8),
-       ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9),
-       /* Bits 10-11 are currently reserved */
-       ICP_ACCEL_CAPABILITIES_HKDF = BIT(12),
-       ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13),
-       /* Bit 14 is currently reserved */
-       ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
-       ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
-       ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
-       /* Bits 18-21 are currently reserved */
-       ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22),
-       ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
-       ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
-       ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25),
-       ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26)
-};
-
-#define QAT_AUTH_MODE_BITPOS 4
-#define QAT_AUTH_MODE_MASK 0xF
-#define QAT_AUTH_ALGO_BITPOS 0
-#define QAT_AUTH_ALGO_MASK 0xF
-#define QAT_AUTH_CMP_BITPOS 8
-#define QAT_AUTH_CMP_MASK 0x7F
-#define QAT_AUTH_SHA3_PADDING_BITPOS 16
-#define QAT_AUTH_SHA3_PADDING_MASK 0x1
-#define QAT_AUTH_ALGO_SHA3_BITPOS 22
-#define QAT_AUTH_ALGO_SHA3_MASK 0x3
-#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
-       (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
-       ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
-       (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
-        QAT_AUTH_ALGO_SHA3_BITPOS) | \
-        (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
-       (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
-       & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
-       ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
-
-struct icp_qat_hw_auth_counter {
-       __be32 counter;
-       __u32 reserved;
-};
-
-#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
-#define QAT_AUTH_COUNT_BITPOS 0
-#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
-       (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
-
-struct icp_qat_hw_auth_setup {
-       struct icp_qat_hw_auth_config auth_config;
-       struct icp_qat_hw_auth_counter auth_counter;
-};
-
-#define QAT_HW_DEFAULT_ALIGNMENT 8
-#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1)))
-#define ICP_QAT_HW_NULL_STATE1_SZ 32
-#define ICP_QAT_HW_MD5_STATE1_SZ 16
-#define ICP_QAT_HW_SHA1_STATE1_SZ 20
-#define ICP_QAT_HW_SHA224_STATE1_SZ 32
-#define ICP_QAT_HW_SHA256_STATE1_SZ 32
-#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
-#define ICP_QAT_HW_SHA384_STATE1_SZ 64
-#define ICP_QAT_HW_SHA512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
-#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
-#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
-#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
-#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
-#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
-#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
-#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
-#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
-#define ICP_QAT_HW_NULL_STATE2_SZ 32
-#define ICP_QAT_HW_MD5_STATE2_SZ 16
-#define ICP_QAT_HW_SHA1_STATE2_SZ 20
-#define ICP_QAT_HW_SHA224_STATE2_SZ 32
-#define ICP_QAT_HW_SHA256_STATE2_SZ 32
-#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
-#define ICP_QAT_HW_SHA384_STATE2_SZ 64
-#define ICP_QAT_HW_SHA512_STATE2_SZ 64
-#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
-#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
-#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
-#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
-#define ICP_QAT_HW_F9_IK_SZ 16
-#define ICP_QAT_HW_F9_FK_SZ 16
-#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
-       ICP_QAT_HW_F9_FK_SZ)
-#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
-#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
-#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
-#define ICP_QAT_HW_GALOIS_H_SZ 16
-#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
-#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
-
-struct icp_qat_hw_auth_sha512 {
-       struct icp_qat_hw_auth_setup inner_setup;
-       __u8 state1[ICP_QAT_HW_SHA512_STATE1_SZ];
-       struct icp_qat_hw_auth_setup outer_setup;
-       __u8 state2[ICP_QAT_HW_SHA512_STATE2_SZ];
-};
-
-struct icp_qat_hw_auth_algo_blk {
-       struct icp_qat_hw_auth_sha512 sha;
-};
-
-#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
-#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
-
-enum icp_qat_hw_cipher_algo {
-       ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
-       ICP_QAT_HW_CIPHER_ALGO_DES = 1,
-       ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
-       ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
-       ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
-       ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
-       ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
-       ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
-       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
-       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
-       ICP_QAT_HW_CIPHER_DELIMITER = 10
-};
-
-enum icp_qat_hw_cipher_mode {
-       ICP_QAT_HW_CIPHER_ECB_MODE = 0,
-       ICP_QAT_HW_CIPHER_CBC_MODE = 1,
-       ICP_QAT_HW_CIPHER_CTR_MODE = 2,
-       ICP_QAT_HW_CIPHER_F8_MODE = 3,
-       ICP_QAT_HW_CIPHER_XTS_MODE = 6,
-       ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
-};
-
-struct icp_qat_hw_cipher_config {
-       __u32 val;
-       __u32 reserved;
-};
-
-enum icp_qat_hw_cipher_dir {
-       ICP_QAT_HW_CIPHER_ENCRYPT = 0,
-       ICP_QAT_HW_CIPHER_DECRYPT = 1,
-};
-
-enum icp_qat_hw_cipher_convert {
-       ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
-       ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
-};
-
-#define QAT_CIPHER_MODE_BITPOS 4
-#define QAT_CIPHER_MODE_MASK 0xF
-#define QAT_CIPHER_ALGO_BITPOS 0
-#define QAT_CIPHER_ALGO_MASK 0xF
-#define QAT_CIPHER_CONVERT_BITPOS 9
-#define QAT_CIPHER_CONVERT_MASK 0x1
-#define QAT_CIPHER_DIR_BITPOS 8
-#define QAT_CIPHER_DIR_MASK 0x1
-#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
-#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
-#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
-       (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
-       ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
-       ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
-       ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
-#define ICP_QAT_HW_DES_BLK_SZ 8
-#define ICP_QAT_HW_3DES_BLK_SZ 8
-#define ICP_QAT_HW_NULL_BLK_SZ 8
-#define ICP_QAT_HW_AES_BLK_SZ 16
-#define ICP_QAT_HW_KASUMI_BLK_SZ 8
-#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
-#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
-#define ICP_QAT_HW_NULL_KEY_SZ 256
-#define ICP_QAT_HW_DES_KEY_SZ 8
-#define ICP_QAT_HW_3DES_KEY_SZ 24
-#define ICP_QAT_HW_AES_128_KEY_SZ 16
-#define ICP_QAT_HW_AES_192_KEY_SZ 24
-#define ICP_QAT_HW_AES_256_KEY_SZ 32
-#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
-       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
-       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_KASUMI_KEY_SZ 16
-#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
-       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
-       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_ARC4_KEY_SZ 256
-#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
-#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
-#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
-#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
-#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
-#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
-
-struct icp_qat_hw_cipher_aes256_f8 {
-       struct icp_qat_hw_cipher_config cipher_config;
-       __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
-};
-
-struct icp_qat_hw_ucs_cipher_aes256_f8 {
-       struct icp_qat_hw_ucs_cipher_config cipher_config;
-       __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
-};
-
-struct icp_qat_hw_cipher_algo_blk {
-       union {
-               struct icp_qat_hw_cipher_aes256_f8 aes;
-               struct icp_qat_hw_ucs_cipher_aes256_f8 ucs_aes;
-       };
-} __aligned(64);
-
-enum icp_qat_hw_compression_direction {
-       ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
-       ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
-       ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
-};
-
-enum icp_qat_hw_compression_delayed_match {
-       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
-       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
-       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
-};
-
-enum icp_qat_hw_compression_algo {
-       ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
-       ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
-       ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
-};
-
-enum icp_qat_hw_compression_depth {
-       ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
-       ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
-       ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
-       ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
-       ICP_QAT_HW_COMPRESSION_DEPTH_128 = 4,
-       ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 5
-};
-
-enum icp_qat_hw_compression_file_type {
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
-};
-
-struct icp_qat_hw_compression_config {
-       __u32 lower_val;
-       __u32 upper_val;
-};
-
-#define QAT_COMPRESSION_DIR_BITPOS 4
-#define QAT_COMPRESSION_DIR_MASK 0x7
-#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
-#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
-#define QAT_COMPRESSION_ALGO_BITPOS 31
-#define QAT_COMPRESSION_ALGO_MASK 0x1
-#define QAT_COMPRESSION_DEPTH_BITPOS 28
-#define QAT_COMPRESSION_DEPTH_MASK 0x7
-#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
-#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF
-
-#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(dir, delayed, \
-       algo, depth, filetype) \
-       ((((dir) & QAT_COMPRESSION_DIR_MASK) << \
-       QAT_COMPRESSION_DIR_BITPOS) | \
-       (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) << \
-       QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
-       (((algo) & QAT_COMPRESSION_ALGO_MASK) << \
-       QAT_COMPRESSION_ALGO_BITPOS) | \
-       (((depth) & QAT_COMPRESSION_DEPTH_MASK) << \
-       QAT_COMPRESSION_DEPTH_BITPOS) | \
-       (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) << \
-       QAT_COMPRESSION_FILE_TYPE_BITPOS))
-
-#endif
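ICP_QAT_HW_CIPHER_CONFIG_BUILD above assembles the cipher slice configuration word from mode, algorithm, key-convert and direction fields. The standalone sketch below (illustrative only) builds the word for AES-256 in CBC mode, encrypt direction, with key conversion enabled; values are copied from the deleted header, and note that the real macro additionally masks each field before shifting:

/*
 * Illustrative sketch: building a cipher config word with
 * ICP_QAT_HW_CIPHER_CONFIG_BUILD semantics for AES-256/CBC/encrypt
 * with key conversion. Field masking is omitted since all the values
 * here already fit their fields.
 */
#include <stdint.h>
#include <stdio.h>

#define MODE_BITPOS    4 /* QAT_CIPHER_MODE_BITPOS */
#define ALGO_BITPOS    0 /* QAT_CIPHER_ALGO_BITPOS */
#define CONVERT_BITPOS 9 /* QAT_CIPHER_CONVERT_BITPOS */
#define DIR_BITPOS     8 /* QAT_CIPHER_DIR_BITPOS */

#define CBC_MODE    1 /* ICP_QAT_HW_CIPHER_CBC_MODE */
#define AES256_ALGO 5 /* ICP_QAT_HW_CIPHER_ALGO_AES256 */
#define KEY_CONVERT 1 /* ICP_QAT_HW_CIPHER_KEY_CONVERT */
#define ENCRYPT     0 /* ICP_QAT_HW_CIPHER_ENCRYPT */

int main(void)
{
        uint32_t cfg = (CBC_MODE << MODE_BITPOS) |
                       (AES256_ALGO << ALGO_BITPOS) |
                       (KEY_CONVERT << CONVERT_BITPOS) |
                       (ENCRYPT << DIR_BITPOS);

        printf("cipher_config.val = 0x%08x\n", cfg); /* 0x00000215 */
        return 0;
}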
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h
deleted file mode 100644
index 7ea8962..0000000
--- a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef _ICP_QAT_HW_20_COMP_H_
-#define _ICP_QAT_HW_20_COMP_H_
-
-#include "icp_qat_hw_20_comp_defs.h"
-#include "icp_qat_fw.h"
-
-struct icp_qat_hw_comp_20_config_csr_lower {
-       enum icp_qat_hw_comp_20_extended_delay_match_mode edmm;
-       enum icp_qat_hw_comp_20_hw_comp_format algo;
-       enum icp_qat_hw_comp_20_search_depth sd;
-       enum icp_qat_hw_comp_20_hbs_control hbs;
-       enum icp_qat_hw_comp_20_abd abd;
-       enum icp_qat_hw_comp_20_lllbd_ctrl lllbd;
-       enum icp_qat_hw_comp_20_min_match_control mmctrl;
-       enum icp_qat_hw_comp_20_skip_hash_collision hash_col;
-       enum icp_qat_hw_comp_20_skip_hash_update hash_update;
-       enum icp_qat_hw_comp_20_byte_skip skip_ctrl;
-};
-
-static inline __u32
-ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_20_config_csr_lower csr)
-{
-       u32 val32 = 0;
-
-       QAT_FIELD_SET(val32, csr.algo,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK);
-       QAT_FIELD_SET(val32, csr.sd,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK);
-       QAT_FIELD_SET(val32, csr.edmm,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK);
-       QAT_FIELD_SET(val32, csr.hbs,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.lllbd,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
-       QAT_FIELD_SET(val32, csr.mmctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.hash_col,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK);
-       QAT_FIELD_SET(val32, csr.hash_update,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK);
-       QAT_FIELD_SET(val32, csr.skip_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK);
-       QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
-
-       return __builtin_bswap32(val32);
-}
-
-struct icp_qat_hw_comp_20_config_csr_upper {
-       enum icp_qat_hw_comp_20_scb_control scb_ctrl;
-       enum icp_qat_hw_comp_20_rmb_control rmb_ctrl;
-       enum icp_qat_hw_comp_20_som_control som_ctrl;
-       enum icp_qat_hw_comp_20_skip_hash_rd_control skip_hash_ctrl;
-       enum icp_qat_hw_comp_20_scb_unload_control scb_unload_ctrl;
-       enum icp_qat_hw_comp_20_disable_token_fusion_control disable_token_fusion_ctrl;
-       enum icp_qat_hw_comp_20_lbms lbms;
-       enum icp_qat_hw_comp_20_scb_mode_reset_mask scb_mode_reset;
-       __u16 lazy;
-       __u16 nice;
-};
-
-static inline __u32
-ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_20_config_csr_upper csr)
-{
-       u32 val32 = 0;
-
-       QAT_FIELD_SET(val32, csr.scb_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.rmb_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.som_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.skip_hash_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.scb_unload_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.disable_token_fusion_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.lbms,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK);
-       QAT_FIELD_SET(val32, csr.scb_mode_reset,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
-       QAT_FIELD_SET(val32, csr.lazy,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK);
-       QAT_FIELD_SET(val32, csr.nice,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
-
-       return __builtin_bswap32(val32);
-}
-
-struct icp_qat_hw_decomp_20_config_csr_lower {
-       enum icp_qat_hw_decomp_20_hbs_control hbs;
-       enum icp_qat_hw_decomp_20_lbms lbms;
-       enum icp_qat_hw_decomp_20_hw_comp_format algo;
-       enum icp_qat_hw_decomp_20_min_match_control mmctrl;
-       enum icp_qat_hw_decomp_20_lz4_block_checksum_present lbc;
-};
-
-static inline __u32
-ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_20_config_csr_lower csr)
-{
-       u32 val32 = 0;
-
-       QAT_FIELD_SET(val32, csr.hbs,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.lbms,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK);
-       QAT_FIELD_SET(val32, csr.algo,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK);
-       QAT_FIELD_SET(val32, csr.mmctrl,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.lbc,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK);
-
-       return __builtin_bswap32(val32);
-}
-
-struct icp_qat_hw_decomp_20_config_csr_upper {
-       enum icp_qat_hw_decomp_20_speculative_decoder_control sdc;
-       enum icp_qat_hw_decomp_20_mini_cam_control mcc;
-};
-
-static inline __u32
-ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_20_config_csr_upper csr)
-{
-       u32 val32 = 0;
-
-       QAT_FIELD_SET(val32, csr.sdc,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.mcc,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
-
-       return __builtin_bswap32(val32);
-}
-
-#endif
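The BUILD_CONFIG helpers above OR each field into a 32-bit word at its BITPOS under its MASK, then byte-swap the result for the firmware. QAT_FIELD_SET itself is defined in another driver header; the standalone sketch below reimplements the packing idiom from its usage here and is an assumption, not the driver's verbatim macro.

#include <stdint.h>
#include <stdio.h>

/* Hedged reimplementation of the QAT_FIELD_SET packing idiom used by
 * the BUILD_CONFIG helpers above; the real macro lives elsewhere in
 * the driver. */
#define QAT_FIELD_SET(flags, val, bitpos, mask) \
        ((flags) = (((flags) & ~((mask) << (bitpos))) | \
                    (((val) & (mask)) << (bitpos))))

int main(void)
{
        uint32_t val32 = 0;

        /* DEFLATE (0x1) into the 3-bit format field at bit 5 ... */
        QAT_FIELD_SET(val32, 0x1u, 5, 0x7u);
        /* ... and a 1-bit min-match flag at bit 4. */
        QAT_FIELD_SET(val32, 0x1u, 4, 0x1u);

        printf("packed CSR = 0x%08x\n", val32);          /* 0x00000030 */
        printf("swapped    = 0x%08x\n", __builtin_bswap32(val32));
        return 0;
}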
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h
deleted file mode 100644
index 208d455..0000000
+++ /dev/null
@@ -1,300 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef _ICP_QAT_HW_20_COMP_DEFS_H
-#define _ICP_QAT_HW_20_COMP_DEFS_H
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS 31
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_scb_control {
-       ICP_QAT_HW_COMP_20_SCB_CONTROL_ENABLE = 0x0,
-       ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS 30
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_rmb_control {
-       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL = 0x0,
-       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_FC_ONLY = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS 28
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK 0x3
-
-enum icp_qat_hw_comp_20_som_control {
-       ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE = 0x0,
-       ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE = 0x1,
-       ICP_QAT_HW_COMP_20_SOM_CONTROL_INPUT_CRC = 0x2,
-       ICP_QAT_HW_COMP_20_SOM_CONTROL_RESERVED_MODE = 0x3,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_skip_hash_rd_control {
-       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
-       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS 26
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_scb_unload_control {
-       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD = 0x0,
-       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_NO_UNLOAD = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS 21
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_disable_token_fusion_control {
-       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE = 0x0,
-       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_DISABLE = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS 19
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK 0x3
-
-enum icp_qat_hw_comp_20_lbms {
-       ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB = 0x0,
-       ICP_QAT_HW_COMP_20_LBMS_LBMS_256KB = 0x1,
-       ICP_QAT_HW_COMP_20_LBMS_LBMS_1MB = 0x2,
-       ICP_QAT_HW_COMP_20_LBMS_LBMS_4MB = 0x3,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK 0x1
-
-enum icp_qat_hw_comp_20_scb_mode_reset_mask {
-       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS = 0x0,
-       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS_AND_HISTORY = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS 9
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK 0x1ff
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL 258
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS 0
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK 0x1ff
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL 259
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
-
-enum icp_qat_hw_comp_20_hbs_control {
-       ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
-       ICP_QAT_HW_COMP_23_HBS_CONTROL_HBS_IS_64KB = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS 13
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK 0x1
-
-enum icp_qat_hw_comp_20_abd {
-       ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED = 0x0,
-       ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK 0x1
-
-enum icp_qat_hw_comp_20_lllbd_ctrl {
-       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
-       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK 0xf
-
-enum icp_qat_hw_comp_20_search_depth {
-       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 = 0x1,
-       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6 = 0x3,
-       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9 = 0x4,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS 5
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK 0x7
-
-enum icp_qat_hw_comp_20_hw_comp_format {
-       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77 = 0x0,
-       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE = 0x1,
-       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4 = 0x2,
-       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S = 0x3,
-       ICP_QAT_HW_COMP_23_HW_COMP_FORMAT_ZSTD = 0x4,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_min_match_control {
-       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
-       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK 0x1
-
-enum icp_qat_hw_comp_20_skip_hash_collision {
-       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW = 0x0,
-       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK 0x1
-
-enum icp_qat_hw_comp_20_skip_hash_update {
-       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW = 0x0,
-       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS 1
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK 0x1
-
-enum icp_qat_hw_comp_20_byte_skip {
-       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN = 0x0,
-       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS 0
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK 0x1
-
-enum icp_qat_hw_comp_20_extended_delay_match_mode {
-       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED = 0x0,
-       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS 31
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK 0x1
-
-enum icp_qat_hw_decomp_20_speculative_decoder_control {
-       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE = 0x0,
-       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_DISABLE = 0x1,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS 30
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK 0x1
-
-enum icp_qat_hw_decomp_20_mini_cam_control {
-       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE = 0x0,
-       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_DISABLE = 0x1,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
-
-enum icp_qat_hw_decomp_20_hbs_control {
-       ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS 8
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK 0x3
-
-enum icp_qat_hw_decomp_20_lbms {
-       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB = 0x0,
-       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_256KB = 0x1,
-       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_1MB = 0x2,
-       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_4MB = 0x3,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS 5
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK 0x7
-
-enum icp_qat_hw_decomp_20_hw_comp_format {
-       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE = 0x1,
-       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4 = 0x2,
-       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S = 0x3,
-       ICP_QAT_HW_DECOMP_23_HW_DECOMP_FORMAT_ZSTD = 0x4,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
-
-enum icp_qat_hw_decomp_20_min_match_control {
-       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
-       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS 3
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK 0x1
-
-enum icp_qat_hw_decomp_20_lz4_block_checksum_present {
-       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT = 0x0,
-       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_PRESENT = 0x1,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT
-
-#endif
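Composing the *_DEFAULT_VAL constants above by hand shows what the default compression config words look like before the byte swap the builders apply. A small arithmetic sketch, using only values taken from the definitions in this header:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Lower word defaults: SEARCH_DEPTH_LEVEL_1 (0x1) at bit 8 and
         * HW_COMP_FORMAT_DEFLATE (0x1) at bit 5; every other default
         * field in the lower word is 0x0. */
        uint32_t lower = (0x1u << 8) | (0x1u << 5);

        /* Upper word defaults: SCB_CONTROL_DISABLE (0x1) at bit 31,
         * LAZY_PARAM 258 at bit 9, NICE_PARAM 259 at bit 0. */
        uint32_t upper = (0x1u << 31) | (258u << 9) | 259u;

        printf("default lower = 0x%08x\n", lower); /* 0x00000120 */
        printf("default upper = 0x%08x\n", upper); /* 0x80020503 */
        return 0;
}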
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
deleted file mode 100644
index 69482ab..0000000
+++ /dev/null
@@ -1,585 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef __ICP_QAT_UCLO_H__
-#define __ICP_QAT_UCLO_H__
-
-#define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000
-#define ICP_QAT_AC_C62X_DEV_TYPE   0x01000000
-#define ICP_QAT_AC_C3XXX_DEV_TYPE  0x02000000
-#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
-#define ICP_QAT_UCLO_MAX_AE       12
-#define ICP_QAT_UCLO_MAX_CTX      8
-#define ICP_QAT_UCLO_MAX_UIMAGE   (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
-#define ICP_QAT_UCLO_MAX_USTORE   0x4000
-#define ICP_QAT_UCLO_MAX_XFER_REG 128
-#define ICP_QAT_UCLO_MAX_GPR_REG  128
-#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
-#define ICP_QAT_UCLO_MAX_LMEM_REG_2X 1280
-#define ICP_QAT_UCLO_AE_ALL_CTX   0xff
-#define ICP_QAT_UOF_OBJID_LEN     8
-#define ICP_QAT_UOF_FID 0xc6c2
-#define ICP_QAT_UOF_MAJVER 0x4
-#define ICP_QAT_UOF_MINVER 0x11
-#define ICP_QAT_UOF_OBJS        "UOF_OBJS"
-#define ICP_QAT_UOF_STRT        "UOF_STRT"
-#define ICP_QAT_UOF_IMAG        "UOF_IMAG"
-#define ICP_QAT_UOF_IMEM        "UOF_IMEM"
-#define ICP_QAT_UOF_LOCAL_SCOPE     1
-#define ICP_QAT_UOF_INIT_EXPR               0
-#define ICP_QAT_UOF_INIT_REG                1
-#define ICP_QAT_UOF_INIT_REG_CTX            2
-#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP   3
-#define ICP_QAT_SUOF_OBJ_ID_LEN             8
-#define ICP_QAT_SUOF_FID  0x53554f46
-#define ICP_QAT_SUOF_MAJVER 0x0
-#define ICP_QAT_SUOF_MINVER 0x1
-#define ICP_QAT_SUOF_OBJ_NAME_LEN 128
-#define ICP_QAT_MOF_OBJ_ID_LEN 8
-#define ICP_QAT_MOF_OBJ_CHUNKID_LEN 8
-#define ICP_QAT_MOF_FID 0x00666f6d
-#define ICP_QAT_MOF_MAJVER 0x0
-#define ICP_QAT_MOF_MINVER 0x1
-#define ICP_QAT_MOF_SYM_OBJS "SYM_OBJS"
-#define ICP_QAT_SUOF_OBJS "SUF_OBJS"
-#define ICP_QAT_SUOF_IMAG "SUF_IMAG"
-#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN    (50 * sizeof(unsigned long long))
-#define ICP_QAT_SIMG_AE_INSTS_LEN       (0x4000 * sizeof(unsigned long long))
-
-#define DSS_FWSK_MODULUS_LEN    384 /* RSA3K */
-#define DSS_FWSK_EXPONENT_LEN   4
-#define DSS_FWSK_PADDING_LEN    380
-#define DSS_SIGNATURE_LEN       384 /* RSA3K */
-
-#define CSS_FWSK_MODULUS_LEN    256 /* RSA2K */
-#define CSS_FWSK_EXPONENT_LEN   4
-#define CSS_FWSK_PADDING_LEN    252
-#define CSS_SIGNATURE_LEN       256 /* RSA2K */
-
-#define ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)   ((handle)->chip_info->css_3k ? \
-                                               DSS_FWSK_MODULUS_LEN  : \
-                                               CSS_FWSK_MODULUS_LEN)
-
-#define ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)  ((handle)->chip_info->css_3k ? \
-                                               DSS_FWSK_EXPONENT_LEN : \
-                                               CSS_FWSK_EXPONENT_LEN)
-
-#define ICP_QAT_CSS_FWSK_PAD_LEN(handle)       ((handle)->chip_info->css_3k ? \
-                                               DSS_FWSK_PADDING_LEN : \
-                                               CSS_FWSK_PADDING_LEN)
-
-#define ICP_QAT_CSS_FWSK_PUB_LEN(handle)       (ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
-                                               ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
-                                               ICP_QAT_CSS_FWSK_PAD_LEN(handle))
-
-#define ICP_QAT_CSS_SIGNATURE_LEN(handle)      ((handle)->chip_info->css_3k ? \
-                                               DSS_SIGNATURE_LEN : \
-                                               CSS_SIGNATURE_LEN)
-
-#define ICP_QAT_CSS_AE_IMG_LEN     (sizeof(struct icp_qat_simg_ae_mode) + \
-                                   ICP_QAT_SIMG_AE_INIT_SEQ_LEN +         \
-                                   ICP_QAT_SIMG_AE_INSTS_LEN)
-#define ICP_QAT_CSS_AE_SIMG_LEN(handle) (sizeof(struct icp_qat_css_hdr) + \
-                                       ICP_QAT_CSS_FWSK_PUB_LEN(handle) + \
-                                       ICP_QAT_CSS_SIGNATURE_LEN(handle) + \
-                                       ICP_QAT_CSS_AE_IMG_LEN)
-#define ICP_QAT_AE_IMG_OFFSET(handle) (sizeof(struct icp_qat_css_hdr) + \
-                                       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
-                                       ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
-                                       ICP_QAT_CSS_SIGNATURE_LEN(handle))
-#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN    0x40000
-#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN    0x30000
-
-#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
-#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
-#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
-#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
-
-#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
-#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
-#define ICP_QAT_LOC_MEM2_MODE(ae_mode) (((ae_mode) >> 0x6) & 0x1)
-#define ICP_QAT_LOC_MEM3_MODE(ae_mode) (((ae_mode) >> 0x7) & 0x1)
-#define ICP_QAT_LOC_TINDEX_MODE(ae_mode) (((ae_mode) >> 0xe) & 0x1)
-
-enum icp_qat_uof_mem_region {
-       ICP_QAT_UOF_SRAM_REGION = 0x0,
-       ICP_QAT_UOF_LMEM_REGION = 0x3,
-       ICP_QAT_UOF_UMEM_REGION = 0x5
-};
-
-enum icp_qat_uof_regtype {
-       ICP_NO_DEST     = 0,
-       ICP_GPA_REL     = 1,
-       ICP_GPA_ABS     = 2,
-       ICP_GPB_REL     = 3,
-       ICP_GPB_ABS     = 4,
-       ICP_SR_REL      = 5,
-       ICP_SR_RD_REL   = 6,
-       ICP_SR_WR_REL   = 7,
-       ICP_SR_ABS      = 8,
-       ICP_SR_RD_ABS   = 9,
-       ICP_SR_WR_ABS   = 10,
-       ICP_DR_REL      = 19,
-       ICP_DR_RD_REL   = 20,
-       ICP_DR_WR_REL   = 21,
-       ICP_DR_ABS      = 22,
-       ICP_DR_RD_ABS   = 23,
-       ICP_DR_WR_ABS   = 24,
-       ICP_LMEM        = 26,
-       ICP_LMEM0       = 27,
-       ICP_LMEM1       = 28,
-       ICP_NEIGH_REL   = 31,
-       ICP_LMEM2       = 61,
-       ICP_LMEM3       = 62,
-};
-
-enum icp_qat_css_fwtype {
-       CSS_AE_FIRMWARE = 0,
-       CSS_MMP_FIRMWARE = 1
-};
-
-struct icp_qat_uclo_page {
-       struct icp_qat_uclo_encap_page *encap_page;
-       struct icp_qat_uclo_region *region;
-       unsigned int flags;
-};
-
-struct icp_qat_uclo_region {
-       struct icp_qat_uclo_page *loaded;
-       struct icp_qat_uclo_page *page;
-};
-
-struct icp_qat_uclo_aeslice {
-       struct icp_qat_uclo_region *region;
-       struct icp_qat_uclo_page *page;
-       struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
-       struct icp_qat_uclo_encapme *encap_image;
-       unsigned int ctx_mask_assigned;
-       unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
-};
-
-struct icp_qat_uclo_aedata {
-       unsigned int slice_num;
-       unsigned int eff_ustore_size;
-       struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
-};
-
-struct icp_qat_uof_encap_obj {
-       char *beg_uof;
-       struct icp_qat_uof_objhdr *obj_hdr;
-       struct icp_qat_uof_chunkhdr *chunk_hdr;
-       struct icp_qat_uof_varmem_seg *var_mem_seg;
-};
-
-struct icp_qat_uclo_encap_uwblock {
-       unsigned int start_addr;
-       unsigned int words_num;
-       u64 micro_words;
-};
-
-struct icp_qat_uclo_encap_page {
-       unsigned int def_page;
-       unsigned int page_region;
-       unsigned int beg_addr_v;
-       unsigned int beg_addr_p;
-       unsigned int micro_words_num;
-       unsigned int uwblock_num;
-       struct icp_qat_uclo_encap_uwblock *uwblock;
-};
-
-struct icp_qat_uclo_encapme {
-       struct icp_qat_uof_image *img_ptr;
-       struct icp_qat_uclo_encap_page *page;
-       unsigned int ae_reg_num;
-       struct icp_qat_uof_ae_reg *ae_reg;
-       unsigned int init_regsym_num;
-       struct icp_qat_uof_init_regsym *init_regsym;
-       unsigned int sbreak_num;
-       struct icp_qat_uof_sbreak *sbreak;
-       unsigned int uwords_num;
-};
-
-struct icp_qat_uclo_init_mem_table {
-       unsigned int entry_num;
-       struct icp_qat_uof_initmem *init_mem;
-};
-
-struct icp_qat_uclo_objhdr {
-       char *file_buff;
-       unsigned int checksum;
-       unsigned int size;
-};
-
-struct icp_qat_uof_strtable {
-       unsigned int table_len;
-       unsigned int reserved;
-       u64 strings;
-};
-
-struct icp_qat_uclo_objhandle {
-       unsigned int prod_type;
-       unsigned int prod_rev;
-       struct icp_qat_uclo_objhdr *obj_hdr;
-       struct icp_qat_uof_encap_obj encap_uof_obj;
-       struct icp_qat_uof_strtable str_table;
-       struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
-       struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
-       struct icp_qat_uclo_init_mem_table init_mem_tab;
-       struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
-       struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
-       int uimage_num;
-       int uword_in_bytes;
-       int global_inited;
-       unsigned int ae_num;
-       unsigned int ustore_phy_size;
-       void *obj_buf;
-       u64 *uword_buf;
-};
-
-struct icp_qat_uof_uword_block {
-       unsigned int start_addr;
-       unsigned int words_num;
-       unsigned int uword_offset;
-       unsigned int reserved;
-};
-
-struct icp_qat_uof_filehdr {
-       unsigned short file_id;
-       unsigned short reserved1;
-       char min_ver;
-       char maj_ver;
-       unsigned short reserved2;
-       unsigned short max_chunks;
-       unsigned short num_chunks;
-};
-
-struct icp_qat_uof_filechunkhdr {
-       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
-       unsigned int checksum;
-       unsigned int offset;
-       unsigned int size;
-};
-
-struct icp_qat_uof_objhdr {
-       unsigned int ac_dev_type;
-       unsigned short min_cpu_ver;
-       unsigned short max_cpu_ver;
-       short max_chunks;
-       short num_chunks;
-       unsigned int reserved1;
-       unsigned int reserved2;
-};
-
-struct icp_qat_uof_chunkhdr {
-       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
-       unsigned int offset;
-       unsigned int size;
-};
-
-struct icp_qat_uof_memvar_attr {
-       unsigned int offset_in_byte;
-       unsigned int value;
-};
-
-struct icp_qat_uof_initmem {
-       unsigned int sym_name;
-       char region;
-       char scope;
-       unsigned short reserved1;
-       unsigned int addr;
-       unsigned int num_in_bytes;
-       unsigned int val_attr_num;
-};
-
-struct icp_qat_uof_init_regsym {
-       unsigned int sym_name;
-       char init_type;
-       char value_type;
-       char reg_type;
-       unsigned char ctx;
-       unsigned int reg_addr;
-       unsigned int value;
-};
-
-struct icp_qat_uof_varmem_seg {
-       unsigned int sram_base;
-       unsigned int sram_size;
-       unsigned int sram_alignment;
-       unsigned int sdram_base;
-       unsigned int sdram_size;
-       unsigned int sdram_alignment;
-       unsigned int sdram1_base;
-       unsigned int sdram1_size;
-       unsigned int sdram1_alignment;
-       unsigned int scratch_base;
-       unsigned int scratch_size;
-       unsigned int scratch_alignment;
-};
-
-struct icp_qat_uof_gtid {
-       char tool_id[ICP_QAT_UOF_OBJID_LEN];
-       int tool_ver;
-       unsigned int reserved1;
-       unsigned int reserved2;
-};
-
-struct icp_qat_uof_sbreak {
-       unsigned int page_num;
-       unsigned int virt_uaddr;
-       unsigned char sbreak_type;
-       unsigned char reg_type;
-       unsigned short reserved1;
-       unsigned int addr_offset;
-       unsigned int reg_addr;
-};
-
-struct icp_qat_uof_code_page {
-       unsigned int page_region;
-       unsigned int page_num;
-       unsigned char def_page;
-       unsigned char reserved2;
-       unsigned short reserved1;
-       unsigned int beg_addr_v;
-       unsigned int beg_addr_p;
-       unsigned int neigh_reg_tab_offset;
-       unsigned int uc_var_tab_offset;
-       unsigned int imp_var_tab_offset;
-       unsigned int imp_expr_tab_offset;
-       unsigned int code_area_offset;
-};
-
-struct icp_qat_uof_image {
-       unsigned int img_name;
-       unsigned int ae_assigned;
-       unsigned int ctx_assigned;
-       unsigned int ac_dev_type;
-       unsigned int entry_address;
-       unsigned int fill_pattern[2];
-       unsigned int reloadable_size;
-       unsigned char sensitivity;
-       unsigned char reserved;
-       unsigned short ae_mode;
-       unsigned short max_ver;
-       unsigned short min_ver;
-       unsigned short image_attrib;
-       unsigned short reserved2;
-       unsigned short page_region_num;
-       unsigned short numpages;
-       unsigned int reg_tab_offset;
-       unsigned int init_reg_sym_tab;
-       unsigned int sbreak_tab;
-       unsigned int app_metadata;
-};
-
-struct icp_qat_uof_objtable {
-       unsigned int entry_num;
-};
-
-struct icp_qat_uof_ae_reg {
-       unsigned int name;
-       unsigned int vis_name;
-       unsigned short type;
-       unsigned short addr;
-       unsigned short access_mode;
-       unsigned char visible;
-       unsigned char reserved1;
-       unsigned short ref_count;
-       unsigned short reserved2;
-       unsigned int xo_id;
-};
-
-struct icp_qat_uof_code_area {
-       unsigned int micro_words_num;
-       unsigned int uword_block_tab;
-};
-
-struct icp_qat_uof_batch_init {
-       unsigned int ae;
-       unsigned int addr;
-       unsigned int *value;
-       unsigned int size;
-       struct icp_qat_uof_batch_init *next;
-};
-
-struct icp_qat_suof_img_hdr {
-       char          *simg_buf;
-       unsigned long simg_len;
-       char          *css_header;
-       char          *css_key;
-       char          *css_signature;
-       char          *css_simg;
-       unsigned long simg_size;
-       unsigned int  ae_num;
-       unsigned int  ae_mask;
-       unsigned int  fw_type;
-       unsigned long simg_name;
-       unsigned long appmeta_data;
-};
-
-struct icp_qat_suof_img_tbl {
-       unsigned int num_simgs;
-       struct icp_qat_suof_img_hdr *simg_hdr;
-};
-
-struct icp_qat_suof_handle {
-       unsigned int  file_id;
-       unsigned int  check_sum;
-       char          min_ver;
-       char          maj_ver;
-       char          fw_type;
-       char          *suof_buf;
-       unsigned int  suof_size;
-       char          *sym_str;
-       unsigned int  sym_size;
-       struct icp_qat_suof_img_tbl img_table;
-};
-
-struct icp_qat_fw_auth_desc {
-       unsigned int   img_len;
-       unsigned int   ae_mask;
-       unsigned int   css_hdr_high;
-       unsigned int   css_hdr_low;
-       unsigned int   img_high;
-       unsigned int   img_low;
-       unsigned int   signature_high;
-       unsigned int   signature_low;
-       unsigned int   fwsk_pub_high;
-       unsigned int   fwsk_pub_low;
-       unsigned int   img_ae_mode_data_high;
-       unsigned int   img_ae_mode_data_low;
-       unsigned int   img_ae_init_data_high;
-       unsigned int   img_ae_init_data_low;
-       unsigned int   img_ae_insts_high;
-       unsigned int   img_ae_insts_low;
-};
-
-struct icp_qat_auth_chunk {
-       struct icp_qat_fw_auth_desc fw_auth_desc;
-       u64 chunk_size;
-       u64 chunk_bus_addr;
-};
-
-struct icp_qat_css_hdr {
-       unsigned int module_type;
-       unsigned int header_len;
-       unsigned int header_ver;
-       unsigned int module_id;
-       unsigned int module_vendor;
-       unsigned int date;
-       unsigned int size;
-       unsigned int key_size;
-       unsigned int module_size;
-       unsigned int exponent_size;
-       unsigned int fw_type;
-       unsigned int reserved[21];
-};
-
-struct icp_qat_simg_ae_mode {
-       unsigned int     file_id;
-       unsigned short   maj_ver;
-       unsigned short   min_ver;
-       unsigned int     dev_type;
-       unsigned short   devmax_ver;
-       unsigned short   devmin_ver;
-       unsigned int     ae_mask;
-       unsigned int     ctx_enables;
-       char             fw_type;
-       char             ctx_mode;
-       char             nn_mode;
-       char             lm0_mode;
-       char             lm1_mode;
-       char             scs_mode;
-       char             lm2_mode;
-       char             lm3_mode;
-       char             tindex_mode;
-       unsigned char    reserved[7];
-       char             simg_name[256];
-       char             appmeta_data[256];
-};
-
-struct icp_qat_suof_filehdr {
-       unsigned int     file_id;
-       unsigned int     check_sum;
-       char             min_ver;
-       char             maj_ver;
-       char             fw_type;
-       char             reserved;
-       unsigned short   max_chunks;
-       unsigned short   num_chunks;
-};
-
-struct icp_qat_suof_chunk_hdr {
-       char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN];
-       u64 offset;
-       u64 size;
-};
-
-struct icp_qat_suof_strtable {
-       unsigned int tab_length;
-       unsigned int strings;
-};
-
-struct icp_qat_suof_objhdr {
-       unsigned int img_length;
-       unsigned int reserved;
-};
-
-struct icp_qat_mof_file_hdr {
-       unsigned int file_id;
-       unsigned int checksum;
-       char min_ver;
-       char maj_ver;
-       unsigned short reserved;
-       unsigned short max_chunks;
-       unsigned short num_chunks;
-};
-
-struct icp_qat_mof_chunkhdr {
-       char chunk_id[ICP_QAT_MOF_OBJ_ID_LEN];
-       u64 offset;
-       u64 size;
-};
-
-struct icp_qat_mof_str_table {
-       unsigned int tab_len;
-       unsigned int strings;
-};
-
-struct icp_qat_mof_obj_hdr {
-       unsigned short max_chunks;
-       unsigned short num_chunks;
-       unsigned int reserved;
-};
-
-struct icp_qat_mof_obj_chunkhdr {
-       char chunk_id[ICP_QAT_MOF_OBJ_CHUNKID_LEN];
-       u64 offset;
-       u64 size;
-       unsigned int name;
-       unsigned int reserved;
-};
-
-struct icp_qat_mof_objhdr {
-       char *obj_name;
-       char *obj_buf;
-       unsigned int obj_size;
-};
-
-struct icp_qat_mof_table {
-       unsigned int num_objs;
-       struct icp_qat_mof_objhdr *obj_hdr;
-};
-
-struct icp_qat_mof_handle {
-       unsigned int file_id;
-       unsigned int checksum;
-       char min_ver;
-       char maj_ver;
-       char *mof_buf;
-       u32 mof_size;
-       char *sym_str;
-       unsigned int sym_size;
-       char *uobjs_hdr;
-       char *sobjs_hdr;
-       struct icp_qat_mof_table obj_table;
-};
-#endif
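The ae_mode halfword carried in struct icp_qat_uof_image packs several per-accel-engine settings; the ICP_QAT_*_MODE macros near the top of this header slice them back out. A minimal decoding sketch reusing those macros on a hypothetical encoded value:

#include <stdio.h>

#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)

int main(void)
{
        unsigned short ae_mode = 0x0848; /* hypothetical encoded value */

        printf("ctx mode      = %u\n", ICP_QAT_CTX_MODE(ae_mode));   /* 8 */
        printf("nn mode       = %u\n", ICP_QAT_NN_MODE(ae_mode));    /* 4 */
        printf("shared ustore = %u\n",
               ICP_QAT_SHARED_USTORE_MODE(ae_mode));                 /* 1 */
        return 0;
}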
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
deleted file mode 100644
index 538dcbf..0000000
+++ /dev/null
@@ -1,1424 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/crypto.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/aes.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
-#include <crypto/algapi.h>
-#include <crypto/authenc.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/xts.h>
-#include <linux/dma-mapping.h>
-#include "adf_accel_devices.h"
-#include "qat_algs_send.h"
-#include "adf_common_drv.h"
-#include "qat_crypto.h"
-#include "icp_qat_hw.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-#include "qat_bl.h"
-
-#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
-       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
-                                      ICP_QAT_HW_CIPHER_NO_CONVERT, \
-                                      ICP_QAT_HW_CIPHER_ENCRYPT)
-
-#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
-       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
-                                      ICP_QAT_HW_CIPHER_KEY_CONVERT, \
-                                      ICP_QAT_HW_CIPHER_DECRYPT)
-
-#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
-       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
-                                      ICP_QAT_HW_CIPHER_NO_CONVERT, \
-                                      ICP_QAT_HW_CIPHER_DECRYPT)
-
-#define HW_CAP_AES_V2(accel_dev) \
-       (GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
-        ICP_ACCEL_CAPABILITIES_AES_V2)
-
-static DEFINE_MUTEX(algs_lock);
-static unsigned int active_devs;
-
-/* Common content descriptor */
-struct qat_alg_cd {
-       union {
-               struct qat_enc { /* Encrypt content desc */
-                       struct icp_qat_hw_cipher_algo_blk cipher;
-                       struct icp_qat_hw_auth_algo_blk hash;
-               } qat_enc_cd;
-               struct qat_dec { /* Decrypt content desc */
-                       struct icp_qat_hw_auth_algo_blk hash;
-                       struct icp_qat_hw_cipher_algo_blk cipher;
-               } qat_dec_cd;
-       };
-} __aligned(64);
-
-struct qat_alg_aead_ctx {
-       struct qat_alg_cd *enc_cd;
-       struct qat_alg_cd *dec_cd;
-       dma_addr_t enc_cd_paddr;
-       dma_addr_t dec_cd_paddr;
-       struct icp_qat_fw_la_bulk_req enc_fw_req;
-       struct icp_qat_fw_la_bulk_req dec_fw_req;
-       struct crypto_shash *hash_tfm;
-       enum icp_qat_hw_auth_algo qat_hash_alg;
-       struct qat_crypto_instance *inst;
-       union {
-               struct sha1_state sha1;
-               struct sha256_state sha256;
-               struct sha512_state sha512;
-       };
-       char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
-       char opad[SHA512_BLOCK_SIZE];
-};
-
-struct qat_alg_skcipher_ctx {
-       struct icp_qat_hw_cipher_algo_blk *enc_cd;
-       struct icp_qat_hw_cipher_algo_blk *dec_cd;
-       dma_addr_t enc_cd_paddr;
-       dma_addr_t dec_cd_paddr;
-       struct icp_qat_fw_la_bulk_req enc_fw_req;
-       struct icp_qat_fw_la_bulk_req dec_fw_req;
-       struct qat_crypto_instance *inst;
-       struct crypto_skcipher *ftfm;
-       struct crypto_cipher *tweak;
-       bool fallback;
-       int mode;
-};
-
-static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
-{
-       switch (qat_hash_alg) {
-       case ICP_QAT_HW_AUTH_ALGO_SHA1:
-               return ICP_QAT_HW_SHA1_STATE1_SZ;
-       case ICP_QAT_HW_AUTH_ALGO_SHA256:
-               return ICP_QAT_HW_SHA256_STATE1_SZ;
-       case ICP_QAT_HW_AUTH_ALGO_SHA512:
-               return ICP_QAT_HW_SHA512_STATE1_SZ;
-       default:
-               return -EFAULT;
-       }
-       return -EFAULT;
-}
-
-static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
-                                 struct qat_alg_aead_ctx *ctx,
-                                 const u8 *auth_key,
-                                 unsigned int auth_keylen)
-{
-       SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
-       int block_size = crypto_shash_blocksize(ctx->hash_tfm);
-       int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
-       __be32 *hash_state_out;
-       __be64 *hash512_state_out;
-       int i, offset;
-
-       memset(ctx->ipad, 0, block_size);
-       memset(ctx->opad, 0, block_size);
-       shash->tfm = ctx->hash_tfm;
-
-       if (auth_keylen > block_size) {
-               int ret = crypto_shash_digest(shash, auth_key,
-                                             auth_keylen, ctx->ipad);
-               if (ret)
-                       return ret;
-
-               memcpy(ctx->opad, ctx->ipad, digest_size);
-       } else {
-               memcpy(ctx->ipad, auth_key, auth_keylen);
-               memcpy(ctx->opad, auth_key, auth_keylen);
-       }
-
-       for (i = 0; i < block_size; i++) {
-               char *ipad_ptr = ctx->ipad + i;
-               char *opad_ptr = ctx->opad + i;
-               *ipad_ptr ^= HMAC_IPAD_VALUE;
-               *opad_ptr ^= HMAC_OPAD_VALUE;
-       }
-
-       if (crypto_shash_init(shash))
-               return -EFAULT;
-
-       if (crypto_shash_update(shash, ctx->ipad, block_size))
-               return -EFAULT;
-
-       hash_state_out = (__be32 *)hash->sha.state1;
-       hash512_state_out = (__be64 *)hash_state_out;
-
-       switch (ctx->qat_hash_alg) {
-       case ICP_QAT_HW_AUTH_ALGO_SHA1:
-               if (crypto_shash_export(shash, &ctx->sha1))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-                       *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA256:
-               if (crypto_shash_export(shash, &ctx->sha256))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-                       *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA512:
-               if (crypto_shash_export(shash, &ctx->sha512))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
-                       *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
-               break;
-       default:
-               return -EFAULT;
-       }
-
-       if (crypto_shash_init(shash))
-               return -EFAULT;
-
-       if (crypto_shash_update(shash, ctx->opad, block_size))
-               return -EFAULT;
-
-       offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
-       if (offset < 0)
-               return -EFAULT;
-
-       hash_state_out = (__be32 *)(hash->sha.state1 + offset);
-       hash512_state_out = (__be64 *)hash_state_out;
-
-       switch (ctx->qat_hash_alg) {
-       case ICP_QAT_HW_AUTH_ALGO_SHA1:
-               if (crypto_shash_export(shash, &ctx->sha1))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-                       *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA256:
-               if (crypto_shash_export(shash, &ctx->sha256))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-                       *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA512:
-               if (crypto_shash_export(shash, &ctx->sha512))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
-                       *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
-               break;
-       default:
-               return -EFAULT;
-       }
-       memzero_explicit(ctx->ipad, block_size);
-       memzero_explicit(ctx->opad, block_size);
-       return 0;
-}
-
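qat_alg_do_precomputes() derives the standard HMAC inner/outer pads (key XORed with 0x36 and 0x5c, over-long keys first digested down) and exports the partial hash state after one block of each pad, giving the hardware midstates it can resume from. A minimal standalone sketch of the pad derivation, with digest_one_block() as a hypothetical stand-in for the crypto_shash init/update/export sequence:

#include <string.h>

#define BLOCK_SIZE 64 /* SHA-1/SHA-256 block size */

static void hmac_pads(const unsigned char *key, size_t keylen,
                      unsigned char ipad[BLOCK_SIZE],
                      unsigned char opad[BLOCK_SIZE])
{
        size_t i;

        /* Keys longer than a block would first be hashed down to
         * digest size, as the auth_keylen > block_size branch does. */
        memset(ipad, 0, BLOCK_SIZE);
        memset(opad, 0, BLOCK_SIZE);
        memcpy(ipad, key, keylen);
        memcpy(opad, key, keylen);

        for (i = 0; i < BLOCK_SIZE; i++) {
                ipad[i] ^= 0x36; /* HMAC_IPAD_VALUE */
                opad[i] ^= 0x5c; /* HMAC_OPAD_VALUE */
        }

        /* The driver then hashes exactly one block of each pad and
         * exports the midstate (big-endian words) into state1, e.g.:
         * digest_one_block(ipad, inner_state);
         * digest_one_block(opad, outer_state); */
}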
-static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
-{
-       header->hdr_flags =
-               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
-       header->comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
-                                           QAT_COMN_PTR_TYPE_SGL);
-       ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
-                                 ICP_QAT_FW_LA_PARTIAL_NONE);
-       ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
-                                          ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
-       ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-                               ICP_QAT_FW_LA_NO_PROTO);
-       ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
-                                      ICP_QAT_FW_LA_NO_UPDATE_STATE);
-}
-
-static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
-                                        int alg,
-                                        struct crypto_authenc_keys *keys,
-                                        int mode)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
-       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
-       struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
-       struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
-       struct icp_qat_hw_auth_algo_blk *hash =
-               (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
-               sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
-       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
-       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
-       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
-       void *ptr = &req_tmpl->cd_ctrl;
-       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
-       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
-
-       /* CD setup */
-       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
-       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
-       hash->sha.inner_setup.auth_config.config =
-               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
-                                            ctx->qat_hash_alg, digestsize);
-       hash->sha.inner_setup.auth_counter.counter =
-               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
-
-       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
-               return -EFAULT;
-
-       /* Request setup */
-       qat_alg_init_common_hdr(header);
-       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
-       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
-                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
-                                  ICP_QAT_FW_LA_RET_AUTH_RES);
-       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
-                                  ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
-       cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
-       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
-
-       /* Cipher CD config setup */
-       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
-       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
-       cipher_cd_ctrl->cipher_cfg_offset = 0;
-       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
-       /* Auth CD config setup */
-       hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
-       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
-       hash_cd_ctrl->inner_res_sz = digestsize;
-       hash_cd_ctrl->final_sz = digestsize;
-
-       switch (ctx->qat_hash_alg) {
-       case ICP_QAT_HW_AUTH_ALGO_SHA1:
-               hash_cd_ctrl->inner_state1_sz =
-                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
-               hash_cd_ctrl->inner_state2_sz =
-                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA256:
-               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
-               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA512:
-               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
-               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
-               break;
-       default:
-               break;
-       }
-       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
-                       ((sizeof(struct icp_qat_hw_auth_setup) +
-                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
-       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-       return 0;
-}
-
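All sizes and offsets in the CD control headers are in 8-byte quad-words, hence the recurring >> 3 above; hash_cfg_offset is simply the byte distance from the start of the content descriptor to the hash block, divided by 8. A small arithmetic sketch (the setup-header size below is a placeholder, not the real sizeof(struct icp_qat_hw_auth_setup)):

#include <stdio.h>

int main(void)
{
        unsigned int auth_setup_sz = 24; /* hypothetical placeholder */
        unsigned int enckeylen = 32;     /* AES-256 */

        /* Hash block starts after the setup header plus cipher key. */
        unsigned int hash_cfg_offset = (auth_setup_sz + enckeylen) >> 3;

        printf("hash_cfg_offset = %u quad-words (%u bytes)\n",
               hash_cfg_offset, hash_cfg_offset << 3);
        return 0;
}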
-static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
-                                        int alg,
-                                        struct crypto_authenc_keys *keys,
-                                        int mode)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
-       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
-       struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
-       struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
-       struct icp_qat_hw_cipher_algo_blk *cipher =
-               (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
-               sizeof(struct icp_qat_hw_auth_setup) +
-               roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
-       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
-       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
-       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
-       void *ptr = &req_tmpl->cd_ctrl;
-       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
-       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
-       struct icp_qat_fw_la_auth_req_params *auth_param =
-               (struct icp_qat_fw_la_auth_req_params *)
-               ((char *)&req_tmpl->serv_specif_rqpars +
-               sizeof(struct icp_qat_fw_la_cipher_req_params));
-
-       /* CD setup */
-       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
-       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
-       hash->sha.inner_setup.auth_config.config =
-               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
-                                            ctx->qat_hash_alg,
-                                            digestsize);
-       hash->sha.inner_setup.auth_counter.counter =
-               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
-
-       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
-               return -EFAULT;
-
-       /* Request setup */
-       qat_alg_init_common_hdr(header);
-       header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
-       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
-                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
-                                  ICP_QAT_FW_LA_NO_RET_AUTH_RES);
-       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
-                                  ICP_QAT_FW_LA_CMP_AUTH_RES);
-       cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
-       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
-
-       /* Cipher CD config setup */
-       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
-       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
-       cipher_cd_ctrl->cipher_cfg_offset =
-               (sizeof(struct icp_qat_hw_auth_setup) +
-                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
-       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-
-       /* Auth CD config setup */
-       hash_cd_ctrl->hash_cfg_offset = 0;
-       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
-       hash_cd_ctrl->inner_res_sz = digestsize;
-       hash_cd_ctrl->final_sz = digestsize;
-
-       switch (ctx->qat_hash_alg) {
-       case ICP_QAT_HW_AUTH_ALGO_SHA1:
-               hash_cd_ctrl->inner_state1_sz =
-                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
-               hash_cd_ctrl->inner_state2_sz =
-                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA256:
-               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
-               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA512:
-               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
-               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
-               break;
-       default:
-               break;
-       }
-
-       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
-                       ((sizeof(struct icp_qat_hw_auth_setup) +
-                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
-       auth_param->auth_res_sz = digestsize;
-       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-       return 0;
-}
-
-static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
-                                     struct icp_qat_fw_la_bulk_req *req,
-                                     struct icp_qat_hw_cipher_algo_blk *cd,
-                                     const u8 *key, unsigned int keylen)
-{
-       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
-       struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
-       struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
-       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
-       int mode = ctx->mode;
-
-       qat_alg_init_common_hdr(header);
-       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-       cd_pars->u.s.content_desc_params_sz =
-                               sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
-
-       if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
-               ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
-                                            ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
-
-               /* Store both XTS keys in CD, only the first key is sent
-                * to the HW, the second key is used for tweak calculation
-                */
-               memcpy(cd->ucs_aes.key, key, keylen);
-               keylen = keylen / 2;
-       } else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
-               ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
-                                            ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
-               memcpy(cd->ucs_aes.key, key, keylen);
-               keylen = round_up(keylen, 16);
-       } else {
-               memcpy(cd->aes.key, key, keylen);
-       }
-
-       /* Cipher CD config setup */
-       cd_ctrl->cipher_key_sz = keylen >> 3;
-       cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
-       cd_ctrl->cipher_cfg_offset = 0;
-       ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-}
-
-static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
-                                     int alg, const u8 *key,
-                                     unsigned int keylen, int mode)
-{
-       struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
-       struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
-       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
-
-       qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
-       cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
-       enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
-}
-
-static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
-                                   u8 *key_reverse)
-{
-       struct crypto_aes_ctx aes_expanded;
-       int nrounds;
-       u8 *key;
-
-       aes_expandkey(&aes_expanded, key_forward, keylen);
-       if (keylen == AES_KEYSIZE_128) {
-               nrounds = 10;
-               key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
-               memcpy(key_reverse, key, AES_BLOCK_SIZE);
-       } else {
-               /* AES_KEYSIZE_256 */
-               nrounds = 14;
-               key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
-               memcpy(key_reverse, key, AES_BLOCK_SIZE);
-               memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
-                      AES_BLOCK_SIZE);
-       }
-}
-
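For XTS on AES-v2 capable parts, hardware key conversion is unavailable, so qat_alg_xts_reverse_key() expands the forward key and takes the tail of the encryption schedule as the decryption key material. The hard-coded round counts follow the usual AES rule, nrounds = 6 + keylen_bytes / 4, as this sketch shows:

#include <stdio.h>

static int aes_nrounds(unsigned int keylen_bytes)
{
        return 6 + keylen_bytes / 4; /* 16 -> 10, 24 -> 12, 32 -> 14 */
}

int main(void)
{
        printf("AES-128: %d rounds\n", aes_nrounds(16));
        printf("AES-256: %d rounds\n", aes_nrounds(32));
        /* The reversed key starts at key_enc + 16 * nrounds, i.e. the
         * last round key of the expanded schedule. */
        return 0;
}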
-static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
-                                     int alg, const u8 *key,
-                                     unsigned int keylen, int mode)
-{
-       struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
-       struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
-       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
-       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
-
-       qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
-       cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
-
-       if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
-               /* Key reversing not supported, set no convert */
-               dec_cd->aes.cipher_config.val =
-                               QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);
-
-               /* In-place key reversal */
-               qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
-                                       dec_cd->ucs_aes.key);
-       } else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
-               dec_cd->aes.cipher_config.val =
-                                       QAT_AES_HW_CONFIG_DEC(alg, mode);
-       } else {
-               dec_cd->aes.cipher_config.val =
-                                       QAT_AES_HW_CONFIG_ENC(alg, mode);
-       }
-}
-
-static int qat_alg_validate_key(int key_len, int *alg, int mode)
-{
-       if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
-               switch (key_len) {
-               case AES_KEYSIZE_128:
-                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
-                       break;
-               case AES_KEYSIZE_192:
-                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
-                       break;
-               case AES_KEYSIZE_256:
-                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       } else {
-               switch (key_len) {
-               case AES_KEYSIZE_128 << 1:
-                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
-                       break;
-               case AES_KEYSIZE_256 << 1:
-                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       }
-       return 0;
-}
-
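XTS keys carry a data key and a tweak key back to back, so qat_alg_validate_key() maps a double-length key to the single-key algorithm: a 32-byte XTS key selects AES-128, a 64-byte key AES-256. A standalone usage sketch of that mapping (the enum values are placeholders, not the real ICP_QAT_HW_CIPHER_ALGO_* codes):

#include <stdio.h>

enum { ALGO_AES128, ALGO_AES192, ALGO_AES256 };

static int validate_key(int key_len, int *alg, int xts)
{
        if (!xts) {
                switch (key_len) {
                case 16: *alg = ALGO_AES128; return 0;
                case 24: *alg = ALGO_AES192; return 0;
                case 32: *alg = ALGO_AES256; return 0;
                }
        } else {
                switch (key_len) {
                case 32: *alg = ALGO_AES128; return 0; /* 2 x 128-bit */
                case 64: *alg = ALGO_AES256; return 0; /* 2 x 256-bit */
                }
        }
        return -1;
}

int main(void)
{
        int alg;

        if (!validate_key(32, &alg, 1))
                printf("32-byte XTS key -> AES-128 (alg %d)\n", alg);
        return 0;
}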
-static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
-                                     unsigned int keylen,  int mode)
-{
-       struct crypto_authenc_keys keys;
-       int alg;
-
-       if (crypto_authenc_extractkeys(&keys, key, keylen))
-               goto bad_key;
-
-       if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
-               goto bad_key;
-
-       if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
-               goto error;
-
-       if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
-               goto error;
-
-       memzero_explicit(&keys, sizeof(keys));
-       return 0;
-bad_key:
-       memzero_explicit(&keys, sizeof(keys));
-       return -EINVAL;
-error:
-       memzero_explicit(&keys, sizeof(keys));
-       return -EFAULT;
-}
-
-static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
-                                         const u8 *key,
-                                         unsigned int keylen,
-                                         int mode)
-{
-       int alg;
-
-       if (qat_alg_validate_key(keylen, &alg, mode))
-               return -EINVAL;
-
-       qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
-       qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
-       return 0;
-}
-
-static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
-                             unsigned int keylen)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-
-       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
-       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
-       memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
-       memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
-
-       return qat_alg_aead_init_sessions(tfm, key, keylen,
-                                         ICP_QAT_HW_CIPHER_CBC_MODE);
-}
-
-static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
-                              unsigned int keylen)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-       struct qat_crypto_instance *inst = NULL;
-       int node = numa_node_id();
-       struct device *dev;
-       int ret;
-
-       inst = qat_crypto_get_instance_node(node);
-       if (!inst)
-               return -EINVAL;
-       dev = &GET_DEV(inst->accel_dev);
-       ctx->inst = inst;
-       ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
-                                        &ctx->enc_cd_paddr,
-                                        GFP_ATOMIC);
-       if (!ctx->enc_cd) {
-               ret = -ENOMEM;
-               goto out_free_inst;
-       }
-       ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
-                                        &ctx->dec_cd_paddr,
-                                        GFP_ATOMIC);
-       if (!ctx->dec_cd) {
-               ret = -ENOMEM;
-               goto out_free_enc;
-       }
-
-       ret = qat_alg_aead_init_sessions(tfm, key, keylen,
-                                        ICP_QAT_HW_CIPHER_CBC_MODE);
-       if (ret)
-               goto out_free_all;
-
-       return 0;
-
-out_free_all:
-       memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
-       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
-                         ctx->dec_cd, ctx->dec_cd_paddr);
-       ctx->dec_cd = NULL;
-out_free_enc:
-       memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
-       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
-                         ctx->enc_cd, ctx->enc_cd_paddr);
-       ctx->enc_cd = NULL;
-out_free_inst:
-       ctx->inst = NULL;
-       qat_crypto_put_instance(inst);
-       return ret;
-}
-
-static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
-                              unsigned int keylen)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-
-       if (ctx->enc_cd)
-               return qat_alg_aead_rekey(tfm, key, keylen);
-       else
-               return qat_alg_aead_newkey(tfm, key, keylen);
-}
-
-static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
-                                 struct qat_crypto_request *qat_req)
-{
-       struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct aead_request *areq = qat_req->aead_req;
-       u8 stat_field = qat_resp->comn_resp.comn_status;
-       int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
-
-       qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
-       if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
-               res = -EBADMSG;
-       aead_request_complete(areq, res);
-}
-
-static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
-{
-       struct skcipher_request *sreq = qat_req->skcipher_req;
-       u64 iv_lo_prev;
-       u64 iv_lo;
-       u64 iv_hi;
-
-       memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
-
-       iv_lo = be64_to_cpu(qat_req->iv_lo);
-       iv_hi = be64_to_cpu(qat_req->iv_hi);
-
-       iv_lo_prev = iv_lo;
-       iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
-       if (iv_lo < iv_lo_prev)
-               iv_hi++;
-
-       qat_req->iv_lo = cpu_to_be64(iv_lo);
-       qat_req->iv_hi = cpu_to_be64(iv_hi);
-}
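
The helper above treats the 16-byte IV as one big-endian 128-bit counter split across two 64-bit halves, advances it by the number of blocks the request consumed, and carries an overflow of the low half into the high half. A stand-alone sketch of the same arithmetic; ctr_iv_advance is hypothetical and assumes a little-endian host and the GCC/Clang __builtin_bswap64 builtin:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AES_BLOCK 16

/* Advance a 16-byte big-endian CTR IV by the number of AES blocks
 * covered by len bytes, carrying from the low into the high half,
 * as qat_alg_update_iv_ctr_mode() does. */
static void ctr_iv_advance(uint8_t iv[16], size_t len)
{
	uint64_t hi, lo, prev;

	memcpy(&hi, iv, 8);
	memcpy(&lo, iv + 8, 8);
	hi = __builtin_bswap64(hi);	/* little-endian host assumed */
	lo = __builtin_bswap64(lo);

	prev = lo;
	lo += (len + AES_BLOCK - 1) / AES_BLOCK;	/* DIV_ROUND_UP */
	if (lo < prev)			/* unsigned wrap => carry */
		hi++;

	hi = __builtin_bswap64(hi);
	lo = __builtin_bswap64(lo);
	memcpy(iv, &hi, 8);
	memcpy(iv + 8, &lo, 8);
}

int main(void)
{
	uint8_t iv[16] = { [15] = 0xff };	/* counter == 0xff */
	ctr_iv_advance(iv, 2 * AES_BLOCK);	/* advance by two blocks */
	printf("%02x %02x\n", iv[14], iv[15]);	/* prints "01 01" */
	return 0;
}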
-
-static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
-{
-       struct skcipher_request *sreq = qat_req->skcipher_req;
-       int offset = sreq->cryptlen - AES_BLOCK_SIZE;
-       struct scatterlist *sgl;
-
-       if (qat_req->encryption)
-               sgl = sreq->dst;
-       else
-               sgl = sreq->src;
-
-       scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
-}
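
For CBC the chaining value after a request is simply the last ciphertext block, which is why the helper reads from dst on encryption and from src on decryption. Note that the decrypt path below calls qat_alg_update_iv() before submitting the request, while the source buffer still holds the ciphertext (an in-place decryption would otherwise overwrite it). The copy itself reduces to the following; cbc_next_iv is a hypothetical stand-alone version:

#include <stdint.h>
#include <string.h>

#define AES_BLOCK 16

/* Next CBC IV = last ciphertext block (output when encrypting,
 * input when decrypting). len must be a non-zero block multiple. */
void cbc_next_iv(uint8_t iv[AES_BLOCK], const uint8_t *ciphertext,
		 size_t len)
{
	memcpy(iv, ciphertext + len - AES_BLOCK, AES_BLOCK);
}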
-
-static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
-{
-       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
-       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-
-       switch (ctx->mode) {
-       case ICP_QAT_HW_CIPHER_CTR_MODE:
-               qat_alg_update_iv_ctr_mode(qat_req);
-               break;
-       case ICP_QAT_HW_CIPHER_CBC_MODE:
-               qat_alg_update_iv_cbc_mode(qat_req);
-               break;
-       case ICP_QAT_HW_CIPHER_XTS_MODE:
-               break;
-       default:
-               dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
-                        ctx->mode);
-       }
-}
-
-static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
-                                     struct qat_crypto_request *qat_req)
-{
-       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct skcipher_request *sreq = qat_req->skcipher_req;
-       u8 stat_field = qat_resp->comn_resp.comn_status;
-       int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
-
-       qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
-       if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
-               res = -EINVAL;
-
-       if (qat_req->encryption)
-               qat_alg_update_iv(qat_req);
-
-       memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
-
-       skcipher_request_complete(sreq, res);
-}
-
-void qat_alg_callback(void *resp)
-{
-       struct icp_qat_fw_la_resp *qat_resp = resp;
-       struct qat_crypto_request *qat_req =
-                               (void *)(__force long)qat_resp->opaque_data;
-       struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
-
-       qat_req->cb(qat_resp, qat_req);
-
-       qat_alg_send_backlog(backlog);
-}
-
-static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
-                                   struct qat_crypto_instance *inst,
-                                   struct crypto_async_request *base)
-{
-       struct qat_alg_req *alg_req = &qat_req->alg_req;
-
-       alg_req->fw_req = (u32 *)&qat_req->req;
-       alg_req->tx_ring = inst->sym_tx;
-       alg_req->base = base;
-       alg_req->backlog = &inst->backlog;
-
-       return qat_alg_send_message(alg_req);
-}
-
-static int qat_alg_aead_dec(struct aead_request *areq)
-{
-       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
-       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
-       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
-       struct icp_qat_fw_la_cipher_req_params *cipher_param;
-       struct icp_qat_fw_la_auth_req_params *auth_param;
-       struct icp_qat_fw_la_bulk_req *msg;
-       int digest_size = crypto_aead_authsize(aead_tfm);
-       gfp_t f = qat_algs_alloc_flags(&areq->base);
-       int ret;
-       u32 cipher_len;
-
-       cipher_len = areq->cryptlen - digest_size;
-       if (cipher_len % AES_BLOCK_SIZE != 0)
-               return -EINVAL;
-
-       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
-                                &qat_req->buf, NULL, f);
-       if (unlikely(ret))
-               return ret;
-
-       msg = &qat_req->req;
-       *msg = ctx->dec_fw_req;
-       qat_req->aead_ctx = ctx;
-       qat_req->aead_req = areq;
-       qat_req->cb = qat_aead_alg_callback;
-       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
-       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
-       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
-       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
-       cipher_param->cipher_length = cipher_len;
-       cipher_param->cipher_offset = areq->assoclen;
-       memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
-       auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
-       auth_param->auth_off = 0;
-       auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
-
-       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
-       if (ret == -ENOSPC)
-               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
-
-       return ret;
-}
-
-static int qat_alg_aead_enc(struct aead_request *areq)
-{
-       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
-       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
-       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
-       struct icp_qat_fw_la_cipher_req_params *cipher_param;
-       struct icp_qat_fw_la_auth_req_params *auth_param;
-       gfp_t f = qat_algs_alloc_flags(&areq->base);
-       struct icp_qat_fw_la_bulk_req *msg;
-       u8 *iv = areq->iv;
-       int ret;
-
-       if (areq->cryptlen % AES_BLOCK_SIZE != 0)
-               return -EINVAL;
-
-       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
-                                &qat_req->buf, NULL, f);
-       if (unlikely(ret))
-               return ret;
-
-       msg = &qat_req->req;
-       *msg = ctx->enc_fw_req;
-       qat_req->aead_ctx = ctx;
-       qat_req->aead_req = areq;
-       qat_req->cb = qat_aead_alg_callback;
-       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
-       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
-       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
-       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
-       auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
-
-       memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
-       cipher_param->cipher_length = areq->cryptlen;
-       cipher_param->cipher_offset = areq->assoclen;
-
-       auth_param->auth_off = 0;
-       auth_param->auth_len = areq->assoclen + areq->cryptlen;
-
-       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
-       if (ret == -ENOSPC)
-               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
-
-       return ret;
-}
-
-static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
-                                 const u8 *key, unsigned int keylen,
-                                 int mode)
-{
-       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
-       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
-       memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
-       memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
-
-       return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
-}
-
-static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
-                                  const u8 *key, unsigned int keylen,
-                                  int mode)
-{
-       struct qat_crypto_instance *inst = NULL;
-       struct device *dev;
-       int node = numa_node_id();
-       int ret;
-
-       inst = qat_crypto_get_instance_node(node);
-       if (!inst)
-               return -EINVAL;
-       dev = &GET_DEV(inst->accel_dev);
-       ctx->inst = inst;
-       ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
-                                        &ctx->enc_cd_paddr,
-                                        GFP_ATOMIC);
-       if (!ctx->enc_cd) {
-               ret = -ENOMEM;
-               goto out_free_instance;
-       }
-       ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
-                                        &ctx->dec_cd_paddr,
-                                        GFP_ATOMIC);
-       if (!ctx->dec_cd) {
-               ret = -ENOMEM;
-               goto out_free_enc;
-       }
-
-       ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
-       if (ret)
-               goto out_free_all;
-
-       return 0;
-
-out_free_all:
-       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
-       dma_free_coherent(dev, sizeof(*ctx->dec_cd),
-                         ctx->dec_cd, ctx->dec_cd_paddr);
-       ctx->dec_cd = NULL;
-out_free_enc:
-       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
-       dma_free_coherent(dev, sizeof(*ctx->enc_cd),
-                         ctx->enc_cd, ctx->enc_cd_paddr);
-       ctx->enc_cd = NULL;
-out_free_instance:
-       ctx->inst = NULL;
-       qat_crypto_put_instance(inst);
-       return ret;
-}
-
-static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
-                                  const u8 *key, unsigned int keylen,
-                                  int mode)
-{
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-       ctx->mode = mode;
-
-       if (ctx->enc_cd)
-               return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
-       else
-               return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
-}
-
-static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
-                                      const u8 *key, unsigned int keylen)
-{
-       return qat_alg_skcipher_setkey(tfm, key, keylen,
-                                      ICP_QAT_HW_CIPHER_CBC_MODE);
-}
-
-static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
-                                      const u8 *key, unsigned int keylen)
-{
-       return qat_alg_skcipher_setkey(tfm, key, keylen,
-                                      ICP_QAT_HW_CIPHER_CTR_MODE);
-}
-
-static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
-                                      const u8 *key, unsigned int keylen)
-{
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-       int ret;
-
-       ret = xts_verify_key(tfm, key, keylen);
-       if (ret)
-               return ret;
-
-       if (keylen >> 1 == AES_KEYSIZE_192) {
-               ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
-               if (ret)
-                       return ret;
-
-               ctx->fallback = true;
-
-               return 0;
-       }
-
-       ctx->fallback = false;
-
-       ret = qat_alg_skcipher_setkey(tfm, key, keylen,
-                                     ICP_QAT_HW_CIPHER_XTS_MODE);
-       if (ret)
-               return ret;
-
-       if (HW_CAP_AES_V2(ctx->inst->accel_dev))
-               ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
-                                          keylen / 2);
-
-       return ret;
-}
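
To summarize the routing above: an XTS key arrives as data-key || tweak-key, the accelerator handles only the 128- and 256-bit variants, so a key whose halves are 24 bytes (AES-192) is steered to the software fallback, and on AES v2 parts the second half is additionally loaded into a separate tweak cipher. A stand-alone sketch of that split; struct xts_key_route and xts_route_key are illustrative names, not driver API:

#include <stdbool.h>
#include <stddef.h>

/* An XTS key is two AES keys concatenated; 24-byte halves (AES-192)
 * are not supported by the accelerator and go to a software fallback. */
struct xts_key_route {
	bool use_fallback;
	const unsigned char *data_key;	/* first half */
	const unsigned char *tweak_key;	/* second half */
	size_t half_len;
};

static struct xts_key_route xts_route_key(const unsigned char *key,
					  size_t keylen)
{
	struct xts_key_route r = {
		.use_fallback = (keylen / 2 == 24),
		.data_key = key,
		.tweak_key = key + keylen / 2,
		.half_len = keylen / 2,
	};
	return r;
}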
-
-static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
-{
-       struct icp_qat_fw_la_cipher_req_params *cipher_param;
-       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
-       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
-       u8 *iv = qat_req->skcipher_req->iv;
-
-       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
-
-       if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
-               crypto_cipher_encrypt_one(ctx->tweak,
-                                         (u8 *)cipher_param->u.cipher_IV_array,
-                                         iv);
-       else
-               memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
-}
-
-static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-       struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
-       struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
-       struct icp_qat_fw_la_cipher_req_params *cipher_param;
-       gfp_t f = qat_algs_alloc_flags(&req->base);
-       struct icp_qat_fw_la_bulk_req *msg;
-       int ret;
-
-       if (req->cryptlen == 0)
-               return 0;
-
-       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
-                                &qat_req->buf, NULL, f);
-       if (unlikely(ret))
-               return ret;
-
-       msg = &qat_req->req;
-       *msg = ctx->enc_fw_req;
-       qat_req->skcipher_ctx = ctx;
-       qat_req->skcipher_req = req;
-       qat_req->cb = qat_skcipher_alg_callback;
-       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
-       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
-       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
-       qat_req->encryption = true;
-       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
-       cipher_param->cipher_length = req->cryptlen;
-       cipher_param->cipher_offset = 0;
-
-       qat_alg_set_req_iv(qat_req);
-
-       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
-       if (ret == -ENOSPC)
-               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
-
-       return ret;
-}
-
-static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
-{
-       if (req->cryptlen % AES_BLOCK_SIZE != 0)
-               return -EINVAL;
-
-       return qat_alg_skcipher_encrypt(req);
-}
-
-static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
-       struct skcipher_request *nreq = skcipher_request_ctx(req);
-
-       if (req->cryptlen < XTS_BLOCK_SIZE)
-               return -EINVAL;
-
-       if (ctx->fallback) {
-               memcpy(nreq, req, sizeof(*req));
-               skcipher_request_set_tfm(nreq, ctx->ftfm);
-               return crypto_skcipher_encrypt(nreq);
-       }
-
-       return qat_alg_skcipher_encrypt(req);
-}
-
-static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-       struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
-       struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
-       struct icp_qat_fw_la_cipher_req_params *cipher_param;
-       gfp_t f = qat_algs_alloc_flags(&req->base);
-       struct icp_qat_fw_la_bulk_req *msg;
-       int ret;
-
-       if (req->cryptlen == 0)
-               return 0;
-
-       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
-                                &qat_req->buf, NULL, f);
-       if (unlikely(ret))
-               return ret;
-
-       msg = &qat_req->req;
-       *msg = ctx->dec_fw_req;
-       qat_req->skcipher_ctx = ctx;
-       qat_req->skcipher_req = req;
-       qat_req->cb = qat_skcipher_alg_callback;
-       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
-       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
-       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
-       qat_req->encryption = false;
-       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
-       cipher_param->cipher_length = req->cryptlen;
-       cipher_param->cipher_offset = 0;
-
-       qat_alg_set_req_iv(qat_req);
-       qat_alg_update_iv(qat_req);
-
-       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
-       if (ret == -ENOSPC)
-               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
-
-       return ret;
-}
-
-static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
-{
-       if (req->cryptlen % AES_BLOCK_SIZE != 0)
-               return -EINVAL;
-
-       return qat_alg_skcipher_decrypt(req);
-}
-
-static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
-       struct skcipher_request *nreq = skcipher_request_ctx(req);
-
-       if (req->cryptlen < XTS_BLOCK_SIZE)
-               return -EINVAL;
-
-       if (ctx->fallback) {
-               memcpy(nreq, req, sizeof(*req));
-               skcipher_request_set_tfm(nreq, ctx->ftfm);
-               return crypto_skcipher_decrypt(nreq);
-       }
-
-       return qat_alg_skcipher_decrypt(req);
-}
-
-static int qat_alg_aead_init(struct crypto_aead *tfm,
-                            enum icp_qat_hw_auth_algo hash,
-                            const char *hash_name)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-
-       ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
-       if (IS_ERR(ctx->hash_tfm))
-               return PTR_ERR(ctx->hash_tfm);
-       ctx->qat_hash_alg = hash;
-       crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
-       return 0;
-}
-
-static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
-{
-       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
-}
-
-static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
-{
-       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
-}
-
-static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
-{
-       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
-}
-
-static void qat_alg_aead_exit(struct crypto_aead *tfm)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev;
-
-       crypto_free_shash(ctx->hash_tfm);
-
-       if (!inst)
-               return;
-
-       dev = &GET_DEV(inst->accel_dev);
-       if (ctx->enc_cd) {
-               memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
-               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
-                                 ctx->enc_cd, ctx->enc_cd_paddr);
-       }
-       if (ctx->dec_cd) {
-               memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
-               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
-                                 ctx->dec_cd, ctx->dec_cd_paddr);
-       }
-       qat_crypto_put_instance(inst);
-}
-
-static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
-{
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
-       return 0;
-}
-
-static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
-{
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-       int reqsize;
-
-       ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
-                                         CRYPTO_ALG_NEED_FALLBACK);
-       if (IS_ERR(ctx->ftfm))
-               return PTR_ERR(ctx->ftfm);
-
-       ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
-       if (IS_ERR(ctx->tweak)) {
-               crypto_free_skcipher(ctx->ftfm);
-               return PTR_ERR(ctx->tweak);
-       }
-
-       reqsize = max(sizeof(struct qat_crypto_request),
-                     sizeof(struct skcipher_request) +
-                     crypto_skcipher_reqsize(ctx->ftfm));
-       crypto_skcipher_set_reqsize(tfm, reqsize);
-
-       return 0;
-}
-
-static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
-{
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev;
-
-       if (!inst)
-               return;
-
-       dev = &GET_DEV(inst->accel_dev);
-       if (ctx->enc_cd) {
-               memset(ctx->enc_cd, 0,
-                      sizeof(struct icp_qat_hw_cipher_algo_blk));
-               dma_free_coherent(dev,
-                                 sizeof(struct icp_qat_hw_cipher_algo_blk),
-                                 ctx->enc_cd, ctx->enc_cd_paddr);
-       }
-       if (ctx->dec_cd) {
-               memset(ctx->dec_cd, 0,
-                      sizeof(struct icp_qat_hw_cipher_algo_blk));
-               dma_free_coherent(dev,
-                                 sizeof(struct icp_qat_hw_cipher_algo_blk),
-                                 ctx->dec_cd, ctx->dec_cd_paddr);
-       }
-       qat_crypto_put_instance(inst);
-}
-
-static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
-{
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-       if (ctx->ftfm)
-               crypto_free_skcipher(ctx->ftfm);
-
-       if (ctx->tweak)
-               crypto_free_cipher(ctx->tweak);
-
-       qat_alg_skcipher_exit_tfm(tfm);
-}
-
-static struct aead_alg qat_aeads[] = { {
-       .base = {
-               .cra_name = "authenc(hmac(sha1),cbc(aes))",
-               .cra_driver_name = "qat_aes_cbc_hmac_sha1",
-               .cra_priority = 4001,
-               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-               .cra_blocksize = AES_BLOCK_SIZE,
-               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-               .cra_module = THIS_MODULE,
-       },
-       .init = qat_alg_aead_sha1_init,
-       .exit = qat_alg_aead_exit,
-       .setkey = qat_alg_aead_setkey,
-       .decrypt = qat_alg_aead_dec,
-       .encrypt = qat_alg_aead_enc,
-       .ivsize = AES_BLOCK_SIZE,
-       .maxauthsize = SHA1_DIGEST_SIZE,
-}, {
-       .base = {
-               .cra_name = "authenc(hmac(sha256),cbc(aes))",
-               .cra_driver_name = "qat_aes_cbc_hmac_sha256",
-               .cra_priority = 4001,
-               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-               .cra_blocksize = AES_BLOCK_SIZE,
-               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-               .cra_module = THIS_MODULE,
-       },
-       .init = qat_alg_aead_sha256_init,
-       .exit = qat_alg_aead_exit,
-       .setkey = qat_alg_aead_setkey,
-       .decrypt = qat_alg_aead_dec,
-       .encrypt = qat_alg_aead_enc,
-       .ivsize = AES_BLOCK_SIZE,
-       .maxauthsize = SHA256_DIGEST_SIZE,
-}, {
-       .base = {
-               .cra_name = "authenc(hmac(sha512),cbc(aes))",
-               .cra_driver_name = "qat_aes_cbc_hmac_sha512",
-               .cra_priority = 4001,
-               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-               .cra_blocksize = AES_BLOCK_SIZE,
-               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-               .cra_module = THIS_MODULE,
-       },
-       .init = qat_alg_aead_sha512_init,
-       .exit = qat_alg_aead_exit,
-       .setkey = qat_alg_aead_setkey,
-       .decrypt = qat_alg_aead_dec,
-       .encrypt = qat_alg_aead_enc,
-       .ivsize = AES_BLOCK_SIZE,
-       .maxauthsize = SHA512_DIGEST_SIZE,
-} };
-
-static struct skcipher_alg qat_skciphers[] = { {
-       .base.cra_name = "cbc(aes)",
-       .base.cra_driver_name = "qat_aes_cbc",
-       .base.cra_priority = 4001,
-       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-       .base.cra_blocksize = AES_BLOCK_SIZE,
-       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
-       .base.cra_alignmask = 0,
-       .base.cra_module = THIS_MODULE,
-
-       .init = qat_alg_skcipher_init_tfm,
-       .exit = qat_alg_skcipher_exit_tfm,
-       .setkey = qat_alg_skcipher_cbc_setkey,
-       .decrypt = qat_alg_skcipher_blk_decrypt,
-       .encrypt = qat_alg_skcipher_blk_encrypt,
-       .min_keysize = AES_MIN_KEY_SIZE,
-       .max_keysize = AES_MAX_KEY_SIZE,
-       .ivsize = AES_BLOCK_SIZE,
-}, {
-       .base.cra_name = "ctr(aes)",
-       .base.cra_driver_name = "qat_aes_ctr",
-       .base.cra_priority = 4001,
-       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-       .base.cra_blocksize = 1,
-       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
-       .base.cra_alignmask = 0,
-       .base.cra_module = THIS_MODULE,
-
-       .init = qat_alg_skcipher_init_tfm,
-       .exit = qat_alg_skcipher_exit_tfm,
-       .setkey = qat_alg_skcipher_ctr_setkey,
-       .decrypt = qat_alg_skcipher_decrypt,
-       .encrypt = qat_alg_skcipher_encrypt,
-       .min_keysize = AES_MIN_KEY_SIZE,
-       .max_keysize = AES_MAX_KEY_SIZE,
-       .ivsize = AES_BLOCK_SIZE,
-}, {
-       .base.cra_name = "xts(aes)",
-       .base.cra_driver_name = "qat_aes_xts",
-       .base.cra_priority = 4001,
-       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
-                         CRYPTO_ALG_ALLOCATES_MEMORY,
-       .base.cra_blocksize = AES_BLOCK_SIZE,
-       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
-       .base.cra_alignmask = 0,
-       .base.cra_module = THIS_MODULE,
-
-       .init = qat_alg_skcipher_init_xts_tfm,
-       .exit = qat_alg_skcipher_exit_xts_tfm,
-       .setkey = qat_alg_skcipher_xts_setkey,
-       .decrypt = qat_alg_skcipher_xts_decrypt,
-       .encrypt = qat_alg_skcipher_xts_encrypt,
-       .min_keysize = 2 * AES_MIN_KEY_SIZE,
-       .max_keysize = 2 * AES_MAX_KEY_SIZE,
-       .ivsize = AES_BLOCK_SIZE,
-} };
-
-int qat_algs_register(void)
-{
-       int ret = 0;
-
-       mutex_lock(&algs_lock);
-       if (++active_devs != 1)
-               goto unlock;
-
-       ret = crypto_register_skciphers(qat_skciphers,
-                                       ARRAY_SIZE(qat_skciphers));
-       if (ret)
-               goto unlock;
-
-       ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
-       if (ret)
-               goto unreg_algs;
-
-unlock:
-       mutex_unlock(&algs_lock);
-       return ret;
-
-unreg_algs:
-       crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
-       goto unlock;
-}
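
Registration is reference-counted across devices: under algs_lock, only the 0 -> 1 transition registers the skcipher and AEAD templates, and only the 1 -> 0 transition (in qat_algs_unregister() below) removes them. The idiom in isolation, as a user-space sketch with pthreads; device_up/device_down and the callbacks are placeholders:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int active;

/* Register backends only when the first device arrives, unregister
 * only when the last one goes away, as qat_algs_register() and
 * qat_algs_unregister() do for QAT devices. */
int device_up(int (*register_all)(void))
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (++active == 1)
		ret = register_all();
	pthread_mutex_unlock(&lock);
	return ret;
}

void device_down(void (*unregister_all)(void))
{
	pthread_mutex_lock(&lock);
	if (--active == 0)
		unregister_all();
	pthread_mutex_unlock(&lock);
}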
-
-void qat_algs_unregister(void)
-{
-       mutex_lock(&algs_lock);
-       if (--active_devs != 0)
-               goto unlock;
-
-       crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
-       crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
-
-unlock:
-       mutex_unlock(&algs_lock);
-}
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c
deleted file mode 100644 (file)
index bb80455..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2022 Intel Corporation */
-#include <crypto/algapi.h>
-#include "adf_transport.h"
-#include "qat_algs_send.h"
-#include "qat_crypto.h"
-
-#define ADF_MAX_RETRIES                20
-
-static int qat_alg_send_message_retry(struct qat_alg_req *req)
-{
-       int ret = 0, ctr = 0;
-
-       do {
-               ret = adf_send_message(req->tx_ring, req->fw_req);
-       } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);
-
-       if (ret == -EAGAIN)
-               return -ENOSPC;
-
-       return -EINPROGRESS;
-}
-
-void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
-{
-       struct qat_alg_req *req, *tmp;
-
-       spin_lock_bh(&backlog->lock);
-       list_for_each_entry_safe(req, tmp, &backlog->list, list) {
-               if (adf_send_message(req->tx_ring, req->fw_req)) {
-                       /* The HW ring is full. Do nothing.
-                        * qat_alg_send_backlog() will be invoked again by
-                        * another callback.
-                        */
-                       break;
-               }
-               list_del(&req->list);
-               crypto_request_complete(req->base, -EINPROGRESS);
-       }
-       spin_unlock_bh(&backlog->lock);
-}
-
-static void qat_alg_backlog_req(struct qat_alg_req *req,
-                               struct qat_instance_backlog *backlog)
-{
-       INIT_LIST_HEAD(&req->list);
-
-       spin_lock_bh(&backlog->lock);
-       list_add_tail(&req->list, &backlog->list);
-       spin_unlock_bh(&backlog->lock);
-}
-
-static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
-{
-       struct qat_instance_backlog *backlog = req->backlog;
-       struct adf_etr_ring_data *tx_ring = req->tx_ring;
-       u32 *fw_req = req->fw_req;
-
-       /* If any request is already backlogged, then add to backlog list */
-       if (!list_empty(&backlog->list))
-               goto enqueue;
-
-       /* If ring is nearly full, then add to backlog list */
-       if (adf_ring_nearly_full(tx_ring))
-               goto enqueue;
-
-       /* If adding request to HW ring fails, then add to backlog list */
-       if (adf_send_message(tx_ring, fw_req))
-               goto enqueue;
-
-       return -EINPROGRESS;
-
-enqueue:
-       qat_alg_backlog_req(req, backlog);
-
-       return -EBUSY;
-}
-
-int qat_alg_send_message(struct qat_alg_req *req)
-{
-       u32 flags = req->base->flags;
-
-       if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
-               return qat_alg_send_message_maybacklog(req);
-       else
-               return qat_alg_send_message_retry(req);
-}
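
qat_alg_send_message_maybacklog() above checks three conditions in order, any one of which parks the request on the per-instance backlog: something is already backlogged (so submission order must be preserved), the ring is nearly full, or the enqueue itself fails. The completion path then drains the list via qat_alg_send_backlog(). Condensed into a stand-alone sketch; send_or_backlog and its callback parameters are hypothetical:

#include <errno.h>
#include <stdbool.h>

/* Decision order mirrored from qat_alg_send_message_maybacklog():
 * anything already backlogged forces backlogging, a nearly-full ring
 * is avoided, and a failed enqueue falls back to the backlog.
 * try_send() and backlog() are placeholder callbacks. */
int send_or_backlog(bool already_backlogged, bool ring_nearly_full,
		    bool (*try_send)(void), void (*backlog)(void))
{
	if (already_backlogged || ring_nearly_full || !try_send()) {
		backlog();
		return -EBUSY;		/* parked; completed later by the drain */
	}
	return -EINPROGRESS;		/* submitted; completion via callback */
}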
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h
deleted file mode 100644 (file)
index 0baca16..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef QAT_ALGS_SEND_H
-#define QAT_ALGS_SEND_H
-
-#include <linux/list.h>
-#include "adf_transport_internal.h"
-
-struct qat_instance_backlog {
-       struct list_head list;
-       spinlock_t lock; /* protects backlog list */
-};
-
-struct qat_alg_req {
-       u32 *fw_req;
-       struct adf_etr_ring_data *tx_ring;
-       struct crypto_async_request *base;
-       struct list_head list;
-       struct qat_instance_backlog *backlog;
-};
-
-int qat_alg_send_message(struct qat_alg_req *req);
-void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
deleted file mode 100644 (file)
index 935a7e0..0000000
+++ /dev/null
@@ -1,1309 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/module.h>
-#include <crypto/internal/rsa.h>
-#include <crypto/internal/akcipher.h>
-#include <crypto/akcipher.h>
-#include <crypto/kpp.h>
-#include <crypto/internal/kpp.h>
-#include <crypto/dh.h>
-#include <linux/dma-mapping.h>
-#include <linux/fips.h>
-#include <crypto/scatterwalk.h>
-#include "icp_qat_fw_pke.h"
-#include "adf_accel_devices.h"
-#include "qat_algs_send.h"
-#include "adf_transport.h"
-#include "adf_common_drv.h"
-#include "qat_crypto.h"
-
-static DEFINE_MUTEX(algs_lock);
-static unsigned int active_devs;
-
-struct qat_rsa_input_params {
-       union {
-               struct {
-                       dma_addr_t m;
-                       dma_addr_t e;
-                       dma_addr_t n;
-               } enc;
-               struct {
-                       dma_addr_t c;
-                       dma_addr_t d;
-                       dma_addr_t n;
-               } dec;
-               struct {
-                       dma_addr_t c;
-                       dma_addr_t p;
-                       dma_addr_t q;
-                       dma_addr_t dp;
-                       dma_addr_t dq;
-                       dma_addr_t qinv;
-               } dec_crt;
-               u64 in_tab[8];
-       };
-} __packed __aligned(64);
-
-struct qat_rsa_output_params {
-       union {
-               struct {
-                       dma_addr_t c;
-               } enc;
-               struct {
-                       dma_addr_t m;
-               } dec;
-               u64 out_tab[8];
-       };
-} __packed __aligned(64);
-
-struct qat_rsa_ctx {
-       char *n;
-       char *e;
-       char *d;
-       char *p;
-       char *q;
-       char *dp;
-       char *dq;
-       char *qinv;
-       dma_addr_t dma_n;
-       dma_addr_t dma_e;
-       dma_addr_t dma_d;
-       dma_addr_t dma_p;
-       dma_addr_t dma_q;
-       dma_addr_t dma_dp;
-       dma_addr_t dma_dq;
-       dma_addr_t dma_qinv;
-       unsigned int key_sz;
-       bool crt_mode;
-       struct qat_crypto_instance *inst;
-} __packed __aligned(64);
-
-struct qat_dh_input_params {
-       union {
-               struct {
-                       dma_addr_t b;
-                       dma_addr_t xa;
-                       dma_addr_t p;
-               } in;
-               struct {
-                       dma_addr_t xa;
-                       dma_addr_t p;
-               } in_g2;
-               u64 in_tab[8];
-       };
-} __packed __aligned(64);
-
-struct qat_dh_output_params {
-       union {
-               dma_addr_t r;
-               u64 out_tab[8];
-       };
-} __packed __aligned(64);
-
-struct qat_dh_ctx {
-       char *g;
-       char *xa;
-       char *p;
-       dma_addr_t dma_g;
-       dma_addr_t dma_xa;
-       dma_addr_t dma_p;
-       unsigned int p_size;
-       bool g2;
-       struct qat_crypto_instance *inst;
-} __packed __aligned(64);
-
-struct qat_asym_request {
-       union {
-               struct qat_rsa_input_params rsa;
-               struct qat_dh_input_params dh;
-       } in;
-       union {
-               struct qat_rsa_output_params rsa;
-               struct qat_dh_output_params dh;
-       } out;
-       dma_addr_t phy_in;
-       dma_addr_t phy_out;
-       char *src_align;
-       char *dst_align;
-       struct icp_qat_fw_pke_request req;
-       union {
-               struct qat_rsa_ctx *rsa;
-               struct qat_dh_ctx *dh;
-       } ctx;
-       union {
-               struct akcipher_request *rsa;
-               struct kpp_request *dh;
-       } areq;
-       int err;
-       void (*cb)(struct icp_qat_fw_pke_resp *resp);
-       struct qat_alg_req alg_req;
-} __aligned(64);
-
-static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
-                                    struct qat_crypto_instance *inst,
-                                    struct crypto_async_request *base)
-{
-       struct qat_alg_req *alg_req = &qat_req->alg_req;
-
-       alg_req->fw_req = (u32 *)&qat_req->req;
-       alg_req->tx_ring = inst->pke_tx;
-       alg_req->base = base;
-       alg_req->backlog = &inst->backlog;
-
-       return qat_alg_send_message(alg_req);
-}
-
-static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
-{
-       struct qat_asym_request *req = (void *)(__force long)resp->opaque;
-       struct kpp_request *areq = req->areq.dh;
-       struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
-       int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-                               resp->pke_resp_hdr.comn_resp_flags);
-
-       err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
-
-       if (areq->src) {
-               dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
-                                DMA_TO_DEVICE);
-               kfree_sensitive(req->src_align);
-       }
-
-       areq->dst_len = req->ctx.dh->p_size;
-       if (req->dst_align) {
-               scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
-                                        areq->dst_len, 1);
-               kfree_sensitive(req->dst_align);
-       }
-
-       dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
-                        DMA_FROM_DEVICE);
-
-       dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
-                        DMA_TO_DEVICE);
-       dma_unmap_single(dev, req->phy_out,
-                        sizeof(struct qat_dh_output_params),
-                        DMA_TO_DEVICE);
-
-       kpp_request_complete(areq, err);
-}
-
-#define PKE_DH_1536 0x390c1a49
-#define PKE_DH_G2_1536 0x2e0b1a3e
-#define PKE_DH_2048 0x4d0c1a60
-#define PKE_DH_G2_2048 0x3e0b1a55
-#define PKE_DH_3072 0x510c1a77
-#define PKE_DH_G2_3072 0x3a0b1a6c
-#define PKE_DH_4096 0x690c1a8e
-#define PKE_DH_G2_4096 0x4a0b1a83
-
-static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
-{
-       unsigned int bitslen = len << 3;
-
-       switch (bitslen) {
-       case 1536:
-               return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
-       case 2048:
-               return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
-       case 3072:
-               return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
-       case 4096:
-               return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
-       default:
-               return 0;
-       }
-}
-
-static int qat_dh_compute_value(struct kpp_request *req)
-{
-       struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       struct qat_asym_request *qat_req =
-                       PTR_ALIGN(kpp_request_ctx(req), 64);
-       struct icp_qat_fw_pke_request *msg = &qat_req->req;
-       gfp_t flags = qat_algs_alloc_flags(&req->base);
-       int n_input_params = 0;
-       u8 *vaddr;
-       int ret;
-
-       if (unlikely(!ctx->xa))
-               return -EINVAL;
-
-       if (req->dst_len < ctx->p_size) {
-               req->dst_len = ctx->p_size;
-               return -EOVERFLOW;
-       }
-
-       if (req->src_len > ctx->p_size)
-               return -EINVAL;
-
-       memset(msg, '\0', sizeof(*msg));
-       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
-                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
-
-       msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
-                                                   !req->src && ctx->g2);
-       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
-               return -EINVAL;
-
-       qat_req->cb = qat_dh_cb;
-       qat_req->ctx.dh = ctx;
-       qat_req->areq.dh = req;
-       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-       msg->pke_hdr.comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
-                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
-
-       /* If no source is provided, use g as the base */
-       if (req->src) {
-               qat_req->in.dh.in.xa = ctx->dma_xa;
-               qat_req->in.dh.in.p = ctx->dma_p;
-               n_input_params = 3;
-       } else {
-               if (ctx->g2) {
-                       qat_req->in.dh.in_g2.xa = ctx->dma_xa;
-                       qat_req->in.dh.in_g2.p = ctx->dma_p;
-                       n_input_params = 2;
-               } else {
-                       qat_req->in.dh.in.b = ctx->dma_g;
-                       qat_req->in.dh.in.xa = ctx->dma_xa;
-                       qat_req->in.dh.in.p = ctx->dma_p;
-                       n_input_params = 3;
-               }
-       }
-
-       ret = -ENOMEM;
-       if (req->src) {
-               /*
-                * src can be of any size in the valid range, but the HW
-                * expects it to be the same size as the modulus p, so if
-                * it differs we need to allocate a new buffer and copy the
-                * src data into it. Otherwise we just need to map the
-                * user-provided buffer, which must also be contiguous.
-                */
-               if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
-                       qat_req->src_align = NULL;
-                       vaddr = sg_virt(req->src);
-               } else {
-                       int shift = ctx->p_size - req->src_len;
-
-                       qat_req->src_align = kzalloc(ctx->p_size, flags);
-                       if (unlikely(!qat_req->src_align))
-                               return ret;
-
-                       scatterwalk_map_and_copy(qat_req->src_align + shift,
-                                                req->src, 0, req->src_len, 0);
-
-                       vaddr = qat_req->src_align;
-               }
-
-               qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
-                                                    DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
-                       goto unmap_src;
-       }
-       /*
-        * dst can be of any size in the valid range, but the HW expects it
-        * to be the same size as the modulus p, so if it differs we need to
-        * allocate a new buffer and copy the result back once the request
-        * completes. Otherwise we just need to map the user-provided
-        * buffer, which must also be contiguous.
-        */
-       if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
-               qat_req->dst_align = NULL;
-               vaddr = sg_virt(req->dst);
-       } else {
-               qat_req->dst_align = kzalloc(ctx->p_size, flags);
-               if (unlikely(!qat_req->dst_align))
-                       goto unmap_src;
-
-               vaddr = qat_req->dst_align;
-       }
-       qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
-                                          DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
-               goto unmap_dst;
-
-       qat_req->in.dh.in_tab[n_input_params] = 0;
-       qat_req->out.dh.out_tab[1] = 0;
-       /* in.in and in.in_g2 share the same union storage, so one mapping covers both */
-       qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
-                                        sizeof(struct qat_dh_input_params),
-                                        DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
-               goto unmap_dst;
-
-       qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
-                                         sizeof(struct qat_dh_output_params),
-                                         DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
-               goto unmap_in_params;
-
-       msg->pke_mid.src_data_addr = qat_req->phy_in;
-       msg->pke_mid.dest_data_addr = qat_req->phy_out;
-       msg->pke_mid.opaque = (u64)(__force long)qat_req;
-       msg->input_param_count = n_input_params;
-       msg->output_param_count = 1;
-
-       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
-       if (ret == -ENOSPC)
-               goto unmap_all;
-
-       return ret;
-
-unmap_all:
-       if (!dma_mapping_error(dev, qat_req->phy_out))
-               dma_unmap_single(dev, qat_req->phy_out,
-                                sizeof(struct qat_dh_output_params),
-                                DMA_TO_DEVICE);
-unmap_in_params:
-       if (!dma_mapping_error(dev, qat_req->phy_in))
-               dma_unmap_single(dev, qat_req->phy_in,
-                                sizeof(struct qat_dh_input_params),
-                                DMA_TO_DEVICE);
-unmap_dst:
-       if (!dma_mapping_error(dev, qat_req->out.dh.r))
-               dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
-                                DMA_FROM_DEVICE);
-       kfree_sensitive(qat_req->dst_align);
-unmap_src:
-       if (req->src) {
-               if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
-                       dma_unmap_single(dev, qat_req->in.dh.in.b,
-                                        ctx->p_size,
-                                        DMA_TO_DEVICE);
-               kfree_sensitive(qat_req->src_align);
-       }
-       return ret;
-}
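
The "shift" copy above deserves a note: the firmware expects every PKE operand to be exactly the modulus size, so a shorter big-endian input is written into a zeroed buffer at offset size - src_len, left-padding it with zero bytes without changing its numeric value. The same idea stand-alone; be_left_pad is a hypothetical helper:

#include <stdlib.h>
#include <string.h>

/* Zero-extend a big-endian integer to exactly `size` bytes, as
 * qat_dh_compute_value() and qat_rsa_enc() do with the shift copy
 * into a kzalloc'ed buffer. Returns NULL on error. */
unsigned char *be_left_pad(const unsigned char *src, size_t src_len,
			   size_t size)
{
	unsigned char *buf;

	if (src_len > size)
		return NULL;
	buf = calloc(1, size);
	if (!buf)
		return NULL;
	memcpy(buf + (size - src_len), src, src_len);	/* value unchanged */
	return buf;
}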
-
-static int qat_dh_check_params_length(unsigned int p_len)
-{
-       switch (p_len) {
-       case 1536:
-       case 2048:
-       case 3072:
-       case 4096:
-               return 0;
-       }
-       return -EINVAL;
-}
-
-static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
-{
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-
-       if (qat_dh_check_params_length(params->p_size << 3))
-               return -EINVAL;
-
-       ctx->p_size = params->p_size;
-       ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
-       if (!ctx->p)
-               return -ENOMEM;
-       memcpy(ctx->p, params->p, ctx->p_size);
-
-       /* If g equals 2 don't copy it */
-       if (params->g_size == 1 && *(char *)params->g == 0x02) {
-               ctx->g2 = true;
-               return 0;
-       }
-
-       ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
-       if (!ctx->g)
-               return -ENOMEM;
-       memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
-              params->g_size);
-
-       return 0;
-}
-
-static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
-{
-       if (ctx->g) {
-               memset(ctx->g, 0, ctx->p_size);
-               dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
-               ctx->g = NULL;
-       }
-       if (ctx->xa) {
-               memset(ctx->xa, 0, ctx->p_size);
-               dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
-               ctx->xa = NULL;
-       }
-       if (ctx->p) {
-               memset(ctx->p, 0, ctx->p_size);
-               dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
-               ctx->p = NULL;
-       }
-       ctx->p_size = 0;
-       ctx->g2 = false;
-}
-
-static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
-                            unsigned int len)
-{
-       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
-       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-       struct dh params;
-       int ret;
-
-       if (crypto_dh_decode_key(buf, len, &params) < 0)
-               return -EINVAL;
-
-       /* Free old secret if any */
-       qat_dh_clear_ctx(dev, ctx);
-
-       ret = qat_dh_set_params(ctx, &params);
-       if (ret < 0)
-               goto err_clear_ctx;
-
-       ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
-                                    GFP_KERNEL);
-       if (!ctx->xa) {
-               ret = -ENOMEM;
-               goto err_clear_ctx;
-       }
-       memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
-              params.key_size);
-
-       return 0;
-
-err_clear_ctx:
-       qat_dh_clear_ctx(dev, ctx);
-       return ret;
-}
-
-static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
-{
-       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
-
-       return ctx->p_size;
-}
-
-static int qat_dh_init_tfm(struct crypto_kpp *tfm)
-{
-       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
-       struct qat_crypto_instance *inst =
-                       qat_crypto_get_instance_node(numa_node_id());
-
-       if (!inst)
-               return -EINVAL;
-
-       kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
-
-       ctx->p_size = 0;
-       ctx->g2 = false;
-       ctx->inst = inst;
-       return 0;
-}
-
-static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
-{
-       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
-       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-
-       qat_dh_clear_ctx(dev, ctx);
-       qat_crypto_put_instance(ctx->inst);
-}
-
-static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
-{
-       struct qat_asym_request *req = (void *)(__force long)resp->opaque;
-       struct akcipher_request *areq = req->areq.rsa;
-       struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
-       int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-                               resp->pke_resp_hdr.comn_resp_flags);
-
-       err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
-
-       kfree_sensitive(req->src_align);
-
-       dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
-                        DMA_TO_DEVICE);
-
-       areq->dst_len = req->ctx.rsa->key_sz;
-       if (req->dst_align) {
-               scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
-                                        areq->dst_len, 1);
-
-               kfree_sensitive(req->dst_align);
-       }
-
-       dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
-                        DMA_FROM_DEVICE);
-
-       dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
-                        DMA_TO_DEVICE);
-       dma_unmap_single(dev, req->phy_out,
-                        sizeof(struct qat_rsa_output_params),
-                        DMA_TO_DEVICE);
-
-       akcipher_request_complete(areq, err);
-}
-
-void qat_alg_asym_callback(void *_resp)
-{
-       struct icp_qat_fw_pke_resp *resp = _resp;
-       struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
-       struct qat_instance_backlog *backlog = areq->alg_req.backlog;
-
-       areq->cb(resp);
-
-       qat_alg_send_backlog(backlog);
-}
-
-#define PKE_RSA_EP_512 0x1c161b21
-#define PKE_RSA_EP_1024 0x35111bf7
-#define PKE_RSA_EP_1536 0x4d111cdc
-#define PKE_RSA_EP_2048 0x6e111dba
-#define PKE_RSA_EP_3072 0x7d111ea3
-#define PKE_RSA_EP_4096 0xa5101f7e
-
-static unsigned long qat_rsa_enc_fn_id(unsigned int len)
-{
-       unsigned int bitslen = len << 3;
-
-       switch (bitslen) {
-       case 512:
-               return PKE_RSA_EP_512;
-       case 1024:
-               return PKE_RSA_EP_1024;
-       case 1536:
-               return PKE_RSA_EP_1536;
-       case 2048:
-               return PKE_RSA_EP_2048;
-       case 3072:
-               return PKE_RSA_EP_3072;
-       case 4096:
-               return PKE_RSA_EP_4096;
-       default:
-               return 0;
-       }
-}
-
-#define PKE_RSA_DP1_512 0x1c161b3c
-#define PKE_RSA_DP1_1024 0x35111c12
-#define PKE_RSA_DP1_1536 0x4d111cf7
-#define PKE_RSA_DP1_2048 0x6e111dda
-#define PKE_RSA_DP1_3072 0x7d111ebe
-#define PKE_RSA_DP1_4096 0xa5101f98
-
-static unsigned long qat_rsa_dec_fn_id(unsigned int len)
-{
-       unsigned int bitslen = len << 3;
-
-       switch (bitslen) {
-       case 512:
-               return PKE_RSA_DP1_512;
-       case 1024:
-               return PKE_RSA_DP1_1024;
-       case 1536:
-               return PKE_RSA_DP1_1536;
-       case 2048:
-               return PKE_RSA_DP1_2048;
-       case 3072:
-               return PKE_RSA_DP1_3072;
-       case 4096:
-               return PKE_RSA_DP1_4096;
-       default:
-               return 0;
-       }
-}
-
-#define PKE_RSA_DP2_512 0x1c131b57
-#define PKE_RSA_DP2_1024 0x26131c2d
-#define PKE_RSA_DP2_1536 0x45111d12
-#define PKE_RSA_DP2_2048 0x59121dfa
-#define PKE_RSA_DP2_3072 0x81121ed9
-#define PKE_RSA_DP2_4096 0xb1111fb2
-
-static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
-{
-       unsigned int bitslen = len << 3;
-
-       switch (bitslen) {
-       case 512:
-               return PKE_RSA_DP2_512;
-       case 1024:
-               return PKE_RSA_DP2_1024;
-       case 1536:
-               return PKE_RSA_DP2_1536;
-       case 2048:
-               return PKE_RSA_DP2_2048;
-       case 3072:
-               return PKE_RSA_DP2_3072;
-       case 4096:
-               return PKE_RSA_DP2_4096;
-       default:
-               return 0;
-       }
-}
-
-static int qat_rsa_enc(struct akcipher_request *req)
-{
-       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       struct qat_asym_request *qat_req =
-                       PTR_ALIGN(akcipher_request_ctx(req), 64);
-       struct icp_qat_fw_pke_request *msg = &qat_req->req;
-       gfp_t flags = qat_algs_alloc_flags(&req->base);
-       u8 *vaddr;
-       int ret;
-
-       if (unlikely(!ctx->n || !ctx->e))
-               return -EINVAL;
-
-       if (req->dst_len < ctx->key_sz) {
-               req->dst_len = ctx->key_sz;
-               return -EOVERFLOW;
-       }
-
-       if (req->src_len > ctx->key_sz)
-               return -EINVAL;
-
-       memset(msg, '\0', sizeof(*msg));
-       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
-                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
-       msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
-       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
-               return -EINVAL;
-
-       qat_req->cb = qat_rsa_cb;
-       qat_req->ctx.rsa = ctx;
-       qat_req->areq.rsa = req;
-       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-       msg->pke_hdr.comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
-                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
-
-       qat_req->in.rsa.enc.e = ctx->dma_e;
-       qat_req->in.rsa.enc.n = ctx->dma_n;
-       ret = -ENOMEM;
-
-       /*
-        * src can be of any size within the valid range, but the HW expects
-        * it to be the same size as the modulus n, so if the sizes differ we
-        * must allocate a new buffer and copy the src data into it. Otherwise
-        * we just map the user-provided buffer. Either way the data must sit
-        * in a single contiguous buffer.
-        */
-       if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
-               qat_req->src_align = NULL;
-               vaddr = sg_virt(req->src);
-       } else {
-               int shift = ctx->key_sz - req->src_len;
-
-               qat_req->src_align = kzalloc(ctx->key_sz, flags);
-               if (unlikely(!qat_req->src_align))
-                       return ret;
-
-               scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
-                                        0, req->src_len, 0);
-               vaddr = qat_req->src_align;
-       }
-
-       qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
-                                              DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
-               goto unmap_src;
-
-       if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
-               qat_req->dst_align = NULL;
-               vaddr = sg_virt(req->dst);
-       } else {
-               qat_req->dst_align = kzalloc(ctx->key_sz, flags);
-               if (unlikely(!qat_req->dst_align))
-                       goto unmap_src;
-               vaddr = qat_req->dst_align;
-       }
-
-       qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
-                                               DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
-               goto unmap_dst;
-
-       qat_req->in.rsa.in_tab[3] = 0;
-       qat_req->out.rsa.out_tab[1] = 0;
-       qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
-                                        sizeof(struct qat_rsa_input_params),
-                                        DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
-               goto unmap_dst;
-
-       qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
-                                         sizeof(struct qat_rsa_output_params),
-                                         DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
-               goto unmap_in_params;
-
-       msg->pke_mid.src_data_addr = qat_req->phy_in;
-       msg->pke_mid.dest_data_addr = qat_req->phy_out;
-       msg->pke_mid.opaque = (u64)(__force long)qat_req;
-       msg->input_param_count = 3;
-       msg->output_param_count = 1;
-
-       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
-       if (ret == -ENOSPC)
-               goto unmap_all;
-
-       return ret;
-
-unmap_all:
-       if (!dma_mapping_error(dev, qat_req->phy_out))
-               dma_unmap_single(dev, qat_req->phy_out,
-                                sizeof(struct qat_rsa_output_params),
-                                DMA_TO_DEVICE);
-unmap_in_params:
-       if (!dma_mapping_error(dev, qat_req->phy_in))
-               dma_unmap_single(dev, qat_req->phy_in,
-                                sizeof(struct qat_rsa_input_params),
-                                DMA_TO_DEVICE);
-unmap_dst:
-       if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
-               dma_unmap_single(dev, qat_req->out.rsa.enc.c,
-                                ctx->key_sz, DMA_FROM_DEVICE);
-       kfree_sensitive(qat_req->dst_align);
-unmap_src:
-       if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
-               dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
-                                DMA_TO_DEVICE);
-       kfree_sensitive(qat_req->src_align);
-       return ret;
-}
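
Since the message is a big-endian integer, the padding branch above zero-extends it on the left to the modulus size. A worked illustration with assumed values:

    /*
     * key_sz = 256, src_len = 250  =>  shift = 6
     *
     *   buffer: [ 6 zero bytes | 250 bytes copied from req->src ]
     *
     * kzalloc() supplies the leading zeros; scatterwalk_map_and_copy()
     * fills in only the tail starting at offset 'shift'.
     */
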
-
-static int qat_rsa_dec(struct akcipher_request *req)
-{
-       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       struct qat_asym_request *qat_req =
-                       PTR_ALIGN(akcipher_request_ctx(req), 64);
-       struct icp_qat_fw_pke_request *msg = &qat_req->req;
-       gfp_t flags = qat_algs_alloc_flags(&req->base);
-       u8 *vaddr;
-       int ret;
-
-       if (unlikely(!ctx->n || !ctx->d))
-               return -EINVAL;
-
-       if (req->dst_len < ctx->key_sz) {
-               req->dst_len = ctx->key_sz;
-               return -EOVERFLOW;
-       }
-
-       if (req->src_len > ctx->key_sz)
-               return -EINVAL;
-
-       memset(msg, '\0', sizeof(*msg));
-       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
-                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
-       msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
-               qat_rsa_dec_fn_id_crt(ctx->key_sz) :
-               qat_rsa_dec_fn_id(ctx->key_sz);
-       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
-               return -EINVAL;
-
-       qat_req->cb = qat_rsa_cb;
-       qat_req->ctx.rsa = ctx;
-       qat_req->areq.rsa = req;
-       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-       msg->pke_hdr.comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
-                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
-
-       if (ctx->crt_mode) {
-               qat_req->in.rsa.dec_crt.p = ctx->dma_p;
-               qat_req->in.rsa.dec_crt.q = ctx->dma_q;
-               qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
-               qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
-               qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
-       } else {
-               qat_req->in.rsa.dec.d = ctx->dma_d;
-               qat_req->in.rsa.dec.n = ctx->dma_n;
-       }
-       ret = -ENOMEM;
-
-       /*
-        * src can be of any size within the valid range, but the HW expects
-        * it to be the same size as the modulus n, so if the sizes differ we
-        * must allocate a new buffer and copy the src data into it. Otherwise
-        * we just map the user-provided buffer. Either way the data must sit
-        * in a single contiguous buffer.
-        */
-       if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
-               qat_req->src_align = NULL;
-               vaddr = sg_virt(req->src);
-       } else {
-               int shift = ctx->key_sz - req->src_len;
-
-               qat_req->src_align = kzalloc(ctx->key_sz, flags);
-               if (unlikely(!qat_req->src_align))
-                       return ret;
-
-               scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
-                                        0, req->src_len, 0);
-               vaddr = qat_req->src_align;
-       }
-
-       qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
-                                              DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
-               goto unmap_src;
-
-       if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
-               qat_req->dst_align = NULL;
-               vaddr = sg_virt(req->dst);
-       } else {
-               qat_req->dst_align = kzalloc(ctx->key_sz, flags);
-               if (unlikely(!qat_req->dst_align))
-                       goto unmap_src;
-               vaddr = qat_req->dst_align;
-       }
-       qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
-                                               DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
-               goto unmap_dst;
-
-       if (ctx->crt_mode)
-               qat_req->in.rsa.in_tab[6] = 0;
-       else
-               qat_req->in.rsa.in_tab[3] = 0;
-       qat_req->out.rsa.out_tab[1] = 0;
-       qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
-                                        sizeof(struct qat_rsa_input_params),
-                                        DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
-               goto unmap_dst;
-
-       qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
-                                         sizeof(struct qat_rsa_output_params),
-                                         DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
-               goto unmap_in_params;
-
-       msg->pke_mid.src_data_addr = qat_req->phy_in;
-       msg->pke_mid.dest_data_addr = qat_req->phy_out;
-       msg->pke_mid.opaque = (u64)(__force long)qat_req;
-       if (ctx->crt_mode)
-               msg->input_param_count = 6;
-       else
-               msg->input_param_count = 3;
-
-       msg->output_param_count = 1;
-
-       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
-       if (ret == -ENOSPC)
-               goto unmap_all;
-
-       return ret;
-
-unmap_all:
-       if (!dma_mapping_error(dev, qat_req->phy_out))
-               dma_unmap_single(dev, qat_req->phy_out,
-                                sizeof(struct qat_rsa_output_params),
-                                DMA_TO_DEVICE);
-unmap_in_params:
-       if (!dma_mapping_error(dev, qat_req->phy_in))
-               dma_unmap_single(dev, qat_req->phy_in,
-                                sizeof(struct qat_rsa_input_params),
-                                DMA_TO_DEVICE);
-unmap_dst:
-       if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
-               dma_unmap_single(dev, qat_req->out.rsa.dec.m,
-                                ctx->key_sz, DMA_FROM_DEVICE);
-       kfree_sensitive(qat_req->dst_align);
-unmap_src:
-       if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
-               dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
-                                DMA_TO_DEVICE);
-       kfree_sensitive(qat_req->src_align);
-       return ret;
-}
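
In CRT mode the firmware receives the five PKCS#1 CRT values (p, q, dp, dq, qinv) instead of (d, n). For context, a sketch of the standard Garner recombination the hardware is expected to perform (illustration only, not taken from this driver):

    /*
     *   m1 = c^dp mod p        (dp = d mod (p - 1))
     *   m2 = c^dq mod q        (dq = d mod (q - 1))
     *   h  = qinv * (m1 - m2) mod p
     *   m  = m2 + h * q
     *
     * Two half-width exponentiations are typically around 3-4x faster
     * than a single full-width c^d mod n.
     */
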
-
-static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
-                        size_t vlen)
-{
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       const char *ptr = value;
-       int ret;
-
-       while (!*ptr && vlen) {
-               ptr++;
-               vlen--;
-       }
-
-       ctx->key_sz = vlen;
-       ret = -EINVAL;
-       /* invalid key size provided */
-       if (!qat_rsa_enc_fn_id(ctx->key_sz))
-               goto err;
-
-       ret = -ENOMEM;
-       ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
-       if (!ctx->n)
-               goto err;
-
-       memcpy(ctx->n, ptr, ctx->key_sz);
-       return 0;
-err:
-       ctx->key_sz = 0;
-       ctx->n = NULL;
-       return ret;
-}
-
-static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
-                        size_t vlen)
-{
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       const char *ptr = value;
-
-       while (!*ptr && vlen) {
-               ptr++;
-               vlen--;
-       }
-
-       if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
-               ctx->e = NULL;
-               return -EINVAL;
-       }
-
-       ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
-       if (!ctx->e)
-               return -ENOMEM;
-
-       memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
-       return 0;
-}
-
-static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
-                        size_t vlen)
-{
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       const char *ptr = value;
-       int ret;
-
-       while (!*ptr && vlen) {
-               ptr++;
-               vlen--;
-       }
-
-       ret = -EINVAL;
-       if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
-               goto err;
-
-       ret = -ENOMEM;
-       ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
-       if (!ctx->d)
-               goto err;
-
-       memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
-       return 0;
-err:
-       ctx->d = NULL;
-       return ret;
-}
-
-static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
-{
-       while (!**ptr && *len) {
-               (*ptr)++;
-               (*len)--;
-       }
-}
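
This helper exists because DER-encoded positive integers may carry a leading 0x00 byte. A worked example under that assumption:

    /* For a 2048-bit key (key_sz = 256, half_key_sz = 128), a 129-byte
     * p that starts with 0x00 is trimmed to exactly 128 bytes before
     * being right-aligned into the half-key-size DMA buffer below.
     */
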
-
-static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
-{
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       const char *ptr;
-       unsigned int len;
-       unsigned int half_key_sz = ctx->key_sz / 2;
-
-       /* p */
-       ptr = rsa_key->p;
-       len = rsa_key->p_sz;
-       qat_rsa_drop_leading_zeros(&ptr, &len);
-       if (!len)
-               goto err;
-       ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
-       if (!ctx->p)
-               goto err;
-       memcpy(ctx->p + (half_key_sz - len), ptr, len);
-
-       /* q */
-       ptr = rsa_key->q;
-       len = rsa_key->q_sz;
-       qat_rsa_drop_leading_zeros(&ptr, &len);
-       if (!len)
-               goto free_p;
-       ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
-       if (!ctx->q)
-               goto free_p;
-       memcpy(ctx->q + (half_key_sz - len), ptr, len);
-
-       /* dp */
-       ptr = rsa_key->dp;
-       len = rsa_key->dp_sz;
-       qat_rsa_drop_leading_zeros(&ptr, &len);
-       if (!len)
-               goto free_q;
-       ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
-                                    GFP_KERNEL);
-       if (!ctx->dp)
-               goto free_q;
-       memcpy(ctx->dp + (half_key_sz - len), ptr, len);
-
-       /* dq */
-       ptr = rsa_key->dq;
-       len = rsa_key->dq_sz;
-       qat_rsa_drop_leading_zeros(&ptr, &len);
-       if (!len)
-               goto free_dp;
-       ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
-                                    GFP_KERNEL);
-       if (!ctx->dq)
-               goto free_dp;
-       memcpy(ctx->dq + (half_key_sz - len), ptr, len);
-
-       /* qinv */
-       ptr = rsa_key->qinv;
-       len = rsa_key->qinv_sz;
-       qat_rsa_drop_leading_zeros(&ptr, &len);
-       if (!len)
-               goto free_dq;
-       ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
-                                      GFP_KERNEL);
-       if (!ctx->qinv)
-               goto free_dq;
-       memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
-
-       ctx->crt_mode = true;
-       return;
-
-free_dq:
-       memset(ctx->dq, '\0', half_key_sz);
-       dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
-       ctx->dq = NULL;
-free_dp:
-       memset(ctx->dp, '\0', half_key_sz);
-       dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
-       ctx->dp = NULL;
-free_q:
-       memset(ctx->q, '\0', half_key_sz);
-       dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
-       ctx->q = NULL;
-free_p:
-       memset(ctx->p, '\0', half_key_sz);
-       dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
-       ctx->p = NULL;
-err:
-       ctx->crt_mode = false;
-}
-
-static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
-{
-       unsigned int half_key_sz = ctx->key_sz / 2;
-
-       /* Free the old key if any */
-       if (ctx->n)
-               dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
-       if (ctx->e)
-               dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
-       if (ctx->d) {
-               memset(ctx->d, '\0', ctx->key_sz);
-               dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
-       }
-       if (ctx->p) {
-               memset(ctx->p, '\0', half_key_sz);
-               dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
-       }
-       if (ctx->q) {
-               memset(ctx->q, '\0', half_key_sz);
-               dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
-       }
-       if (ctx->dp) {
-               memset(ctx->dp, '\0', half_key_sz);
-               dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
-       }
-       if (ctx->dq) {
-               memset(ctx->dq, '\0', half_key_sz);
-               dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
-       }
-       if (ctx->qinv) {
-               memset(ctx->qinv, '\0', half_key_sz);
-               dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
-       }
-
-       ctx->n = NULL;
-       ctx->e = NULL;
-       ctx->d = NULL;
-       ctx->p = NULL;
-       ctx->q = NULL;
-       ctx->dp = NULL;
-       ctx->dq = NULL;
-       ctx->qinv = NULL;
-       ctx->crt_mode = false;
-       ctx->key_sz = 0;
-}
-
-static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
-                         unsigned int keylen, bool private)
-{
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-       struct rsa_key rsa_key;
-       int ret;
-
-       qat_rsa_clear_ctx(dev, ctx);
-
-       if (private)
-               ret = rsa_parse_priv_key(&rsa_key, key, keylen);
-       else
-               ret = rsa_parse_pub_key(&rsa_key, key, keylen);
-       if (ret < 0)
-               goto free;
-
-       ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
-       if (ret < 0)
-               goto free;
-       ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
-       if (ret < 0)
-               goto free;
-       if (private) {
-               ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
-               if (ret < 0)
-                       goto free;
-               qat_rsa_setkey_crt(ctx, &rsa_key);
-       }
-
-       if (!ctx->n || !ctx->e) {
-               /* invalid key provided */
-               ret = -EINVAL;
-               goto free;
-       }
-       if (private && !ctx->d) {
-               /* invalid private key provided */
-               ret = -EINVAL;
-               goto free;
-       }
-
-       return 0;
-free:
-       qat_rsa_clear_ctx(dev, ctx);
-       return ret;
-}
-
-static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
-                            unsigned int keylen)
-{
-       return qat_rsa_setkey(tfm, key, keylen, false);
-}
-
-static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
-                             unsigned int keylen)
-{
-       return qat_rsa_setkey(tfm, key, keylen, true);
-}
-
-static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
-{
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-
-       return ctx->key_sz;
-}
-
-static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
-{
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-       struct qat_crypto_instance *inst =
-                       qat_crypto_get_instance_node(numa_node_id());
-
-       if (!inst)
-               return -EINVAL;
-
-       akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
-
-       ctx->key_sz = 0;
-       ctx->inst = inst;
-       return 0;
-}
-
-static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
-{
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-
-       qat_rsa_clear_ctx(dev, ctx);
-       qat_crypto_put_instance(ctx->inst);
-}
-
-static struct akcipher_alg rsa = {
-       .encrypt = qat_rsa_enc,
-       .decrypt = qat_rsa_dec,
-       .set_pub_key = qat_rsa_setpubkey,
-       .set_priv_key = qat_rsa_setprivkey,
-       .max_size = qat_rsa_max_size,
-       .init = qat_rsa_init_tfm,
-       .exit = qat_rsa_exit_tfm,
-       .base = {
-               .cra_name = "rsa",
-               .cra_driver_name = "qat-rsa",
-               .cra_priority = 1000,
-               .cra_module = THIS_MODULE,
-               .cra_ctxsize = sizeof(struct qat_rsa_ctx),
-       },
-};
-
-static struct kpp_alg dh = {
-       .set_secret = qat_dh_set_secret,
-       .generate_public_key = qat_dh_compute_value,
-       .compute_shared_secret = qat_dh_compute_value,
-       .max_size = qat_dh_max_size,
-       .init = qat_dh_init_tfm,
-       .exit = qat_dh_exit_tfm,
-       .base = {
-               .cra_name = "dh",
-               .cra_driver_name = "qat-dh",
-               .cra_priority = 1000,
-               .cra_module = THIS_MODULE,
-               .cra_ctxsize = sizeof(struct qat_dh_ctx),
-       },
-};
-
-int qat_asym_algs_register(void)
-{
-       int ret = 0;
-
-       mutex_lock(&algs_lock);
-       if (++active_devs == 1) {
-               rsa.base.cra_flags = 0;
-               ret = crypto_register_akcipher(&rsa);
-               if (ret)
-                       goto unlock;
-               ret = crypto_register_kpp(&dh);
-       }
-unlock:
-       mutex_unlock(&algs_lock);
-       return ret;
-}
-
-void qat_asym_algs_unregister(void)
-{
-       mutex_lock(&algs_lock);
-       if (--active_devs == 0) {
-               crypto_unregister_akcipher(&rsa);
-               crypto_unregister_kpp(&dh);
-       }
-       mutex_unlock(&algs_lock);
-}
diff --git a/drivers/crypto/qat/qat_common/qat_bl.c b/drivers/crypto/qat/qat_common/qat_bl.c
deleted file mode 100644 (file)
index 76baed0..0000000
+++ /dev/null
@@ -1,410 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2014 - 2022 Intel Corporation */
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/pci.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "qat_bl.h"
-#include "qat_crypto.h"
-
-void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
-                     struct qat_request_buffs *buf)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       struct qat_alg_buf_list *bl = buf->bl;
-       struct qat_alg_buf_list *blout = buf->blout;
-       dma_addr_t blp = buf->blp;
-       dma_addr_t blpout = buf->bloutp;
-       size_t sz = buf->sz;
-       size_t sz_out = buf->sz_out;
-       int bl_dma_dir;
-       int i;
-
-       bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-
-       for (i = 0; i < bl->num_bufs; i++)
-               dma_unmap_single(dev, bl->buffers[i].addr,
-                                bl->buffers[i].len, bl_dma_dir);
-
-       dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
-
-       if (!buf->sgl_src_valid)
-               kfree(bl);
-
-       if (blp != blpout) {
-               for (i = 0; i < blout->num_mapped_bufs; i++) {
-                       dma_unmap_single(dev, blout->buffers[i].addr,
-                                        blout->buffers[i].len,
-                                        DMA_FROM_DEVICE);
-               }
-               dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
-
-               if (!buf->sgl_dst_valid)
-                       kfree(blout);
-       }
-}
-
-static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
-                               struct scatterlist *sgl,
-                               struct scatterlist *sglout,
-                               struct qat_request_buffs *buf,
-                               dma_addr_t extra_dst_buff,
-                               size_t sz_extra_dst_buff,
-                               unsigned int sskip,
-                               unsigned int dskip,
-                               gfp_t flags)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       int i, sg_nctr = 0;
-       int n = sg_nents(sgl);
-       struct qat_alg_buf_list *bufl;
-       struct qat_alg_buf_list *buflout = NULL;
-       dma_addr_t blp = DMA_MAPPING_ERROR;
-       dma_addr_t bloutp = DMA_MAPPING_ERROR;
-       struct scatterlist *sg;
-       size_t sz_out, sz = struct_size(bufl, buffers, n);
-       int node = dev_to_node(&GET_DEV(accel_dev));
-       unsigned int left;
-       int bufl_dma_dir;
-
-       if (unlikely(!n))
-               return -EINVAL;
-
-       buf->sgl_src_valid = false;
-       buf->sgl_dst_valid = false;
-
-       if (n > QAT_MAX_BUFF_DESC) {
-               bufl = kzalloc_node(sz, flags, node);
-               if (unlikely(!bufl))
-                       return -ENOMEM;
-       } else {
-               bufl = &buf->sgl_src.sgl_hdr;
-               memset(bufl, 0, sizeof(struct qat_alg_buf_list));
-               buf->sgl_src_valid = true;
-       }
-
-       bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-
-       for (i = 0; i < n; i++)
-               bufl->buffers[i].addr = DMA_MAPPING_ERROR;
-
-       left = sskip;
-
-       for_each_sg(sgl, sg, n, i) {
-               int y = sg_nctr;
-
-               if (!sg->length)
-                       continue;
-
-               if (left >= sg->length) {
-                       left -= sg->length;
-                       continue;
-               }
-               bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
-                                                      sg->length - left,
-                                                      bufl_dma_dir);
-               bufl->buffers[y].len = sg->length;
-               if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
-                       goto err_in;
-               sg_nctr++;
-               if (left) {
-                       bufl->buffers[y].len -= left;
-                       left = 0;
-               }
-       }
-       bufl->num_bufs = sg_nctr;
-       blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, blp)))
-               goto err_in;
-       buf->bl = bufl;
-       buf->blp = blp;
-       buf->sz = sz;
-       /* Handle out of place operation */
-       if (sgl != sglout) {
-               struct qat_alg_buf *buffers;
-               int extra_buff = extra_dst_buff ? 1 : 0;
-               int n_sglout = sg_nents(sglout);
-
-               n = n_sglout + extra_buff;
-               sz_out = struct_size(buflout, buffers, n);
-               left = dskip;
-
-               sg_nctr = 0;
-
-               if (n > QAT_MAX_BUFF_DESC) {
-                       buflout = kzalloc_node(sz_out, flags, node);
-                       if (unlikely(!buflout))
-                               goto err_in;
-               } else {
-                       buflout = &buf->sgl_dst.sgl_hdr;
-                       memset(buflout, 0, sizeof(struct qat_alg_buf_list));
-                       buf->sgl_dst_valid = true;
-               }
-
-               buffers = buflout->buffers;
-               for (i = 0; i < n; i++)
-                       buffers[i].addr = DMA_MAPPING_ERROR;
-
-               for_each_sg(sglout, sg, n_sglout, i) {
-                       int y = sg_nctr;
-
-                       if (!sg->length)
-                               continue;
-
-                       if (left >= sg->length) {
-                               left -= sg->length;
-                               continue;
-                       }
-                       buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
-                                                        sg->length - left,
-                                                        DMA_FROM_DEVICE);
-                       if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
-                               goto err_out;
-                       buffers[y].len = sg->length;
-                       sg_nctr++;
-                       if (left) {
-                               buffers[y].len -= left;
-                               left = 0;
-                       }
-               }
-               if (extra_buff) {
-                       buffers[sg_nctr].addr = extra_dst_buff;
-                       buffers[sg_nctr].len = sz_extra_dst_buff;
-               }
-
-               buflout->num_bufs = sg_nctr;
-               buflout->num_bufs += extra_buff;
-               buflout->num_mapped_bufs = sg_nctr;
-               bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(dev, bloutp)))
-                       goto err_out;
-               buf->blout = buflout;
-               buf->bloutp = bloutp;
-               buf->sz_out = sz_out;
-       } else {
-               /* Otherwise set the src and dst to the same address */
-               buf->bloutp = buf->blp;
-               buf->sz_out = 0;
-       }
-       return 0;
-
-err_out:
-       if (!dma_mapping_error(dev, bloutp))
-               dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
-
-       n = sg_nents(sglout);
-       for (i = 0; i < n; i++) {
-               if (buflout->buffers[i].addr == extra_dst_buff)
-                       break;
-               if (!dma_mapping_error(dev, buflout->buffers[i].addr))
-                       dma_unmap_single(dev, buflout->buffers[i].addr,
-                                        buflout->buffers[i].len,
-                                        DMA_FROM_DEVICE);
-       }
-
-       if (!buf->sgl_dst_valid)
-               kfree(buflout);
-
-err_in:
-       if (!dma_mapping_error(dev, blp))
-               dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
-
-       n = sg_nents(sgl);
-       for (i = 0; i < n; i++)
-               if (!dma_mapping_error(dev, bufl->buffers[i].addr))
-                       dma_unmap_single(dev, bufl->buffers[i].addr,
-                                        bufl->buffers[i].len,
-                                        bufl_dma_dir);
-
-       if (!buf->sgl_src_valid)
-               kfree(bufl);
-
-       dev_err(dev, "Failed to map buf for DMA\n");
-       return -ENOMEM;
-}
-
-int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
-                      struct scatterlist *sgl,
-                      struct scatterlist *sglout,
-                      struct qat_request_buffs *buf,
-                      struct qat_sgl_to_bufl_params *params,
-                      gfp_t flags)
-{
-       dma_addr_t extra_dst_buff = 0;
-       size_t sz_extra_dst_buff = 0;
-       unsigned int sskip = 0;
-       unsigned int dskip = 0;
-
-       if (params) {
-               extra_dst_buff = params->extra_dst_buff;
-               sz_extra_dst_buff = params->sz_extra_dst_buff;
-               sskip = params->sskip;
-               dskip = params->dskip;
-       }
-
-       return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
-                                   extra_dst_buff, sz_extra_dst_buff,
-                                   sskip, dskip, flags);
-}
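
The sskip/dskip parameters let a caller drop a prefix (e.g. a protocol header) without copying. A worked trace of the skip loop in __qat_bl_sgl_to_bufl() above, with assumed entry sizes:

    /* sskip = 3, src entries of length 2 and 6:
     *   entry 0: left(3) >= 2, fully skipped, left becomes 1
     *   entry 1: mapped at sg_virt(sg) + 1 with len 6 - 1 = 5
     * Fully skipped entries never appear in the firmware buffer list.
     */
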
-
-static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
-                            struct qat_alg_buf_list *bl)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       int n = bl->num_bufs;
-       int i;
-
-       for (i = 0; i < n; i++)
-               if (!dma_mapping_error(dev, bl->buffers[i].addr))
-                       dma_unmap_single(dev, bl->buffers[i].addr,
-                                        bl->buffers[i].len, DMA_FROM_DEVICE);
-}
-
-static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
-                         struct scatterlist *sgl,
-                         struct qat_alg_buf_list **bl)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       struct qat_alg_buf_list *bufl;
-       int node = dev_to_node(dev);
-       struct scatterlist *sg;
-       int n, i, sg_nctr;
-       size_t sz;
-
-       n = sg_nents(sgl);
-       sz = struct_size(bufl, buffers, n);
-       bufl = kzalloc_node(sz, GFP_KERNEL, node);
-       if (unlikely(!bufl))
-               return -ENOMEM;
-
-       for (i = 0; i < n; i++)
-               bufl->buffers[i].addr = DMA_MAPPING_ERROR;
-
-       sg_nctr = 0;
-       for_each_sg(sgl, sg, n, i) {
-               int y = sg_nctr;
-
-               if (!sg->length)
-                       continue;
-
-               bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
-                                                      sg->length,
-                                                      DMA_FROM_DEVICE);
-               bufl->buffers[y].len = sg->length;
-               if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
-                       goto err_map;
-               sg_nctr++;
-       }
-       bufl->num_bufs = sg_nctr;
-       bufl->num_mapped_bufs = sg_nctr;
-
-       *bl = bufl;
-
-       return 0;
-
-err_map:
-       for (i = 0; i < n; i++)
-               if (!dma_mapping_error(dev, bufl->buffers[i].addr))
-                       dma_unmap_single(dev, bufl->buffers[i].addr,
-                                        bufl->buffers[i].len,
-                                        DMA_FROM_DEVICE);
-       kfree(bufl);
-       *bl = NULL;
-
-       return -ENOMEM;
-}
-
-static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
-                                 struct scatterlist *sgl,
-                                 struct qat_alg_buf_list *bl,
-                                 bool free_bl)
-{
-       if (bl) {
-               qat_bl_sgl_unmap(accel_dev, bl);
-
-               if (free_bl)
-                       kfree(bl);
-       }
-       if (sgl)
-               sgl_free(sgl);
-}
-
-static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
-                               struct scatterlist **sgl,
-                               struct qat_alg_buf_list **bl,
-                               unsigned int dlen,
-                               gfp_t gfp)
-{
-       struct scatterlist *dst;
-       int ret;
-
-       dst = sgl_alloc(dlen, gfp, NULL);
-       if (!dst) {
-               dev_err(&GET_DEV(accel_dev), "sgl_alloc failed\n");
-               return -ENOMEM;
-       }
-
-       ret = qat_bl_sgl_map(accel_dev, dst, bl);
-       if (ret)
-               goto err;
-
-       *sgl = dst;
-
-       return 0;
-
-err:
-       sgl_free(dst);
-       *sgl = NULL;
-       return ret;
-}
-
-int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
-                              struct scatterlist **sg,
-                              unsigned int dlen,
-                              struct qat_request_buffs *qat_bufs,
-                              gfp_t gfp)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       dma_addr_t new_blp = DMA_MAPPING_ERROR;
-       struct qat_alg_buf_list *new_bl;
-       struct scatterlist *new_sg;
-       size_t new_bl_size;
-       int ret;
-
-       ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
-       if (ret)
-               return ret;
-
-       new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
-
-       /* Map new firmware SGL descriptor */
-       new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, new_blp)))
-               goto err;
-
-       /* Unmap old firmware SGL descriptor */
-       dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);
-
-       /* Free and unmap old scatterlist */
-       qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
-                             !qat_bufs->sgl_dst_valid);
-
-       qat_bufs->sgl_dst_valid = false;
-       qat_bufs->blout = new_bl;
-       qat_bufs->bloutp = new_blp;
-       qat_bufs->sz_out = new_bl_size;
-
-       *sg = new_sg;
-
-       return 0;
-err:
-       qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);
-
-       if (!dma_mapping_error(dev, new_blp))
-               dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);
-
-       return -ENOMEM;
-}
diff --git a/drivers/crypto/qat/qat_common/qat_bl.h b/drivers/crypto/qat/qat_common/qat_bl.h
deleted file mode 100644 (file)
index d87e4f3..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2014 - 2022 Intel Corporation */
-#ifndef QAT_BL_H
-#define QAT_BL_H
-#include <linux/crypto.h>
-#include <linux/scatterlist.h>
-#include <linux/types.h>
-
-#define QAT_MAX_BUFF_DESC      4
-
-struct qat_alg_buf {
-       u32 len;
-       u32 resrvd;
-       u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
-       u64 resrvd;
-       u32 num_bufs;
-       u32 num_mapped_bufs;
-       struct qat_alg_buf buffers[];
-} __packed;
-
-struct qat_alg_fixed_buf_list {
-       struct qat_alg_buf_list sgl_hdr;
-       struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
-} __packed __aligned(64);
-
-struct qat_request_buffs {
-       struct qat_alg_buf_list *bl;
-       dma_addr_t blp;
-       struct qat_alg_buf_list *blout;
-       dma_addr_t bloutp;
-       size_t sz;
-       size_t sz_out;
-       bool sgl_src_valid;
-       bool sgl_dst_valid;
-       struct qat_alg_fixed_buf_list sgl_src;
-       struct qat_alg_fixed_buf_list sgl_dst;
-};
-
-struct qat_sgl_to_bufl_params {
-       dma_addr_t extra_dst_buff;
-       size_t sz_extra_dst_buff;
-       unsigned int sskip;
-       unsigned int dskip;
-};
-
-void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
-                     struct qat_request_buffs *buf);
-int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
-                      struct scatterlist *sgl,
-                      struct scatterlist *sglout,
-                      struct qat_request_buffs *buf,
-                      struct qat_sgl_to_bufl_params *params,
-                      gfp_t flags);
-
-static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
-{
-       return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-}
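
A hypothetical caller sketch (buffer names assumed) showing how the flag selection propagates: requests without CRYPTO_TFM_REQ_MAY_SLEEP run in atomic context, so every allocation on their path must use GFP_ATOMIC.

    gfp_t flags = qat_algs_alloc_flags(&areq->base);
    u8 *tmp = kzalloc(ctx->key_sz, flags);  /* safe in both contexts */
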
-
-int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
-                              struct scatterlist **newd,
-                              unsigned int dlen,
-                              struct qat_request_buffs *qat_bufs,
-                              gfp_t gfp);
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/qat_comp_algs.c b/drivers/crypto/qat/qat_common/qat_comp_algs.c
deleted file mode 100644 (file)
index b533984..0000000
+++ /dev/null
@@ -1,489 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include <linux/crypto.h>
-#include <crypto/acompress.h>
-#include <crypto/internal/acompress.h>
-#include <crypto/scatterwalk.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "qat_bl.h"
-#include "qat_comp_req.h"
-#include "qat_compression.h"
-#include "qat_algs_send.h"
-
-#define QAT_RFC_1950_HDR_SIZE 2
-#define QAT_RFC_1950_FOOTER_SIZE 4
-#define QAT_RFC_1950_CM_DEFLATE 8
-#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
-#define QAT_RFC_1950_CM_MASK 0x0f
-#define QAT_RFC_1950_CM_OFFSET 4
-#define QAT_RFC_1950_DICT_MASK 0x20
-#define QAT_RFC_1950_COMP_HDR 0x785e
-
-static DEFINE_MUTEX(algs_lock);
-static unsigned int active_devs;
-
-enum direction {
-       DECOMPRESSION = 0,
-       COMPRESSION = 1,
-};
-
-struct qat_compression_req;
-
-struct qat_compression_ctx {
-       u8 comp_ctx[QAT_COMP_CTX_SIZE];
-       struct qat_compression_instance *inst;
-       int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
-};
-
-struct qat_dst {
-       bool is_null;
-       int resubmitted;
-};
-
-struct qat_compression_req {
-       u8 req[QAT_COMP_REQ_SIZE];
-       struct qat_compression_ctx *qat_compression_ctx;
-       struct acomp_req *acompress_req;
-       struct qat_request_buffs buf;
-       enum direction dir;
-       int actual_dlen;
-       struct qat_alg_req alg_req;
-       struct work_struct resubmit;
-       struct qat_dst dst;
-};
-
-static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
-                                  struct qat_compression_instance *inst,
-                                  struct crypto_async_request *base)
-{
-       struct qat_alg_req *alg_req = &qat_req->alg_req;
-
-       alg_req->fw_req = (u32 *)&qat_req->req;
-       alg_req->tx_ring = inst->dc_tx;
-       alg_req->base = base;
-       alg_req->backlog = &inst->backlog;
-
-       return qat_alg_send_message(alg_req);
-}
-
-static void qat_comp_resubmit(struct work_struct *work)
-{
-       struct qat_compression_req *qat_req =
-               container_of(work, struct qat_compression_req, resubmit);
-       struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
-       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
-       struct qat_request_buffs *qat_bufs = &qat_req->buf;
-       struct qat_compression_instance *inst = ctx->inst;
-       struct acomp_req *areq = qat_req->acompress_req;
-       struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
-       unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
-       u8 *req = qat_req->req;
-       dma_addr_t dfbuf;
-       int ret;
-
-       areq->dlen = dlen;
-
-       dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
-               crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
-               qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);
-
-       ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
-                                        qat_algs_alloc_flags(&areq->base));
-       if (ret)
-               goto err;
-
-       qat_req->dst.resubmitted = true;
-
-       dfbuf = qat_req->buf.bloutp;
-       qat_comp_override_dst(req, dfbuf, dlen);
-
-       ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
-       if (ret != -ENOSPC)
-               return;
-
-err:
-       qat_bl_free_bufl(accel_dev, qat_bufs);
-       acomp_request_complete(areq, ret);
-}
-
-static int parse_zlib_header(u16 zlib_h)
-{
-       int ret = -EINVAL;
-       __be16 header;
-       u8 *header_p;
-       u8 cmf, flg;
-
-       header = cpu_to_be16(zlib_h);
-       header_p = (u8 *)&header;
-
-       flg = header_p[0];
-       cmf = header_p[1];
-
-       if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K)
-               return ret;
-
-       if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE)
-               return ret;
-
-       if (flg & QAT_RFC_1950_DICT_MASK)
-               return ret;
-
-       return 0;
-}
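
A worked decode of the driver's own default header, QAT_RFC_1950_COMP_HDR = 0x785e:

    /* CMF = 0x78: CM = 8 (deflate), CINFO = 7 (32 KiB window)
     * FLG = 0x5e: FDICT (0x20) clear
     * (0x78 * 256 + 0x5e) % 31 == 0, so the FCHECK value is also valid,
     * though parse_zlib_header() does not verify it.
     */
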
-
-static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req,
-                                    void *resp)
-{
-       struct acomp_req *areq = qat_req->acompress_req;
-       enum direction dir = qat_req->dir;
-       __be32 qat_produced_adler;
-
-       qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp));
-
-       if (dir == COMPRESSION) {
-               __be16 zlib_header;
-
-               zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR);
-               scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1);
-               areq->dlen += QAT_RFC_1950_HDR_SIZE;
-
-               scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen,
-                                        QAT_RFC_1950_FOOTER_SIZE, 1);
-               areq->dlen += QAT_RFC_1950_FOOTER_SIZE;
-       } else {
-               __be32 decomp_adler;
-               int footer_offset;
-               int consumed;
-
-               consumed = qat_comp_get_consumed_ctr(resp);
-               footer_offset = consumed + QAT_RFC_1950_HDR_SIZE;
-               if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen)
-                       return -EBADMSG;
-
-               scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset,
-                                        QAT_RFC_1950_FOOTER_SIZE, 0);
-
-               if (qat_produced_adler != decomp_adler)
-                       return -EBADMSG;
-       }
-       return 0;
-}
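
For reference, the RFC 1950 framing this callback adds or strips around the raw deflate payload:

    /* [ 2-byte header ][ raw deflate stream ][ 4-byte Adler-32, BE ]
     *
     * Compression: prepend the fixed header, append the HW-computed
     * Adler-32. Decompression: compare the HW-computed Adler-32 against
     * the footer found at offset consumed + QAT_RFC_1950_HDR_SIZE.
     */
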
-
-static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
-                                     void *resp)
-{
-       struct acomp_req *areq = qat_req->acompress_req;
-       struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
-       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
-       struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
-       struct qat_compression_instance *inst = ctx->inst;
-       int consumed, produced;
-       s8 cmp_err, xlt_err;
-       int res = -EBADMSG;
-       int status;
-       u8 cnv;
-
-       status = qat_comp_get_cmp_status(resp);
-       status |= qat_comp_get_xlt_status(resp);
-       cmp_err = qat_comp_get_cmp_err(resp);
-       xlt_err = qat_comp_get_xlt_err(resp);
-
-       consumed = qat_comp_get_consumed_ctr(resp);
-       produced = qat_comp_get_produced_ctr(resp);
-
-       dev_dbg(&GET_DEV(accel_dev),
-               "[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
-               crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
-               qat_req->dir == COMPRESSION ? "comp  " : "decomp",
-               status ? "ERR" : "OK ",
-               areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);
-
-       areq->dlen = 0;
-
-       if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
-               if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
-                       if (qat_req->dst.resubmitted) {
-                               dev_dbg(&GET_DEV(accel_dev),
-                                       "Output does not fit destination buffer\n");
-                               res = -EOVERFLOW;
-                               goto end;
-                       }
-
-                       INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
-                       adf_misc_wq_queue_work(&qat_req->resubmit);
-                       return;
-               }
-       }
-
-       if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
-               goto end;
-
-       if (qat_req->dir == COMPRESSION) {
-               cnv = qat_comp_get_cmp_cnv_flag(resp);
-               if (unlikely(!cnv)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Verified compression not supported\n");
-                       goto end;
-               }
-
-               if (unlikely(produced > qat_req->actual_dlen)) {
-                       memset(inst->dc_data->ovf_buff, 0,
-                              inst->dc_data->ovf_buff_sz);
-                       dev_dbg(&GET_DEV(accel_dev),
-                               "Actual buffer overflow: produced=%d, dlen=%d\n",
-                               produced, qat_req->actual_dlen);
-                       goto end;
-               }
-       }
-
-       res = 0;
-       areq->dlen = produced;
-
-       if (ctx->qat_comp_callback)
-               res = ctx->qat_comp_callback(qat_req, resp);
-
-end:
-       qat_bl_free_bufl(accel_dev, &qat_req->buf);
-       acomp_request_complete(areq, res);
-}
-
-void qat_comp_alg_callback(void *resp)
-{
-       struct qat_compression_req *qat_req =
-                       (void *)(__force long)qat_comp_get_opaque(resp);
-       struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
-
-       qat_comp_generic_callback(qat_req, resp);
-
-       qat_alg_send_backlog(backlog);
-}
-
-static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
-{
-       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
-       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_compression_instance *inst;
-       int node;
-
-       if (tfm->node == NUMA_NO_NODE)
-               node = numa_node_id();
-       else
-               node = tfm->node;
-
-       memset(ctx, 0, sizeof(*ctx));
-       inst = qat_compression_get_instance_node(node);
-       if (!inst)
-               return -EINVAL;
-       ctx->inst = inst;
-
-       ctx->inst->build_deflate_ctx(ctx->comp_ctx);
-
-       return 0;
-}
-
-static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
-{
-       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
-       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       qat_compression_put_instance(ctx->inst);
-       memset(ctx, 0, sizeof(*ctx));
-}
-
-static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm)
-{
-       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
-       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
-       int ret;
-
-       ret = qat_comp_alg_init_tfm(acomp_tfm);
-       ctx->qat_comp_callback = &qat_comp_rfc1950_callback;
-
-       return ret;
-}
-
-static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
-                                           unsigned int shdr, unsigned int sftr,
-                                           unsigned int dhdr, unsigned int dftr)
-{
-       struct qat_compression_req *qat_req = acomp_request_ctx(areq);
-       struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
-       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
-       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_compression_instance *inst = ctx->inst;
-       gfp_t f = qat_algs_alloc_flags(&areq->base);
-       struct qat_sgl_to_bufl_params params = {0};
-       int slen = areq->slen - shdr - sftr;
-       int dlen = areq->dlen - dhdr - dftr;
-       dma_addr_t sfbuf, dfbuf;
-       u8 *req = qat_req->req;
-       size_t ovf_buff_sz;
-       int ret;
-
-       params.sskip = shdr;
-       params.dskip = dhdr;
-
-       if (!areq->src || !slen)
-               return -EINVAL;
-
-       if (areq->dst && !dlen)
-               return -EINVAL;
-
-       qat_req->dst.is_null = false;
-
-       /* Handle acomp requests that require the allocation of a destination
-        * buffer. The destination buffer is sized at double the source buffer
-        * (rounded up to the size of a page) to fit either the decompressed
-        * output or a possible expansion of the data during compression.
-        */
-       if (!areq->dst) {
-               qat_req->dst.is_null = true;
-
-               dlen = round_up(2 * slen, PAGE_SIZE);
-               areq->dst = sgl_alloc(dlen, f, NULL);
-               if (!areq->dst)
-                       return -ENOMEM;
-
-               dlen -= dhdr + dftr;
-               areq->dlen = dlen;
-               qat_req->dst.resubmitted = false;
-       }
-
-       if (dir == COMPRESSION) {
-               params.extra_dst_buff = inst->dc_data->ovf_buff_p;
-               ovf_buff_sz = inst->dc_data->ovf_buff_sz;
-               params.sz_extra_dst_buff = ovf_buff_sz;
-       }
-
-       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
-                                &qat_req->buf, &params, f);
-       if (unlikely(ret))
-               return ret;
-
-       sfbuf = qat_req->buf.blp;
-       dfbuf = qat_req->buf.bloutp;
-       qat_req->qat_compression_ctx = ctx;
-       qat_req->acompress_req = areq;
-       qat_req->dir = dir;
-
-       if (dir == COMPRESSION) {
-               qat_req->actual_dlen = dlen;
-               dlen += ovf_buff_sz;
-               qat_comp_create_compression_req(ctx->comp_ctx, req,
-                                               (u64)(__force long)sfbuf, slen,
-                                               (u64)(__force long)dfbuf, dlen,
-                                               (u64)(__force long)qat_req);
-       } else {
-               qat_comp_create_decompression_req(ctx->comp_ctx, req,
-                                                 (u64)(__force long)sfbuf, slen,
-                                                 (u64)(__force long)dfbuf, dlen,
-                                                 (u64)(__force long)qat_req);
-       }
-
-       ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
-       if (ret == -ENOSPC)
-               qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
-
-       return ret;
-}
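
Sizing arithmetic for the NULL-dst path above, assuming PAGE_SIZE == 4096: slen = 4000 gives dlen = round_up(2 * 4000, 4096) = 8192 (before the dhdr/dftr reservation is subtracted). Should even that overflow during decompression, the request is resubmitted once with CRYPTO_ACOMP_DST_MAX by qat_comp_resubmit() before -EOVERFLOW is returned.
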
-
-static int qat_comp_alg_compress(struct acomp_req *req)
-{
-       return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
-}
-
-static int qat_comp_alg_decompress(struct acomp_req *req)
-{
-       return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
-}
-
-static int qat_comp_alg_rfc1950_compress(struct acomp_req *req)
-{
-       if (!req->dst && req->dlen != 0)
-               return -EINVAL;
-
-       if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
-               return -EINVAL;
-
-       return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0,
-                                               QAT_RFC_1950_HDR_SIZE,
-                                               QAT_RFC_1950_FOOTER_SIZE);
-}
-
-static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req)
-{
-       struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
-       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
-       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
-       u16 zlib_header;
-       int ret;
-
-       if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
-               return -EBADMSG;
-
-       scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0);
-
-       ret = parse_zlib_header(zlib_header);
-       if (ret) {
-               dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n");
-               return ret;
-       }
-
-       return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE,
-                                               QAT_RFC_1950_FOOTER_SIZE, 0, 0);
-}
-
-static struct acomp_alg qat_acomp[] = { {
-       .base = {
-               .cra_name = "deflate",
-               .cra_driver_name = "qat_deflate",
-               .cra_priority = 4001,
-               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-               .cra_ctxsize = sizeof(struct qat_compression_ctx),
-               .cra_module = THIS_MODULE,
-       },
-       .init = qat_comp_alg_init_tfm,
-       .exit = qat_comp_alg_exit_tfm,
-       .compress = qat_comp_alg_compress,
-       .decompress = qat_comp_alg_decompress,
-       .dst_free = sgl_free,
-       .reqsize = sizeof(struct qat_compression_req),
-}, {
-       .base = {
-               .cra_name = "zlib-deflate",
-               .cra_driver_name = "qat_zlib_deflate",
-               .cra_priority = 4001,
-               .cra_flags = CRYPTO_ALG_ASYNC,
-               .cra_ctxsize = sizeof(struct qat_compression_ctx),
-               .cra_module = THIS_MODULE,
-       },
-       .init = qat_comp_alg_rfc1950_init_tfm,
-       .exit = qat_comp_alg_exit_tfm,
-       .compress = qat_comp_alg_rfc1950_compress,
-       .decompress = qat_comp_alg_rfc1950_decompress,
-       .dst_free = sgl_free,
-       .reqsize = sizeof(struct qat_compression_req),
-} };
-
-int qat_comp_algs_register(void)
-{
-       int ret = 0;
-
-       mutex_lock(&algs_lock);
-       if (++active_devs == 1)
-               ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
-       mutex_unlock(&algs_lock);
-       return ret;
-}
-
-void qat_comp_algs_unregister(void)
-{
-       mutex_lock(&algs_lock);
-       if (--active_devs == 0)
-               crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
-       mutex_unlock(&algs_lock);
-}
diff --git a/drivers/crypto/qat/qat_common/qat_comp_req.h b/drivers/crypto/qat/qat_common/qat_comp_req.h
deleted file mode 100644 (file)
index 404e32c..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef _QAT_COMP_REQ_H_
-#define _QAT_COMP_REQ_H_
-
-#include "icp_qat_fw_comp.h"
-
-#define QAT_COMP_REQ_SIZE (sizeof(struct icp_qat_fw_comp_req))
-#define QAT_COMP_CTX_SIZE (QAT_COMP_REQ_SIZE * 2)
-
-static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen,
-                                      u64 dst, u32 dlen, u64 opaque)
-{
-       struct icp_qat_fw_comp_req *fw_tmpl = ctx;
-       struct icp_qat_fw_comp_req *fw_req = req;
-       struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
-
-       memcpy(fw_req, fw_tmpl, sizeof(*fw_req));
-       fw_req->comn_mid.src_data_addr = src;
-       fw_req->comn_mid.src_length = slen;
-       fw_req->comn_mid.dest_data_addr = dst;
-       fw_req->comn_mid.dst_length = dlen;
-       fw_req->comn_mid.opaque_data = opaque;
-       req_pars->comp_len = slen;
-       req_pars->out_buffer_sz = dlen;
-}
-
-static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen)
-{
-       struct icp_qat_fw_comp_req *fw_req = req;
-       struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
-
-       fw_req->comn_mid.dest_data_addr = dst;
-       fw_req->comn_mid.dst_length = dlen;
-       req_pars->out_buffer_sz = dlen;
-}
-
-static inline void qat_comp_create_compression_req(void *ctx, void *req,
-                                                  u64 src, u32 slen,
-                                                  u64 dst, u32 dlen,
-                                                  u64 opaque)
-{
-       qat_comp_create_req(ctx, req, src, slen, dst, dlen, opaque);
-}
-
-static inline void qat_comp_create_decompression_req(void *ctx, void *req,
-                                                    u64 src, u32 slen,
-                                                    u64 dst, u32 dlen,
-                                                    u64 opaque)
-{
-       struct icp_qat_fw_comp_req *fw_tmpl = ctx;
-
-       fw_tmpl++;
-       qat_comp_create_req(fw_tmpl, req, src, slen, dst, dlen, opaque);
-}
-
-static inline u32 qat_comp_get_consumed_ctr(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->comp_resp_pars.input_byte_counter;
-}
-
-static inline u32 qat_comp_get_produced_ctr(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->comp_resp_pars.output_byte_counter;
-}
-
-static inline u32 qat_comp_get_produced_adler32(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->comp_resp_pars.crc.legacy.curr_adler_32;
-}
-
-static inline u64 qat_comp_get_opaque(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->opaque_data;
-}
-
-static inline s8 qat_comp_get_cmp_err(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->comn_resp.comn_error.cmp_err_code;
-}
-
-static inline s8 qat_comp_get_xlt_err(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->comn_resp.comn_error.xlat_err_code;
-}
-
-static inline s8 qat_comp_get_cmp_status(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-       u8 stat_field = qat_resp->comn_resp.comn_status;
-
-       return ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(stat_field);
-}
-
-static inline s8 qat_comp_get_xlt_status(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-       u8 stat_field = qat_resp->comn_resp.comn_status;
-
-       return ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(stat_field);
-}
-
-static inline u8 qat_comp_get_cmp_cnv_flag(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-       u8 flags = qat_resp->comn_resp.hdr_flags;
-
-       return ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(flags);
-}
-
-#endif
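
The helpers above rely on each context packing two firmware request templates back to back: the compression template at the start of the QAT_COMP_CTX_SIZE area and the decompression template immediately after it, which is why qat_comp_create_decompression_req() just advances the template pointer by one struct before stamping out the request. A sketch of that layout (allocation shown for illustration only):

    /* ctx holds two icp_qat_fw_comp_req templates: compress, then decompress */
    void *ctx = kzalloc(QAT_COMP_CTX_SIZE, GFP_KERNEL);
    struct icp_qat_fw_comp_req *comp_tmpl = ctx;             /* template 0 */
    struct icp_qat_fw_comp_req *decomp_tmpl = comp_tmpl + 1; /* template 1 */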
diff --git a/drivers/crypto/qat/qat_common/qat_compression.c b/drivers/crypto/qat/qat_common/qat_compression.c
deleted file mode 100644 (file)
index 3f1f352..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_transport.h"
-#include "adf_transport_access_macros.h"
-#include "adf_cfg.h"
-#include "adf_cfg_strings.h"
-#include "qat_compression.h"
-#include "icp_qat_fw.h"
-
-#define SEC ADF_KERNEL_SEC
-
-static struct service_hndl qat_compression;
-
-void qat_compression_put_instance(struct qat_compression_instance *inst)
-{
-       atomic_dec(&inst->refctr);
-       adf_dev_put(inst->accel_dev);
-}
-
-static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
-{
-       struct qat_compression_instance *inst;
-       struct list_head *list_ptr, *tmp;
-       int i;
-
-       list_for_each_safe(list_ptr, tmp, &accel_dev->compression_list) {
-               inst = list_entry(list_ptr,
-                                 struct qat_compression_instance, list);
-
-               for (i = 0; i < atomic_read(&inst->refctr); i++)
-                       qat_compression_put_instance(inst);
-
-               if (inst->dc_tx)
-                       adf_remove_ring(inst->dc_tx);
-
-               if (inst->dc_rx)
-                       adf_remove_ring(inst->dc_rx);
-
-               list_del(list_ptr);
-               kfree(inst);
-       }
-       return 0;
-}
-
-struct qat_compression_instance *qat_compression_get_instance_node(int node)
-{
-       struct qat_compression_instance *inst = NULL;
-       struct adf_accel_dev *accel_dev = NULL;
-       unsigned long best = ~0;
-       struct list_head *itr;
-
-       list_for_each(itr, adf_devmgr_get_head()) {
-               struct adf_accel_dev *tmp_dev;
-               unsigned long ctr;
-               int tmp_dev_node;
-
-               tmp_dev = list_entry(itr, struct adf_accel_dev, list);
-               tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));
-
-               if ((node == tmp_dev_node || tmp_dev_node < 0) &&
-                   adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
-                       ctr = atomic_read(&tmp_dev->ref_count);
-                       if (best > ctr) {
-                               accel_dev = tmp_dev;
-                               best = ctr;
-                       }
-               }
-       }
-
-       if (!accel_dev) {
-               pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
-               /* Get any started device */
-               list_for_each(itr, adf_devmgr_get_head()) {
-                       struct adf_accel_dev *tmp_dev;
-
-                       tmp_dev = list_entry(itr, struct adf_accel_dev, list);
-                       if (adf_dev_started(tmp_dev) &&
-                           !list_empty(&tmp_dev->compression_list)) {
-                               accel_dev = tmp_dev;
-                               break;
-                       }
-               }
-       }
-
-       if (!accel_dev)
-               return NULL;
-
-       best = ~0;
-       list_for_each(itr, &accel_dev->compression_list) {
-               struct qat_compression_instance *tmp_inst;
-               unsigned long ctr;
-
-               tmp_inst = list_entry(itr, struct qat_compression_instance, list);
-               ctr = atomic_read(&tmp_inst->refctr);
-               if (best > ctr) {
-                       inst = tmp_inst;
-                       best = ctr;
-               }
-       }
-       if (inst) {
-               if (adf_dev_get(accel_dev)) {
-                       dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
-                       return NULL;
-               }
-               atomic_inc(&inst->refctr);
-       }
-       return inst;
-}
-
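qat_compression_get_instance_node() above is a two-level least-loaded pick: first the started device that has compression instances and the lowest ref_count on (or not pinned to) the requested NUMA node, falling back to any started device, then the instance on that device with the fewest users. The same scan recurs in qat_crypto_get_instance_node() further down. A condensed, generic sketch of the "fewest users wins" loop (the types are illustrative, not the driver's):

    struct item {
            struct list_head list;
            atomic_t users;
    };

    static struct item *pick_least_loaded(struct list_head *head)
    {
            struct item *best_item = NULL, *it;
            unsigned long best = ~0UL;

            list_for_each_entry(it, head, list) {
                    unsigned long ctr = atomic_read(&it->users);

                    if (ctr < best) { /* fewest current users wins */
                            best_item = it;
                            best = ctr;
                    }
            }
            return best_item;
    }
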
-static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
-{
-       struct qat_compression_instance *inst;
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-       unsigned long num_inst, num_msg_dc;
-       unsigned long bank;
-       int msg_size;
-       int ret;
-       int i;
-
-       INIT_LIST_HEAD(&accel_dev->compression_list);
-       strscpy(key, ADF_NUM_DC, sizeof(key));
-       ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-       if (ret)
-               return ret;
-
-       ret = kstrtoul(val, 10, &num_inst);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < num_inst; i++) {
-               inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
-                                   dev_to_node(&GET_DEV(accel_dev)));
-               if (!inst) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-
-               list_add_tail(&inst->list, &accel_dev->compression_list);
-               inst->id = i;
-               atomic_set(&inst->refctr, 0);
-               inst->accel_dev = accel_dev;
-               inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;
-
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       return ret;
-
-               ret = kstrtoul(val, 10, &bank);
-               if (ret)
-                       return ret;
-
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       return ret;
-
-               ret = kstrtoul(val, 10, &num_msg_dc);
-               if (ret)
-                       return ret;
-
-               msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
-               ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
-                                     msg_size, key, NULL, 0, &inst->dc_tx);
-               if (ret)
-                       return ret;
-
-               msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
-               ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
-                                     msg_size, key, qat_comp_alg_callback, 0,
-                                     &inst->dc_rx);
-               if (ret)
-                       return ret;
-
-               inst->dc_data = accel_dev->dc_data;
-               INIT_LIST_HEAD(&inst->backlog.list);
-               spin_lock_init(&inst->backlog.lock);
-       }
-       return 0;
-err:
-       qat_compression_free_instances(accel_dev);
-       return ret;
-}
-
-static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       dma_addr_t obuff_p = DMA_MAPPING_ERROR;
-       size_t ovf_buff_sz = QAT_COMP_MAX_SKID;
-       struct adf_dc_data *dc_data = NULL;
-       u8 *obuff = NULL;
-
-       dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
-       if (!dc_data)
-               goto err;
-
-       obuff = kzalloc_node(ovf_buff_sz, GFP_KERNEL, dev_to_node(dev));
-       if (!obuff)
-               goto err;
-
-       obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(dev, obuff_p)))
-               goto err;
-
-       dc_data->ovf_buff = obuff;
-       dc_data->ovf_buff_p = obuff_p;
-       dc_data->ovf_buff_sz = ovf_buff_sz;
-
-       accel_dev->dc_data = dc_data;
-
-       return 0;
-
-err:
-       accel_dev->dc_data = NULL;
-       kfree(obuff);
-       devm_kfree(dev, dc_data);
-       return -ENOMEM;
-}
-
-static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
-{
-       struct adf_dc_data *dc_data = accel_dev->dc_data;
-       struct device *dev = &GET_DEV(accel_dev);
-
-       if (!dc_data)
-               return;
-
-       dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
-                        DMA_FROM_DEVICE);
-       memset(dc_data->ovf_buff, 0, dc_data->ovf_buff_sz);
-       kfree(dc_data->ovf_buff);
-       devm_kfree(dev, dc_data);
-       accel_dev->dc_data = NULL;
-}
-
-static int qat_compression_init(struct adf_accel_dev *accel_dev)
-{
-       int ret;
-
-       ret = qat_compression_alloc_dc_data(accel_dev);
-       if (ret)
-               return ret;
-
-       ret = qat_compression_create_instances(accel_dev);
-       if (ret)
-               qat_free_dc_data(accel_dev);
-
-       return ret;
-}
-
-static int qat_compression_shutdown(struct adf_accel_dev *accel_dev)
-{
-       qat_free_dc_data(accel_dev);
-       return qat_compression_free_instances(accel_dev);
-}
-
-static int qat_compression_event_handler(struct adf_accel_dev *accel_dev,
-                                        enum adf_event event)
-{
-       int ret;
-
-       switch (event) {
-       case ADF_EVENT_INIT:
-               ret = qat_compression_init(accel_dev);
-               break;
-       case ADF_EVENT_SHUTDOWN:
-               ret = qat_compression_shutdown(accel_dev);
-               break;
-       case ADF_EVENT_RESTARTING:
-       case ADF_EVENT_RESTARTED:
-       case ADF_EVENT_START:
-       case ADF_EVENT_STOP:
-       default:
-               ret = 0;
-       }
-       return ret;
-}
-
-int qat_compression_register(void)
-{
-       memset(&qat_compression, 0, sizeof(qat_compression));
-       qat_compression.event_hld = qat_compression_event_handler;
-       qat_compression.name = "qat_compression";
-       return adf_service_register(&qat_compression);
-}
-
-int qat_compression_unregister(void)
-{
-       return adf_service_unregister(&qat_compression);
-}
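
The register/unregister pair above hooks qat_compression into the ADF service framework: a service is a named handle with an event callback, and adf_service_register() arranges for that callback to run on every device lifecycle event, which is what routes ADF_EVENT_INIT and ADF_EVENT_SHUTDOWN into qat_compression_init() and qat_compression_shutdown(). A minimal sketch of the same pattern for a hypothetical service:

    static struct service_hndl my_service;

    /* Hypothetical handler that only reacts to device init */
    static int my_event_handler(struct adf_accel_dev *accel_dev,
                                enum adf_event event)
    {
            if (event == ADF_EVENT_INIT)
                    dev_info(&GET_DEV(accel_dev), "device started\n");
            return 0;
    }

    int my_service_register(void)
    {
            memset(&my_service, 0, sizeof(my_service));
            my_service.event_hld = my_event_handler;
            my_service.name = "my_service";
            return adf_service_register(&my_service);
    }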
diff --git a/drivers/crypto/qat/qat_common/qat_compression.h b/drivers/crypto/qat/qat_common/qat_compression.h
deleted file mode 100644 (file)
index aebac23..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef _QAT_COMPRESSION_H_
-#define _QAT_COMPRESSION_H_
-
-#include <linux/list.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "qat_algs_send.h"
-
-#define QAT_COMP_MAX_SKID 4096
-
-struct qat_compression_instance {
-       struct adf_etr_ring_data *dc_tx;
-       struct adf_etr_ring_data *dc_rx;
-       struct adf_accel_dev *accel_dev;
-       struct list_head list;
-       unsigned long state;
-       int id;
-       atomic_t refctr;
-       struct qat_instance_backlog backlog;
-       struct adf_dc_data *dc_data;
-       void (*build_deflate_ctx)(void *ctx);
-};
-
-static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       u32 mask = ~hw_device->accel_capabilities_mask;
-
-       if (mask & ADF_ACCEL_CAPABILITIES_COMPRESSION)
-               return false;
-
-       return true;
-}
-
-#endif
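
adf_hw_dev_has_compression() above tests the capability bit through an inverted mask: an advertised capability becomes a cleared bit in mask, so the function returns true exactly when the COMPRESSION bit is set in accel_capabilities_mask. The inverted test is equivalent to this direct form:

    /* Equivalent direct form of the inverted-mask test above */
    return !!(hw_device->accel_capabilities_mask &
              ADF_ACCEL_CAPABILITIES_COMPRESSION);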
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
deleted file mode 100644 (file)
index 40c8e74..0000000
+++ /dev/null
@@ -1,287 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_transport.h"
-#include "adf_cfg.h"
-#include "adf_cfg_strings.h"
-#include "adf_gen2_hw_data.h"
-#include "qat_crypto.h"
-#include "icp_qat_fw.h"
-
-#define SEC ADF_KERNEL_SEC
-
-static struct service_hndl qat_crypto;
-
-void qat_crypto_put_instance(struct qat_crypto_instance *inst)
-{
-       atomic_dec(&inst->refctr);
-       adf_dev_put(inst->accel_dev);
-}
-
-static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
-{
-       struct qat_crypto_instance *inst, *tmp;
-       int i;
-
-       list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
-               for (i = 0; i < atomic_read(&inst->refctr); i++)
-                       qat_crypto_put_instance(inst);
-
-               if (inst->sym_tx)
-                       adf_remove_ring(inst->sym_tx);
-
-               if (inst->sym_rx)
-                       adf_remove_ring(inst->sym_rx);
-
-               if (inst->pke_tx)
-                       adf_remove_ring(inst->pke_tx);
-
-               if (inst->pke_rx)
-                       adf_remove_ring(inst->pke_rx);
-
-               list_del(&inst->list);
-               kfree(inst);
-       }
-       return 0;
-}
-
-struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
-{
-       struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
-       struct qat_crypto_instance *inst = NULL, *tmp_inst;
-       unsigned long best = ~0;
-
-       list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
-               unsigned long ctr;
-
-               if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
-                    dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
-                   adf_dev_started(tmp_dev) &&
-                   !list_empty(&tmp_dev->crypto_list)) {
-                       ctr = atomic_read(&tmp_dev->ref_count);
-                       if (best > ctr) {
-                               accel_dev = tmp_dev;
-                               best = ctr;
-                       }
-               }
-       }
-
-       if (!accel_dev) {
-               pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
-               /* Get any started device */
-               list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
-                       if (adf_dev_started(tmp_dev) &&
-                           !list_empty(&tmp_dev->crypto_list)) {
-                               accel_dev = tmp_dev;
-                               break;
-                       }
-               }
-       }
-
-       if (!accel_dev)
-               return NULL;
-
-       best = ~0;
-       list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
-               unsigned long ctr;
-
-               ctr = atomic_read(&tmp_inst->refctr);
-               if (best > ctr) {
-                       inst = tmp_inst;
-                       best = ctr;
-               }
-       }
-       if (inst) {
-               if (adf_dev_get(accel_dev)) {
-                       dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
-                       return NULL;
-               }
-               atomic_inc(&inst->refctr);
-       }
-       return inst;
-}
-
-/**
- * qat_crypto_vf_dev_config() - create device config for crypto instances
- *
- * @accel_dev: Pointer to acceleration device.
- *
- * Function creates the device configuration required to create
- * asym, sym or crypto instances.
- *
- * Return: 0 on success, error code otherwise.
- */
-int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev)
-{
-       u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map;
-
-       if (ring_to_svc_map != ADF_GEN2_DEFAULT_RING_TO_SRV_MAP) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Unsupported ring/service mapping present on PF");
-               return -EFAULT;
-       }
-
-       return GET_HW_DATA(accel_dev)->dev_config(accel_dev);
-}
-
-static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
-{
-       unsigned long num_inst, num_msg_sym, num_msg_asym;
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-       unsigned long sym_bank, asym_bank;
-       struct qat_crypto_instance *inst;
-       int msg_size;
-       int ret;
-       int i;
-
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-       ret = adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val);
-       if (ret)
-               return ret;
-
-       ret = kstrtoul(val, 0, &num_inst);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < num_inst; i++) {
-               inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
-                                   dev_to_node(&GET_DEV(accel_dev)));
-               if (!inst) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-
-               list_add_tail(&inst->list, &accel_dev->crypto_list);
-               inst->id = i;
-               atomic_set(&inst->refctr, 0);
-               inst->accel_dev = accel_dev;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       goto err;
-
-               ret = kstrtoul(val, 10, &sym_bank);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       goto err;
-
-               ret = kstrtoul(val, 10, &asym_bank);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       goto err;
-
-               ret = kstrtoul(val, 10, &num_msg_sym);
-               if (ret)
-                       goto err;
-
-               num_msg_sym = num_msg_sym >> 1;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       goto err;
-
-               ret = kstrtoul(val, 10, &num_msg_asym);
-               if (ret)
-                       goto err;
-               num_msg_asym = num_msg_asym >> 1;
-
-               msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
-               ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
-                                     msg_size, key, NULL, 0, &inst->sym_tx);
-               if (ret)
-                       goto err;
-
-               msg_size = msg_size >> 1;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
-               ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
-                                     msg_size, key, NULL, 0, &inst->pke_tx);
-               if (ret)
-                       goto err;
-
-               msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
-               ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
-                                     msg_size, key, qat_alg_callback, 0,
-                                     &inst->sym_rx);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
-               ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
-                                     msg_size, key, qat_alg_asym_callback, 0,
-                                     &inst->pke_rx);
-               if (ret)
-                       goto err;
-
-               INIT_LIST_HEAD(&inst->backlog.list);
-               spin_lock_init(&inst->backlog.lock);
-       }
-       return 0;
-err:
-       qat_crypto_free_instances(accel_dev);
-       return ret;
-}
-
-static int qat_crypto_init(struct adf_accel_dev *accel_dev)
-{
-       if (qat_crypto_create_instances(accel_dev))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
-{
-       return qat_crypto_free_instances(accel_dev);
-}
-
-static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
-                                   enum adf_event event)
-{
-       int ret;
-
-       switch (event) {
-       case ADF_EVENT_INIT:
-               ret = qat_crypto_init(accel_dev);
-               break;
-       case ADF_EVENT_SHUTDOWN:
-               ret = qat_crypto_shutdown(accel_dev);
-               break;
-       case ADF_EVENT_RESTARTING:
-       case ADF_EVENT_RESTARTED:
-       case ADF_EVENT_START:
-       case ADF_EVENT_STOP:
-       default:
-               ret = 0;
-       }
-       return ret;
-}
-
-int qat_crypto_register(void)
-{
-       memset(&qat_crypto, 0, sizeof(qat_crypto));
-       qat_crypto.event_hld = qat_crypto_event_handler;
-       qat_crypto.name = "qat_crypto";
-       return adf_service_register(&qat_crypto);
-}
-
-int qat_crypto_unregister(void)
-{
-       return adf_service_unregister(&qat_crypto);
-}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
deleted file mode 100644 (file)
index 6a0e961..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _QAT_CRYPTO_INSTANCE_H_
-#define _QAT_CRYPTO_INSTANCE_H_
-
-#include <crypto/aes.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include "adf_accel_devices.h"
-#include "icp_qat_fw_la.h"
-#include "qat_algs_send.h"
-#include "qat_bl.h"
-
-struct qat_crypto_instance {
-       struct adf_etr_ring_data *sym_tx;
-       struct adf_etr_ring_data *sym_rx;
-       struct adf_etr_ring_data *pke_tx;
-       struct adf_etr_ring_data *pke_rx;
-       struct adf_accel_dev *accel_dev;
-       struct list_head list;
-       unsigned long state;
-       int id;
-       atomic_t refctr;
-       struct qat_instance_backlog backlog;
-};
-
-struct qat_crypto_request;
-
-struct qat_crypto_request {
-       struct icp_qat_fw_la_bulk_req req;
-       union {
-               struct qat_alg_aead_ctx *aead_ctx;
-               struct qat_alg_skcipher_ctx *skcipher_ctx;
-       };
-       union {
-               struct aead_request *aead_req;
-               struct skcipher_request *skcipher_req;
-       };
-       struct qat_request_buffs buf;
-       void (*cb)(struct icp_qat_fw_la_resp *resp,
-                  struct qat_crypto_request *req);
-       union {
-               struct {
-                       __be64 iv_hi;
-                       __be64 iv_lo;
-               };
-               u8 iv[AES_BLOCK_SIZE];
-       };
-       bool encryption;
-       struct qat_alg_req alg_req;
-};
-
-static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       u32 mask = ~hw_device->accel_capabilities_mask;
-
-       if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC)
-               return false;
-       if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC)
-               return false;
-       if (mask & ADF_ACCEL_CAPABILITIES_AUTHENTICATION)
-               return false;
-
-       return true;
-}
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
deleted file mode 100644 (file)
index 7bba352..0000000
+++ /dev/null
@@ -1,1593 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/pci_ids.h>
-
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "icp_qat_hal.h"
-#include "icp_qat_uclo.h"
-
-#define BAD_REGADDR               0xffff
-#define MAX_RETRY_TIMES           10000
-#define INIT_CTX_ARB_VALUE        0x0
-#define INIT_CTX_ENABLE_VALUE     0x0
-#define INIT_PC_VALUE             0x0
-#define INIT_WAKEUP_EVENTS_VALUE  0x1
-#define INIT_SIG_EVENTS_VALUE     0x1
-#define INIT_CCENABLE_VALUE       0x2000
-#define RST_CSR_QAT_LSB           20
-#define RST_CSR_AE_LSB            0
-#define MC_TIMESTAMP_ENABLE       (0x1 << 7)
-
-#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
-       (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
-       (~(1 << CE_REG_PAR_ERR_BITPOS)))
-#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
-       (inst = ((inst & 0xFFFF00C03FFull) | \
-               ((((const_val) << 12) & 0x0FF00000ull) | \
-               (((const_val) << 10) & 0x0003FC00ull))))
-#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
-       (inst = ((inst & 0xFFFF00FFF00ull) | \
-               ((((const_val) << 12) & 0x0FF00000ull) | \
-               (((const_val) <<  0) & 0x000000FFull))))
-
-#define AE(handle, ae) ((handle)->hal_handle->aes[ae])
-
-static const u64 inst_4b[] = {
-       0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
-       0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
-       0x0A021000000ull
-};
-
-static const u64 inst[] = {
-       0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
-       0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
-       0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
-       0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
-       0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
-       0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
-       0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
-       0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
-       0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
-       0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
-       0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
-       0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
-       0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
-       0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
-       0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
-       0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
-       0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
-       0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
-       0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
-       0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
-       0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
-       0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
-};
-
-void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
-                         unsigned char ae, unsigned int ctx_mask)
-{
-       AE(handle, ae).live_ctx_mask = ctx_mask;
-}
-
-#define CSR_RETRY_TIMES 500
-static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
-                            unsigned char ae, unsigned int csr)
-{
-       unsigned int iterations = CSR_RETRY_TIMES;
-       int value;
-
-       do {
-               value = GET_AE_CSR(handle, ae, csr);
-               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
-                       return value;
-       } while (iterations--);
-
-       pr_err("QAT: Read CSR timeout\n");
-       return 0;
-}
-
-static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
-                            unsigned char ae, unsigned int csr,
-                            unsigned int value)
-{
-       unsigned int iterations = CSR_RETRY_TIMES;
-
-       do {
-               SET_AE_CSR(handle, ae, csr, value);
-               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
-                       return 0;
-       } while (iterations--);
-
-       pr_err("QAT: Write CSR Timeout\n");
-       return -EFAULT;
-}
-
-static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
-                                    unsigned char ae, unsigned char ctx,
-                                    unsigned int *events)
-{
-       unsigned int cur_ctx;
-
-       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
-       *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
-}
-
-static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
-                              unsigned char ae, unsigned int cycles,
-                              int chk_inactive)
-{
-       unsigned int base_cnt = 0, cur_cnt = 0;
-       unsigned int csr = (1 << ACS_ABO_BITPOS);
-       int times = MAX_RETRY_TIMES;
-       int elapsed_cycles = 0;
-
-       base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
-       base_cnt &= 0xffff;
-       while ((int)cycles > elapsed_cycles && times--) {
-               if (chk_inactive)
-                       csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
-
-               cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
-               cur_cnt &= 0xffff;
-               elapsed_cycles = cur_cnt - base_cnt;
-
-               if (elapsed_cycles < 0)
-                       elapsed_cycles += 0x10000;
-
-               /* ensure at least 8 cycles have elapsed in wait_cycles */
-               if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
-                       return 0;
-       }
-       if (times < 0) {
-               pr_err("QAT: wait_num_cycles time out\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
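qat_hal_wait_cycles() above measures time with the 16-bit PROFILE_COUNT counter, so the subtraction goes negative when the counter wraps; adding 0x10000 restores the true distance. A worked example of that correction:

    /* base_cnt = 0xfff0, cur_cnt = 0x0008 after a wrap:
     *   elapsed  = 0x0008 - 0xfff0 = -0xffe8  (negative: counter wrapped)
     *   elapsed += 0x10000         =  0x18    (24 cycles, the true count)
     */
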
-#define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
-#define SET_BIT(wrd, bit) ((wrd) | 1 << (bit))
-
-int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
-                           unsigned char ae, unsigned char mode)
-{
-       unsigned int csr, new_csr;
-
-       if (mode != 4 && mode != 8) {
-               pr_err("QAT: bad ctx mode=%d\n", mode);
-               return -EINVAL;
-       }
-
-       /* Sets the acceleration engine context mode to either four or eight contexts */
-       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       csr = IGNORE_W1C_MASK & csr;
-       new_csr = (mode == 4) ?
-               SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
-               CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
-       return 0;
-}
-
-int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
-                          unsigned char ae, unsigned char mode)
-{
-       unsigned int csr, new_csr;
-
-       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       csr &= IGNORE_W1C_MASK;
-
-       new_csr = (mode) ?
-               SET_BIT(csr, CE_NN_MODE_BITPOS) :
-               CLR_BIT(csr, CE_NN_MODE_BITPOS);
-
-       if (new_csr != csr)
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
-
-       return 0;
-}
-
-int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
-                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
-                          unsigned char mode)
-{
-       unsigned int csr, new_csr;
-
-       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       csr &= IGNORE_W1C_MASK;
-       switch (lm_type) {
-       case ICP_LMEM0:
-               new_csr = (mode) ?
-                       SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
-                       CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
-               break;
-       case ICP_LMEM1:
-               new_csr = (mode) ?
-                       SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
-                       CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
-               break;
-       case ICP_LMEM2:
-               new_csr = (mode) ?
-                       SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) :
-                       CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS);
-               break;
-       case ICP_LMEM3:
-               new_csr = (mode) ?
-                       SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) :
-                       CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS);
-               break;
-       default:
-               pr_err("QAT: lmType = 0x%x\n", lm_type);
-               return -EINVAL;
-       }
-
-       if (new_csr != csr)
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
-       return 0;
-}
-
-void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
-                               unsigned char ae, unsigned char mode)
-{
-       unsigned int csr, new_csr;
-
-       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       csr &= IGNORE_W1C_MASK;
-       new_csr = (mode) ?
-                 SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) :
-                 CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS);
-       if (new_csr != csr)
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
-}
-
-static unsigned short qat_hal_get_reg_addr(unsigned int type,
-                                          unsigned short reg_num)
-{
-       unsigned short reg_addr;
-
-       switch (type) {
-       case ICP_GPA_ABS:
-       case ICP_GPB_ABS:
-               reg_addr = 0x80 | (reg_num & 0x7f);
-               break;
-       case ICP_GPA_REL:
-       case ICP_GPB_REL:
-               reg_addr = reg_num & 0x1f;
-               break;
-       case ICP_SR_RD_REL:
-       case ICP_SR_WR_REL:
-       case ICP_SR_REL:
-               reg_addr = 0x180 | (reg_num & 0x1f);
-               break;
-       case ICP_SR_ABS:
-               reg_addr = 0x140 | ((reg_num & 0x3) << 1);
-               break;
-       case ICP_DR_RD_REL:
-       case ICP_DR_WR_REL:
-       case ICP_DR_REL:
-               reg_addr = 0x1c0 | (reg_num & 0x1f);
-               break;
-       case ICP_DR_ABS:
-               reg_addr = 0x100 | ((reg_num & 0x3) << 1);
-               break;
-       case ICP_NEIGH_REL:
-               reg_addr = 0x280 | (reg_num & 0x1f);
-               break;
-       case ICP_LMEM0:
-               reg_addr = 0x200;
-               break;
-       case ICP_LMEM1:
-               reg_addr = 0x220;
-               break;
-       case ICP_LMEM2:
-               reg_addr = 0x2c0;
-               break;
-       case ICP_LMEM3:
-               reg_addr = 0x2e0;
-               break;
-       case ICP_NO_DEST:
-               reg_addr = 0x300 | (reg_num & 0xff);
-               break;
-       default:
-               reg_addr = BAD_REGADDR;
-               break;
-       }
-       return reg_addr;
-}
-
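qat_hal_get_reg_addr() above encodes a register type and number into the flat address space used by the microinstructions: each register class has a fixed base, with the register number masked into the low bits. A few worked encodings:

    /* ICP_GPA_ABS,   reg 5 -> 0x080 | (5 & 0x7f) = 0x085
     * ICP_SR_RD_REL, reg 3 -> 0x180 | (3 & 0x1f) = 0x183
     * ICP_LMEM1             -> 0x220 (fixed address, reg number unused)
     */
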
-void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned int reset_mask = handle->chip_info->icp_rst_mask;
-       unsigned int reset_csr = handle->chip_info->icp_rst_csr;
-       unsigned int csr_val;
-
-       csr_val = GET_CAP_CSR(handle, reset_csr);
-       csr_val |= reset_mask;
-       SET_CAP_CSR(handle, reset_csr, csr_val);
-}
-
-static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
-                               unsigned char ae, unsigned int ctx_mask,
-                               unsigned int ae_csr, unsigned int csr_val)
-{
-       unsigned int ctx, cur_ctx;
-
-       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
-
-       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
-               if (!(ctx_mask & (1 << ctx)))
-                       continue;
-               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
-               qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
-       }
-
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
-}
-
-static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
-                               unsigned char ae, unsigned char ctx,
-                               unsigned int ae_csr)
-{
-       unsigned int cur_ctx, csr_val;
-
-       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
-       csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
-
-       return csr_val;
-}
-
-static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
-                                 unsigned char ae, unsigned int ctx_mask,
-                                 unsigned int events)
-{
-       unsigned int ctx, cur_ctx;
-
-       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
-       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
-               if (!(ctx_mask & (1 << ctx)))
-                       continue;
-               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
-               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
-       }
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
-}
-
-static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
-                                    unsigned char ae, unsigned int ctx_mask,
-                                    unsigned int events)
-{
-       unsigned int ctx, cur_ctx;
-
-       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
-       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
-               if (!(ctx_mask & (1 << ctx)))
-                       continue;
-               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
-               qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
-                                 events);
-       }
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
-}
-
-static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned int base_cnt, cur_cnt;
-       unsigned char ae;
-       int times = MAX_RETRY_TIMES;
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
-               base_cnt &= 0xffff;
-
-               do {
-                       cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
-                       cur_cnt &= 0xffff;
-               } while (times-- && (cur_cnt == base_cnt));
-
-               if (times < 0) {
-                       pr_err("QAT: AE%d is inactive!!\n", ae);
-                       return -EFAULT;
-               }
-       }
-
-       return 0;
-}
-
-int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
-                           unsigned int ae)
-{
-       unsigned int enable = 0, active = 0;
-
-       enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
-       if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
-           (active & (1 << ACS_ABO_BITPOS)))
-               return 1;
-       else
-               return 0;
-}
-
-static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned int misc_ctl_csr, misc_ctl;
-       unsigned char ae;
-
-       misc_ctl_csr = handle->chip_info->misc_ctl_csr;
-       /* stop the timestamp timers */
-       misc_ctl = GET_CAP_CSR(handle, misc_ctl_csr);
-       if (misc_ctl & MC_TIMESTAMP_ENABLE)
-               SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl &
-                           (~MC_TIMESTAMP_ENABLE));
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
-               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
-       }
-       /* start timestamp timers */
-       SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE);
-}
-
-#define ESRAM_AUTO_TINIT       BIT(2)
-#define ESRAM_AUTO_TINIT_DONE  BIT(3)
-#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
-#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
-static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
-{
-       void __iomem *csr_addr =
-                       (void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
-                       ESRAM_AUTO_INIT_CSR_OFFSET);
-       unsigned int csr_val;
-       int times = 30;
-
-       if (handle->pci_dev->device != PCI_DEVICE_ID_INTEL_QAT_DH895XCC)
-               return 0;
-
-       csr_val = ADF_CSR_RD(csr_addr, 0);
-       if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
-               return 0;
-
-       csr_val = ADF_CSR_RD(csr_addr, 0);
-       csr_val |= ESRAM_AUTO_TINIT;
-       ADF_CSR_WR(csr_addr, 0, csr_val);
-
-       do {
-               qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
-               csr_val = ADF_CSR_RD(csr_addr, 0);
-       } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
-       if (times < 0) {
-               pr_err("QAT: Fail to init eSram!\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-#define SHRAM_INIT_CYCLES 2060
-int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned int clk_csr = handle->chip_info->glb_clk_enable_csr;
-       unsigned int reset_mask = handle->chip_info->icp_rst_mask;
-       unsigned int reset_csr = handle->chip_info->icp_rst_csr;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned char ae = 0;
-       unsigned int times = 100;
-       unsigned int csr_val;
-
-       /* write to the reset csr */
-       csr_val = GET_CAP_CSR(handle, reset_csr);
-       csr_val &= ~reset_mask;
-       do {
-               SET_CAP_CSR(handle, reset_csr, csr_val);
-               if (!(times--))
-                       goto out_err;
-               csr_val = GET_CAP_CSR(handle, reset_csr);
-               csr_val &= reset_mask;
-       } while (csr_val);
-       /* enable clock */
-       csr_val = GET_CAP_CSR(handle, clk_csr);
-       csr_val |= reset_mask;
-       SET_CAP_CSR(handle, clk_csr, csr_val);
-       if (qat_hal_check_ae_alive(handle))
-               goto out_err;
-
-       /* Set undefined power-up/reset states to reasonable default values */
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
-                                 INIT_CTX_ENABLE_VALUE);
-               qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
-                                   CTX_STS_INDIRECT,
-                                   handle->hal_handle->upc_mask &
-                                   INIT_PC_VALUE);
-               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
-               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
-               qat_hal_put_wakeup_event(handle, ae,
-                                        ICP_QAT_UCLO_AE_ALL_CTX,
-                                        INIT_WAKEUP_EVENTS_VALUE);
-               qat_hal_put_sig_event(handle, ae,
-                                     ICP_QAT_UCLO_AE_ALL_CTX,
-                                     INIT_SIG_EVENTS_VALUE);
-       }
-       if (qat_hal_init_esram(handle))
-               goto out_err;
-       if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
-               goto out_err;
-       qat_hal_reset_timestamp(handle);
-
-       return 0;
-out_err:
-       pr_err("QAT: failed to get device out of reset\n");
-       return -EFAULT;
-}
-
-static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
-                               unsigned char ae, unsigned int ctx_mask)
-{
-       unsigned int ctx;
-
-       ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       ctx &= IGNORE_W1C_MASK &
-               (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
-}
-
-static u64 qat_hal_parity_64bit(u64 word)
-{
-       word ^= word >> 1;
-       word ^= word >> 2;
-       word ^= word >> 4;
-       word ^= word >> 8;
-       word ^= word >> 16;
-       word ^= word >> 32;
-       return word & 1;
-}
-
-static u64 qat_hal_set_uword_ecc(u64 uword)
-{
-       u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
-               bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
-               bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
-               bit6_mask = 0xdaf69a46910ULL;
-
-       /* clear the ecc bits */
-       uword &= ~(0x7fULL << 0x2C);
-       uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
-       uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
-       uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
-       uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
-       uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
-       uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
-       uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
-       return uword;
-}
-
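qat_hal_set_uword_ecc() above recomputes the seven ECC check bits (bits 44-50) of a micro-word: each check bit is the parity of the word ANDed with a fixed mask, and qat_hal_parity_64bit() folds the 64-bit value down to a single bit with successive XOR shifts. That fold is equivalent to a population-count parity:

    /* Equivalent to qat_hal_parity_64bit(): popcount modulo 2 */
    static u64 parity64(u64 word)
    {
            return hweight64(word) & 1;
    }
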
-void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
-                      unsigned char ae, unsigned int uaddr,
-                      unsigned int words_num, u64 *uword)
-{
-       unsigned int ustore_addr;
-       unsigned int i;
-
-       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
-       uaddr |= UA_ECS;
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
-       for (i = 0; i < words_num; i++) {
-               unsigned int uwrd_lo, uwrd_hi;
-               u64 tmp;
-
-               tmp = qat_hal_set_uword_ecc(uword[i]);
-               uwrd_lo = (unsigned int)(tmp & 0xffffffff);
-               uwrd_hi = (unsigned int)(tmp >> 0x20);
-               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
-               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
-       }
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
-}
-
-static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
-                              unsigned char ae, unsigned int ctx_mask)
-{
-       unsigned int ctx;
-
-       ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       ctx &= IGNORE_W1C_MASK;
-       ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
-       ctx |= (ctx_mask << CE_ENABLE_BITPOS);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
-}
-
-static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned char ae;
-       unsigned short reg;
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
-                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
-                                            reg, 0);
-                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
-                                            reg, 0);
-               }
-       }
-}
-
-static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned char ae;
-       unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
-       int times = MAX_RETRY_TIMES;
-       unsigned int csr_val = 0;
-       unsigned int savctx = 0;
-       int ret = 0;
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
-               csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
-               qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
-               csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-               csr_val &= IGNORE_W1C_MASK;
-               if (handle->chip_info->nn)
-                       csr_val |= CE_NN_MODE;
-
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
-               qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
-                                 (u64 *)inst);
-               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
-                                   handle->hal_handle->upc_mask &
-                                   INIT_PC_VALUE);
-               savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
-               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
-               qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
-               qat_hal_wr_indr_csr(handle, ae, ctx_mask,
-                                   CTX_SIG_EVENTS_INDIRECT, 0);
-               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
-               qat_hal_enable_ctx(handle, ae, ctx_mask);
-       }
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               /* wait for AE to finish */
-               do {
-                       ret = qat_hal_wait_cycles(handle, ae, 20, 1);
-               } while (ret && times--);
-
-               if (times < 0) {
-                       pr_err("QAT: clear GPR of AE %d failed", ae);
-                       return -EINVAL;
-               }
-               qat_hal_disable_ctx(handle, ae, ctx_mask);
-               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
-                                 savctx & ACS_ACNO);
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
-                                 INIT_CTX_ENABLE_VALUE);
-               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
-                                   handle->hal_handle->upc_mask &
-                                   INIT_PC_VALUE);
-               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
-               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
-               qat_hal_put_wakeup_event(handle, ae, ctx_mask,
-                                        INIT_WAKEUP_EVENTS_VALUE);
-               qat_hal_put_sig_event(handle, ae, ctx_mask,
-                                     INIT_SIG_EVENTS_VALUE);
-       }
-       return 0;
-}
-
-static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
-                            struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       unsigned int max_en_ae_id = 0;
-       struct adf_bar *sram_bar;
-       unsigned int csr_val = 0;
-       unsigned long ae_mask;
-       unsigned char ae = 0;
-       int ret = 0;
-
-       handle->pci_dev = pci_info->pci_dev;
-       switch (handle->pci_dev->device) {
-       case ADF_4XXX_PCI_DEVICE_ID:
-       case ADF_401XX_PCI_DEVICE_ID:
-               handle->chip_info->mmp_sram_size = 0;
-               handle->chip_info->nn = false;
-               handle->chip_info->lm2lm3 = true;
-               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
-               handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
-               handle->chip_info->icp_rst_mask = 0x100015;
-               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0;
-               handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX;
-               handle->chip_info->wakeup_event_val = 0x80000000;
-               handle->chip_info->fw_auth = true;
-               handle->chip_info->css_3k = true;
-               handle->chip_info->tgroup_share_ustore = true;
-               handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
-               handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
-               handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI_4XXX;
-               handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO_4XXX;
-               handle->chip_info->fcu_loaded_ae_csr = FCU_AE_LOADED_4XXX;
-               handle->chip_info->fcu_loaded_ae_pos = 0;
-
-               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET_4XXX;
-               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET_4XXX;
-               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET_4XXX;
-               handle->hal_cap_ae_local_csr_addr_v =
-                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
-                       + LOCAL_TO_XFER_REG_OFFSET);
-               break;
-       case PCI_DEVICE_ID_INTEL_QAT_C62X:
-       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
-               handle->chip_info->mmp_sram_size = 0;
-               handle->chip_info->nn = true;
-               handle->chip_info->lm2lm3 = false;
-               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
-               handle->chip_info->icp_rst_csr = ICP_RESET;
-               handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
-                                                 (hw_data->accel_mask << RST_CSR_QAT_LSB);
-               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
-               handle->chip_info->misc_ctl_csr = MISC_CONTROL;
-               handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
-               handle->chip_info->fw_auth = true;
-               handle->chip_info->css_3k = false;
-               handle->chip_info->tgroup_share_ustore = false;
-               handle->chip_info->fcu_ctl_csr = FCU_CONTROL;
-               handle->chip_info->fcu_sts_csr = FCU_STATUS;
-               handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI;
-               handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO;
-               handle->chip_info->fcu_loaded_ae_csr = FCU_STATUS;
-               handle->chip_info->fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
-               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
-               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
-               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
-               handle->hal_cap_ae_local_csr_addr_v =
-                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
-                       + LOCAL_TO_XFER_REG_OFFSET);
-               break;
-       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
-               handle->chip_info->mmp_sram_size = 0x40000;
-               handle->chip_info->nn = true;
-               handle->chip_info->lm2lm3 = false;
-               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
-               handle->chip_info->icp_rst_csr = ICP_RESET;
-               handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
-                                                 (hw_data->accel_mask << RST_CSR_QAT_LSB);
-               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
-               handle->chip_info->misc_ctl_csr = MISC_CONTROL;
-               handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
-               handle->chip_info->fw_auth = false;
-               handle->chip_info->css_3k = false;
-               handle->chip_info->tgroup_share_ustore = false;
-               handle->chip_info->fcu_ctl_csr = 0;
-               handle->chip_info->fcu_sts_csr = 0;
-               handle->chip_info->fcu_dram_addr_hi = 0;
-               handle->chip_info->fcu_dram_addr_lo = 0;
-               handle->chip_info->fcu_loaded_ae_csr = 0;
-               handle->chip_info->fcu_loaded_ae_pos = 0;
-               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
-               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
-               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
-               handle->hal_cap_ae_local_csr_addr_v =
-                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
-                       + LOCAL_TO_XFER_REG_OFFSET);
-               break;
-       default:
-               ret = -EINVAL;
-               goto out_err;
-       }
-
-       if (handle->chip_info->mmp_sram_size > 0) {
-               sram_bar =
-                       &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
-               handle->hal_sram_addr_v = sram_bar->virt_addr;
-       }
-       handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
-       handle->hal_handle->ae_mask = hw_data->ae_mask;
-       handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask;
-       handle->hal_handle->slice_mask = hw_data->accel_mask;
-       handle->cfg_ae_mask = ALL_AE_MASK;
-       /* create AE objects */
-       handle->hal_handle->upc_mask = 0x1ffff;
-       handle->hal_handle->max_ustore = 0x4000;
-
-       ae_mask = handle->hal_handle->ae_mask;
-       for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE) {
-               handle->hal_handle->aes[ae].free_addr = 0;
-               handle->hal_handle->aes[ae].free_size =
-                   handle->hal_handle->max_ustore;
-               handle->hal_handle->aes[ae].ustore_size =
-                   handle->hal_handle->max_ustore;
-               handle->hal_handle->aes[ae].live_ctx_mask =
-                                               ICP_QAT_UCLO_AE_ALL_CTX;
-               max_en_ae_id = ae;
-       }
-       handle->hal_handle->ae_max_num = max_en_ae_id + 1;
-
-       /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
-               csr_val |= 0x1;
-               qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
-       }
-out_err:
-       return ret;
-}
-
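The loop over ae_mask at the end of qat_hal_chip_init() records the id of the last enabled accel engine, so ae_max_num ends up as highest-set-bit + 1. A minimal user-space sketch of that derivation, with a made-up mask and an open-coded bit test in place of the kernel's for_each_set_bit():

#include <stdio.h>

int main(void)
{
        unsigned long ae_mask = 0x17;   /* hypothetical mask: AEs 0, 1, 2 and 4 */
        unsigned int ae, max_en_ae_id = 0;

        for (ae = 0; ae < 8 * sizeof(ae_mask); ae++)
                if (ae_mask & (1UL << ae))
                        max_en_ae_id = ae;      /* last set bit wins */

        printf("ae_max_num = %u\n", max_en_ae_id + 1);  /* prints 5 */
        return 0;
}
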
-int qat_hal_init(struct adf_accel_dev *accel_dev)
-{
-       struct icp_qat_fw_loader_handle *handle;
-       int ret = 0;
-
-       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-       if (!handle)
-               return -ENOMEM;
-
-       handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
-       if (!handle->hal_handle) {
-               ret = -ENOMEM;
-               goto out_hal_handle;
-       }
-
-       handle->chip_info = kzalloc(sizeof(*handle->chip_info), GFP_KERNEL);
-       if (!handle->chip_info) {
-               ret = -ENOMEM;
-               goto out_chip_info;
-       }
-
-       ret = qat_hal_chip_init(handle, accel_dev);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev), "qat_hal_chip_init error\n");
-               goto out_err;
-       }
-
-       /* take all AEs out of reset */
-       ret = qat_hal_clr_reset(handle);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
-               goto out_err;
-       }
-
-       qat_hal_clear_xfer(handle);
-       if (!handle->chip_info->fw_auth) {
-               ret = qat_hal_clear_gpr(handle);
-               if (ret)
-                       goto out_err;
-       }
-
-       accel_dev->fw_loader->fw_loader = handle;
-       return 0;
-
-out_err:
-       kfree(handle->chip_info);
-out_chip_info:
-       kfree(handle->hal_handle);
-out_hal_handle:
-       kfree(handle);
-       return ret;
-}
-
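qat_hal_init() above uses the kernel's label-per-allocation unwind idiom: each successful allocation adds one cleanup step, and a failure jumps to the label that frees everything allocated so far, in reverse order. A standalone sketch of the same shape, with calloc()/free() standing in for kzalloc()/kfree() and placeholder sizes:

#include <stdlib.h>

struct handle {
        void *hal_handle;
        void *chip_info;
};

static struct handle *handle_alloc(void)
{
        struct handle *h = calloc(1, sizeof(*h));

        if (!h)
                return NULL;
        h->hal_handle = calloc(1, 64);          /* placeholder size */
        if (!h->hal_handle)
                goto out_hal_handle;
        h->chip_info = calloc(1, 64);           /* placeholder size */
        if (!h->chip_info)
                goto out_chip_info;
        return h;

out_chip_info:
        free(h->hal_handle);
out_hal_handle:
        free(h);
        return NULL;
}

int main(void)
{
        struct handle *h = handle_alloc();

        if (h) {                /* teardown mirrors qat_hal_deinit() */
                free(h->chip_info);
                free(h->hal_handle);
                free(h);
        }
        return 0;
}
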
-void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
-{
-       if (!handle)
-               return;
-       kfree(handle->chip_info);
-       kfree(handle->hal_handle);
-       kfree(handle);
-}
-
-int qat_hal_start(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       u32 wakeup_val = handle->chip_info->wakeup_event_val;
-       u32 fcu_ctl_csr, fcu_sts_csr;
-       unsigned int fcu_sts;
-       unsigned char ae;
-       u32 ae_ctr = 0;
-       int retry = 0;
-
-       if (handle->chip_info->fw_auth) {
-               fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
-               fcu_sts_csr = handle->chip_info->fcu_sts_csr;
-               ae_ctr = hweight32(ae_mask);
-               SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START);
-               do {
-                       msleep(FW_AUTH_WAIT_PERIOD);
-                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
-                       if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
-                               return ae_ctr;
-               } while (retry++ < FW_AUTH_MAX_RETRY);
-               pr_err("QAT: start error (FCU_STS = 0x%x)\n", fcu_sts);
-               return 0;
-       } else {
-               for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-                       qat_hal_put_wakeup_event(handle, ae, 0, wakeup_val);
-                       qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX);
-                       ae_ctr++;
-               }
-               return ae_ctr;
-       }
-}
-
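On firmware-authenticated parts, qat_hal_start() writes FCU_CTRL_CMD_START and then polls the FCU status CSR for the done bit, giving up after FW_AUTH_MAX_RETRY rounds. A runnable sketch of that bounded poll; the status reader is a stub and the done-bit position is illustrative, not the hardware's:

#include <stdio.h>

#define FW_AUTH_MAX_RETRY 3
#define FCU_STS_DONE_POS  1     /* assumed bit position, for illustration */

/* Stub standing in for GET_CAP_CSR(handle, fcu_sts_csr): reports
 * "done" on the third poll so the loop below has something to find. */
static unsigned int read_fcu_status(void)
{
        static int calls;

        return (++calls >= 3) ? (1u << FCU_STS_DONE_POS) : 0;
}

int main(void)
{
        unsigned int fcu_sts;
        int retry = 0;

        do {
                /* the driver sleeps FW_AUTH_WAIT_PERIOD here */
                fcu_sts = read_fcu_status();
                if ((fcu_sts >> FCU_STS_DONE_POS) & 0x1) {
                        printf("started after %d polls\n", retry + 1);
                        return 0;
                }
        } while (retry++ < FW_AUTH_MAX_RETRY);

        printf("start error (FCU_STS = 0x%x)\n", fcu_sts);
        return 1;
}
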
-void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
-                 unsigned int ctx_mask)
-{
-       if (!handle->chip_info->fw_auth)
-               qat_hal_disable_ctx(handle, ae, ctx_mask);
-}
-
-void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
-                   unsigned char ae, unsigned int ctx_mask, unsigned int upc)
-{
-       qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
-                           handle->hal_handle->upc_mask & upc);
-}
-
-static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
-                              unsigned char ae, unsigned int uaddr,
-                              unsigned int words_num, u64 *uword)
-{
-       unsigned int i, uwrd_lo, uwrd_hi;
-       unsigned int ustore_addr, misc_control;
-
-       misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
-       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
-                         misc_control & 0xfffffffb);
-       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
-       uaddr |= UA_ECS;
-       for (i = 0; i < words_num; i++) {
-               qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
-               uaddr++;
-               uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
-               uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
-               uword[i] = uwrd_hi;
-               uword[i] = (uword[i] << 0x20) | uwrd_lo;
-       }
-       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
-}
-
-void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
-                    unsigned char ae, unsigned int uaddr,
-                    unsigned int words_num, unsigned int *data)
-{
-       unsigned int i, ustore_addr;
-
-       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
-       uaddr |= UA_ECS;
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
-       for (i = 0; i < words_num; i++) {
-               unsigned int uwrd_lo, uwrd_hi, tmp;
-
-               uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
-                         ((data[i] & 0xff00) << 2) |
-                         (0x3 << 8) | (data[i] & 0xff);
-               uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
-               uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
-               tmp = ((data[i] >> 0x10) & 0xffff);
-               uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
-               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
-               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
-       }
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
-}
-
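The two hweight32() terms in qat_hal_wr_umem() pack per-halfword parity into the upper ustore word: bit 8 holds the parity of data[15:0] and bit 9 the parity of data[31:16]. A user-space sketch of just that packing, with GCC/Clang's __builtin_popcount() standing in for hweight32():

#include <stdio.h>

static unsigned int parity16(unsigned int v)
{
        return __builtin_popcount(v & 0xffff) & 0x1;
}

int main(void)
{
        unsigned int data = 0x12345678;
        unsigned int uwrd_hi = 0;

        uwrd_hi |= parity16(data) << 8;         /* low halfword */
        uwrd_hi |= parity16(data >> 16) << 9;   /* high halfword */

        printf("parity bits: 0x%x\n", (uwrd_hi >> 8) & 0x3); /* prints 0x2 */
        return 0;
}
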
-#define MAX_EXEC_INST 100
-static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
-                                  unsigned char ae, unsigned char ctx,
-                                  u64 *micro_inst, unsigned int inst_num,
-                                  int code_off, unsigned int max_cycle,
-                                  unsigned int *endpc)
-{
-       unsigned int ind_lm_addr_byte0 = 0, ind_lm_addr_byte1 = 0;
-       unsigned int ind_lm_addr_byte2 = 0, ind_lm_addr_byte3 = 0;
-       unsigned int ind_t_index = 0, ind_t_index_byte = 0;
-       unsigned int ind_lm_addr0 = 0, ind_lm_addr1 = 0;
-       unsigned int ind_lm_addr2 = 0, ind_lm_addr3 = 0;
-       u64 savuwords[MAX_EXEC_INST];
-       unsigned int ind_cnt_sig;
-       unsigned int ind_sig, act_sig;
-       unsigned int csr_val = 0, newcsr_val;
-       unsigned int savctx;
-       unsigned int savcc, wakeup_events, savpc;
-       unsigned int ctxarb_ctl, ctx_enables;
-
-       if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
-               pr_err("QAT: invalid instruction num %d\n", inst_num);
-               return -EINVAL;
-       }
-       /* save current context */
-       ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
-       ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
-       ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                               INDIRECT_LM_ADDR_0_BYTE_INDEX);
-       ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                               INDIRECT_LM_ADDR_1_BYTE_INDEX);
-       if (handle->chip_info->lm2lm3) {
-               ind_lm_addr2 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                  LM_ADDR_2_INDIRECT);
-               ind_lm_addr3 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                  LM_ADDR_3_INDIRECT);
-               ind_lm_addr_byte2 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                       INDIRECT_LM_ADDR_2_BYTE_INDEX);
-               ind_lm_addr_byte3 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                       INDIRECT_LM_ADDR_3_BYTE_INDEX);
-               ind_t_index = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                 INDIRECT_T_INDEX);
-               ind_t_index_byte = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                      INDIRECT_T_INDEX_BYTE_INDEX);
-       }
-       if (inst_num <= MAX_EXEC_INST)
-               qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
-       qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
-       savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
-       savpc &= handle->hal_handle->upc_mask;
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       ctx_enables &= IGNORE_W1C_MASK;
-       savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
-       savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
-       ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
-       ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                         FUTURE_COUNT_SIGNAL_INDIRECT);
-       ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                     CTX_SIG_EVENTS_INDIRECT);
-       act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
-       /* execute the microcode */
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
-       qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
-       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
-       if (code_off)
-               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
-       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
-       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
-       qat_hal_enable_ctx(handle, ae, (1 << ctx));
-       /* wait for the microcode to finish */
-       if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
-               return -EFAULT;
-       if (endpc) {
-               unsigned int ctx_status;
-
-               ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                CTX_STS_INDIRECT);
-               *endpc = ctx_status & handle->hal_handle->upc_mask;
-       }
-       /* restore the saved context */
-       qat_hal_disable_ctx(handle, ae, (1 << ctx));
-       if (inst_num <= MAX_EXEC_INST)
-               qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
-       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
-                           handle->hal_handle->upc_mask & savpc);
-       csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
-       newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
-       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
-       qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
-       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           LM_ADDR_0_INDIRECT, ind_lm_addr0);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           LM_ADDR_1_INDIRECT, ind_lm_addr1);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
-       if (handle->chip_info->lm2lm3) {
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_2_INDIRECT,
-                                   ind_lm_addr2);
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_3_INDIRECT,
-                                   ind_lm_addr3);
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
-                                   INDIRECT_LM_ADDR_2_BYTE_INDEX,
-                                   ind_lm_addr_byte2);
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
-                                   INDIRECT_LM_ADDR_3_BYTE_INDEX,
-                                   ind_lm_addr_byte3);
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
-                                   INDIRECT_T_INDEX, ind_t_index);
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
-                                   INDIRECT_T_INDEX_BYTE_INDEX,
-                                   ind_t_index_byte);
-       }
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           CTX_SIG_EVENTS_INDIRECT, ind_sig);
-       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
-
-       return 0;
-}
-
-static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
-                             unsigned char ae, unsigned char ctx,
-                             enum icp_qat_uof_regtype reg_type,
-                             unsigned short reg_num, unsigned int *data)
-{
-       unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
-       unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
-       unsigned short reg_addr;
-       int status = 0;
-       u64 insts, savuword;
-
-       reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
-       if (reg_addr == BAD_REGADDR) {
-               pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
-               return -EINVAL;
-       }
-       switch (reg_type) {
-       case ICP_GPA_REL:
-               insts = 0xA070000000ull | (reg_addr & 0x3ff);
-               break;
-       default:
-               insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
-               break;
-       }
-       savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
-       ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       ctx_enables &= IGNORE_W1C_MASK;
-       if (ctx != (savctx & ACS_ACNO))
-               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
-                                 ctx & ACS_ACNO);
-       qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
-       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
-       uaddr = UA_ECS;
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
-       insts = qat_hal_set_uword_ecc(insts);
-       uwrd_lo = (unsigned int)(insts & 0xffffffff);
-       uwrd_hi = (unsigned int)(insts >> 0x20);
-       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
-       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
-       /* delay for at least 8 cycles */
-       qat_hal_wait_cycles(handle, ae, 0x8, 0);
-       /*
-        * Read the ALU output.
-        * The instruction should have executed before the ECS bit
-        * is cleared by the USTORE_ADDRESS restore below.
-        */
-       *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
-       qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
-       if (ctx != (savctx & ACS_ACNO))
-               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
-                                 savctx & ACS_ACNO);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
-
-       return status;
-}
-
-static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
-                             unsigned char ae, unsigned char ctx,
-                             enum icp_qat_uof_regtype reg_type,
-                             unsigned short reg_num, unsigned int data)
-{
-       unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
-       u64 insts[] = {
-               0x0F440000000ull,
-               0x0F040000000ull,
-               0x0F0000C0300ull,
-               0x0E000010000ull
-       };
-       const int num_inst = ARRAY_SIZE(insts), code_off = 1;
-       const int imm_w1 = 0, imm_w0 = 1;
-
-       dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
-       if (dest_addr == BAD_REGADDR) {
-               pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
-               return -EINVAL;
-       }
-
-       data16lo = 0xffff & data;
-       data16hi = 0xffff & (data >> 0x10);
-       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
-                                         (0xff & data16hi));
-       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
-                                          (0xff & data16lo));
-       switch (reg_type) {
-       case ICP_GPA_REL:
-               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
-                   ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
-               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
-                   ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
-               break;
-       default:
-               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
-                   ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
-
-               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
-                   ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
-               break;
-       }
-
-       return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
-                                      code_off, num_inst * 0x5, NULL);
-}
-
-int qat_hal_get_ins_num(void)
-{
-       return ARRAY_SIZE(inst_4b);
-}
-
-static int qat_hal_concat_micro_code(u64 *micro_inst,
-                                    unsigned int inst_num, unsigned int size,
-                                    unsigned int addr, unsigned int *value)
-{
-       int i;
-       unsigned int cur_value;
-       const u64 *inst_arr;
-       int fixup_offset;
-       int usize = 0;
-       int orig_num;
-
-       orig_num = inst_num;
-       cur_value = value[0];
-       inst_arr = inst_4b;
-       usize = ARRAY_SIZE(inst_4b);
-       fixup_offset = inst_num;
-       for (i = 0; i < usize; i++)
-               micro_inst[inst_num++] = inst_arr[i];
-       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
-       fixup_offset++;
-       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
-       fixup_offset++;
-       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
-       fixup_offset++;
-       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
-
-       return inst_num - orig_num;
-}
-
-static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
-                                     unsigned char ae, unsigned char ctx,
-                                     int *pfirst_exec, u64 *micro_inst,
-                                     unsigned int inst_num)
-{
-       int stat = 0;
-       unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
-       unsigned int gprb0 = 0, gprb1 = 0;
-
-       if (*pfirst_exec) {
-               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
-               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
-               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
-               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
-               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
-               *pfirst_exec = 0;
-       }
-       stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
-                                      inst_num * 0x5, NULL);
-       if (stat != 0)
-               return -EFAULT;
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
-
-       return 0;
-}
-
-int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
-                       unsigned char ae,
-                       struct icp_qat_uof_batch_init *lm_init_header)
-{
-       struct icp_qat_uof_batch_init *plm_init;
-       u64 *micro_inst_arry;
-       int micro_inst_num;
-       int alloc_inst_size;
-       int first_exec = 1;
-       int stat = 0;
-
-       plm_init = lm_init_header->next;
-       alloc_inst_size = lm_init_header->size;
-       if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
-               alloc_inst_size = handle->hal_handle->max_ustore;
-       micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64),
-                                       GFP_KERNEL);
-       if (!micro_inst_arry)
-               return -ENOMEM;
-       micro_inst_num = 0;
-       while (plm_init) {
-               unsigned int addr, *value, size;
-
-               ae = plm_init->ae;
-               addr = plm_init->addr;
-               value = plm_init->value;
-               size = plm_init->size;
-               micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
-                                                           micro_inst_num,
-                                                           size, addr, value);
-               plm_init = plm_init->next;
-       }
-       /* execute the microcode */
-       if (micro_inst_arry && micro_inst_num > 0) {
-               micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
-               stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
-                                                 micro_inst_arry,
-                                                 micro_inst_num);
-       }
-       kfree(micro_inst_arry);
-       return stat;
-}
-
-static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
-                                  unsigned char ae, unsigned char ctx,
-                                  enum icp_qat_uof_regtype reg_type,
-                                  unsigned short reg_num, unsigned int val)
-{
-       int status = 0;
-       unsigned int reg_addr;
-       unsigned int ctx_enables;
-       unsigned short mask;
-       unsigned short dr_offset = 0x10;
-
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       if (CE_INUSE_CONTEXTS & ctx_enables) {
-               if (ctx & 0x1) {
-                       pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
-                       return -EINVAL;
-               }
-               mask = 0x1f;
-               dr_offset = 0x20;
-       } else {
-               mask = 0x0f;
-       }
-       if (reg_num & ~mask)
-               return -EINVAL;
-       reg_addr = reg_num + (ctx << 0x5);
-       switch (reg_type) {
-       case ICP_SR_RD_REL:
-       case ICP_SR_REL:
-               SET_AE_XFER(handle, ae, reg_addr, val);
-               break;
-       case ICP_DR_RD_REL:
-       case ICP_DR_REL:
-               SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
-               break;
-       default:
-               status = -EINVAL;
-               break;
-       }
-       return status;
-}
-
-static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
-                                  unsigned char ae, unsigned char ctx,
-                                  enum icp_qat_uof_regtype reg_type,
-                                  unsigned short reg_num, unsigned int data)
-{
-       unsigned int gprval, ctx_enables;
-       unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
-           data16low;
-       unsigned short reg_mask;
-       int status = 0;
-       u64 micro_inst[] = {
-               0x0F440000000ull,
-               0x0F040000000ull,
-               0x0A000000000ull,
-               0x0F0000C0300ull,
-               0x0E000010000ull
-       };
-       const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
-       const unsigned short gprnum = 0, dly = num_inst * 0x5;
-
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       if (CE_INUSE_CONTEXTS & ctx_enables) {
-               if (ctx & 0x1) {
-                       pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
-                       return -EINVAL;
-               }
-               reg_mask = (unsigned short)~0x1f;
-       } else {
-               reg_mask = (unsigned short)~0xf;
-       }
-       if (reg_num & reg_mask)
-               return -EINVAL;
-       xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
-       if (xfr_addr == BAD_REGADDR) {
-               pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
-               return -EINVAL;
-       }
-       status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
-       if (status) {
-               pr_err("QAT: failed to read register");
-               return status;
-       }
-       gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
-       data16low = 0xffff & data;
-       data16hi = 0xffff & (data >> 0x10);
-       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
-                                         (unsigned short)(0xff & data16hi));
-       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
-                                          (unsigned short)(0xff & data16low));
-       micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
-           ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
-       micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
-           ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
-       micro_inst[0x2] = micro_inst[0x2] |
-           ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
-       status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
-                                        code_off, dly, NULL);
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
-       return status;
-}
-
-static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
-                             unsigned char ae, unsigned char ctx,
-                             unsigned short nn, unsigned int val)
-{
-       unsigned int ctx_enables;
-       int stat = 0;
-
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       ctx_enables &= IGNORE_W1C_MASK;
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
-
-       stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
-       return stat;
-}
-
-static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
-                                     *handle, unsigned char ae,
-                                     unsigned short absreg_num,
-                                     unsigned short *relreg,
-                                     unsigned char *ctx)
-{
-       unsigned int ctx_enables;
-
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       if (ctx_enables & CE_INUSE_CONTEXTS) {
-               /* 4-ctx mode */
-               *relreg = absreg_num & 0x1F;
-               *ctx = (absreg_num >> 0x4) & 0x6;
-       } else {
-               /* 8-ctx mode */
-               *relreg = absreg_num & 0x0F;
-               *ctx = (absreg_num >> 0x4) & 0x7;
-       }
-       return 0;
-}
-
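A worked example of the conversion qat_hal_convert_abs_to_rel() performs in 8-ctx mode, where an absolute register number carries the context in bits [6:4] and the relative register number in bits [3:0] (the register value here is made up):

#include <stdio.h>

int main(void)
{
        unsigned short absreg_num = 0x2A;               /* hypothetical */
        unsigned short relreg = absreg_num & 0x0F;      /* -> 0xA */
        unsigned char ctx = (absreg_num >> 4) & 0x7;    /* -> ctx 2 */

        printf("abs 0x%x -> rel 0x%x in ctx %u\n", absreg_num, relreg, ctx);
        return 0;
}
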
-int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
-                    unsigned char ae, unsigned long ctx_mask,
-                    enum icp_qat_uof_regtype reg_type,
-                    unsigned short reg_num, unsigned int regdata)
-{
-       int stat = 0;
-       unsigned short reg;
-       unsigned char ctx = 0;
-       enum icp_qat_uof_regtype type;
-
-       if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
-               return -EINVAL;
-
-       do {
-               if (ctx_mask == 0) {
-                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
-                                                  &ctx);
-                       type = reg_type - 1;
-               } else {
-                       reg = reg_num;
-                       type = reg_type;
-                       if (!test_bit(ctx, &ctx_mask))
-                               continue;
-               }
-               stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
-               if (stat) {
-                       pr_err("QAT: write gpr fail\n");
-                       return -EINVAL;
-               }
-       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
-
-       return 0;
-}
-
-int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
-                        unsigned char ae, unsigned long ctx_mask,
-                        enum icp_qat_uof_regtype reg_type,
-                        unsigned short reg_num, unsigned int regdata)
-{
-       int stat = 0;
-       unsigned short reg;
-       unsigned char ctx = 0;
-       enum icp_qat_uof_regtype type;
-
-       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
-               return -EINVAL;
-
-       do {
-               if (ctx_mask == 0) {
-                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
-                                                  &ctx);
-                       type = reg_type - 3;
-               } else {
-                       reg = reg_num;
-                       type = reg_type;
-                       if (!test_bit(ctx, &ctx_mask))
-                               continue;
-               }
-               stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
-                                              regdata);
-               if (stat) {
-                       pr_err("QAT: write wr xfer fail\n");
-                       return -EINVAL;
-               }
-       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
-
-       return 0;
-}
-
-int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
-                        unsigned char ae, unsigned long ctx_mask,
-                        enum icp_qat_uof_regtype reg_type,
-                        unsigned short reg_num, unsigned int regdata)
-{
-       int stat = 0;
-       unsigned short reg;
-       unsigned char ctx = 0;
-       enum icp_qat_uof_regtype type;
-
-       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
-               return -EINVAL;
-
-       do {
-               if (ctx_mask == 0) {
-                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
-                                                  &ctx);
-                       type = reg_type - 3;
-               } else {
-                       reg = reg_num;
-                       type = reg_type;
-                       if (!test_bit(ctx, &ctx_mask))
-                               continue;
-               }
-               stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
-                                              regdata);
-               if (stat) {
-                       pr_err("QAT: write rd xfer fail\n");
-                       return -EINVAL;
-               }
-       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
-
-       return 0;
-}
-
-int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
-                   unsigned char ae, unsigned long ctx_mask,
-                   unsigned short reg_num, unsigned int regdata)
-{
-       int stat = 0;
-       unsigned char ctx;
-
-       if (!handle->chip_info->nn) {
-               dev_err(&handle->pci_dev->dev, "QAT: No next neigh in 0x%x\n",
-                       handle->pci_dev->device);
-               return -EINVAL;
-       }
-
-       if (ctx_mask == 0)
-               return -EINVAL;
-
-       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
-               if (!test_bit(ctx, &ctx_mask))
-                       continue;
-               stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
-               if (stat) {
-                       pr_err("QAT: write neigh error\n");
-                       return -EINVAL;
-               }
-       }
-
-       return 0;
-}
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
deleted file mode 100644 (file)
index b7f7869..0000000
+++ /dev/null
@@ -1,2132 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/pci_ids.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "icp_qat_uclo.h"
-#include "icp_qat_hal.h"
-#include "icp_qat_fw_loader_handle.h"
-
-#define UWORD_CPYBUF_SIZE 1024
-#define INVLD_UWORD 0xffffffffffull
-#define PID_MINOR_REV 0xf
-#define PID_MAJOR_REV (0xf << 4)
-
-static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
-                                unsigned int ae, unsigned int image_num)
-{
-       struct icp_qat_uclo_aedata *ae_data;
-       struct icp_qat_uclo_encapme *encap_image;
-       struct icp_qat_uclo_page *page = NULL;
-       struct icp_qat_uclo_aeslice *ae_slice = NULL;
-
-       ae_data = &obj_handle->ae_data[ae];
-       encap_image = &obj_handle->ae_uimage[image_num];
-       ae_slice = &ae_data->ae_slices[ae_data->slice_num];
-       ae_slice->encap_image = encap_image;
-
-       if (encap_image->img_ptr) {
-               ae_slice->ctx_mask_assigned =
-                                       encap_image->img_ptr->ctx_assigned;
-               ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
-       } else {
-               ae_slice->ctx_mask_assigned = 0;
-       }
-       ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
-       if (!ae_slice->region)
-               return -ENOMEM;
-       ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
-       if (!ae_slice->page)
-               goto out_err;
-       page = ae_slice->page;
-       page->encap_page = encap_image->page;
-       ae_slice->page->region = ae_slice->region;
-       ae_data->slice_num++;
-       return 0;
-out_err:
-       kfree(ae_slice->region);
-       ae_slice->region = NULL;
-       return -ENOMEM;
-}
-
-static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
-{
-       unsigned int i;
-
-       if (!ae_data) {
-               pr_err("QAT: bad argument, ae_data is NULL\n ");
-               return -EINVAL;
-       }
-
-       for (i = 0; i < ae_data->slice_num; i++) {
-               kfree(ae_data->ae_slices[i].region);
-               ae_data->ae_slices[i].region = NULL;
-               kfree(ae_data->ae_slices[i].page);
-               ae_data->ae_slices[i].page = NULL;
-       }
-       return 0;
-}
-
-static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
-                                unsigned int str_offset)
-{
-       if (!str_table->table_len || str_offset > str_table->table_len)
-               return NULL;
-       return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
-}
-
-static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
-{
-       int maj = hdr->maj_ver & 0xff;
-       int min = hdr->min_ver & 0xff;
-
-       if (hdr->file_id != ICP_QAT_UOF_FID) {
-               pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
-               return -EINVAL;
-       }
-       if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
-               pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
-                      maj, min);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
-{
-       int maj = suof_hdr->maj_ver & 0xff;
-       int min = suof_hdr->min_ver & 0xff;
-
-       if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
-               pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
-               return -EINVAL;
-       }
-       if (suof_hdr->fw_type != 0) {
-               pr_err("QAT: unsupported firmware type\n");
-               return -EINVAL;
-       }
-       if (suof_hdr->num_chunks <= 0x1) {
-               pr_err("QAT: SUOF chunk amount is incorrect\n");
-               return -EINVAL;
-       }
-       if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
-               pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
-                      maj, min);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
-                                     unsigned int addr, unsigned int *val,
-                                     unsigned int num_in_bytes)
-{
-       unsigned int outval;
-       unsigned char *ptr = (unsigned char *)val;
-
-       while (num_in_bytes) {
-               memcpy(&outval, ptr, 4);
-               SRAM_WRITE(handle, addr, outval);
-               num_in_bytes -= 4;
-               ptr += 4;
-               addr += 4;
-       }
-}
-
-static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
-                                     unsigned char ae, unsigned int addr,
-                                     unsigned int *val,
-                                     unsigned int num_in_bytes)
-{
-       unsigned int outval;
-       unsigned char *ptr = (unsigned char *)val;
-
-       addr >>= 0x2; /* convert to uword address */
-
-       while (num_in_bytes) {
-               memcpy(&outval, ptr, 4);
-               qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
-               num_in_bytes -= 4;
-               ptr += 4;
-       }
-}
-
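Both helpers above stream the payload four bytes at a time through memcpy() into a temporary word, and qat_uclo_wr_umem_by_words() first converts the byte address to a uword address with addr >> 2. A standalone sketch of that copy loop, printing instead of writing to hardware:

#include <stdio.h>
#include <string.h>

static void wr_by_words(unsigned int addr, const void *val,
                        unsigned int num_in_bytes)
{
        const unsigned char *ptr = val;
        unsigned int outval;

        addr >>= 2;             /* byte address -> uword address */
        while (num_in_bytes) {
                memcpy(&outval, ptr, 4);        /* avoids alignment traps */
                printf("uword[%u] = 0x%08x\n", addr++, outval);
                num_in_bytes -= 4;
                ptr += 4;
        }
}

int main(void)
{
        unsigned int data[2] = { 0x11223344, 0x55667788 };

        wr_by_words(0x10, data, sizeof(data));
        return 0;
}
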
-static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
-                                  unsigned char ae,
-                                  struct icp_qat_uof_batch_init
-                                  *umem_init_header)
-{
-       struct icp_qat_uof_batch_init *umem_init;
-
-       if (!umem_init_header)
-               return;
-       umem_init = umem_init_header->next;
-       while (umem_init) {
-               unsigned int addr, *value, size;
-
-               ae = umem_init->ae;
-               addr = umem_init->addr;
-               value = umem_init->value;
-               size = umem_init->size;
-               qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
-               umem_init = umem_init->next;
-       }
-}
-
-static void
-qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
-                                struct icp_qat_uof_batch_init **base)
-{
-       struct icp_qat_uof_batch_init *umem_init;
-
-       umem_init = *base;
-       while (umem_init) {
-               struct icp_qat_uof_batch_init *pre;
-
-               pre = umem_init;
-               umem_init = umem_init->next;
-               kfree(pre);
-       }
-       *base = NULL;
-}
-
-static int qat_uclo_parse_num(char *str, unsigned int *num)
-{
-       char buf[16] = {0};
-       unsigned long ae = 0;
-       int i;
-
-       strncpy(buf, str, 15);
-       for (i = 0; i < 16; i++) {
-               if (!isdigit(buf[i])) {
-                       buf[i] = '\0';
-                       break;
-               }
-       }
-       if ((kstrtoul(buf, 10, &ae)))
-               return -EFAULT;
-
-       *num = (unsigned int)ae;
-       return 0;
-}
-
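qat_uclo_parse_num() extracts the leading decimal digits of a symbol name (the AE number) before converting. A user-space sketch of the same parse, with strtoul() standing in for kstrtoul():

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_num(const char *str, unsigned int *num)
{
        char buf[16] = {0};
        int i;

        strncpy(buf, str, 15);
        for (i = 0; i < 16; i++) {
                if (!isdigit((unsigned char)buf[i])) {
                        buf[i] = '\0';  /* cut at first non-digit */
                        break;
                }
        }
        *num = (unsigned int)strtoul(buf, NULL, 10);
        return 0;
}

int main(void)
{
        unsigned int ae;

        parse_num("12_rest_of_symbol", &ae);    /* hypothetical symbol */
        printf("ae = %u\n", ae);                /* prints 12 */
        return 0;
}
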
-static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
-                                    struct icp_qat_uof_initmem *init_mem,
-                                    unsigned int size_range, unsigned int *ae)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       char *str;
-
-       if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
-               pr_err("QAT: initmem is out of range");
-               return -EINVAL;
-       }
-       if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
-               pr_err("QAT: Memory scope for init_mem error\n");
-               return -EINVAL;
-       }
-       str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
-       if (!str) {
-               pr_err("QAT: AE name assigned in UOF init table is NULL\n");
-               return -EINVAL;
-       }
-       if (qat_uclo_parse_num(str, ae)) {
-               pr_err("QAT: Parse num for AE number failed\n");
-               return -EINVAL;
-       }
-       if (*ae >= ICP_QAT_UCLO_MAX_AE) {
-               pr_err("QAT: ae %d out of range\n", *ae);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
-                                          *handle, struct icp_qat_uof_initmem
-                                          *init_mem, unsigned int ae,
-                                          struct icp_qat_uof_batch_init
-                                          **init_tab_base)
-{
-       struct icp_qat_uof_batch_init *init_header, *tail;
-       struct icp_qat_uof_batch_init *mem_init, *tail_old;
-       struct icp_qat_uof_memvar_attr *mem_val_attr;
-       unsigned int i, flag = 0;
-
-       mem_val_attr =
-               (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
-               sizeof(struct icp_qat_uof_initmem));
-
-       init_header = *init_tab_base;
-       if (!init_header) {
-               init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
-               if (!init_header)
-                       return -ENOMEM;
-               init_header->size = 1;
-               *init_tab_base = init_header;
-               flag = 1;
-       }
-       tail_old = init_header;
-       while (tail_old->next)
-               tail_old = tail_old->next;
-       tail = tail_old;
-       for (i = 0; i < init_mem->val_attr_num; i++) {
-               mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
-               if (!mem_init)
-                       goto out_err;
-               mem_init->ae = ae;
-               mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
-               mem_init->value = &mem_val_attr->value;
-               mem_init->size = 4;
-               mem_init->next = NULL;
-               tail->next = mem_init;
-               tail = mem_init;
-               init_header->size += qat_hal_get_ins_num();
-               mem_val_attr++;
-       }
-       return 0;
-out_err:
-       /* Do not free the list head unless we allocated it. */
-       tail_old = tail_old->next;
-       if (flag) {
-               kfree(*init_tab_base);
-               *init_tab_base = NULL;
-       }
-
-       while (tail_old) {
-               mem_init = tail_old->next;
-               kfree(tail_old);
-               tail_old = mem_init;
-       }
-       return -ENOMEM;
-}
-
-static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
-                                 struct icp_qat_uof_initmem *init_mem)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned int ae;
-
-       if (qat_uclo_fetch_initmem_ae(handle, init_mem,
-                                     handle->chip_info->lm_size, &ae))
-               return -EINVAL;
-       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
-                                           &obj_handle->lm_init_tab[ae]))
-               return -EINVAL;
-       return 0;
-}
-
-static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
-                                 struct icp_qat_uof_initmem *init_mem)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned int ae, ustore_size, uaddr, i;
-       struct icp_qat_uclo_aedata *aed;
-
-       ustore_size = obj_handle->ustore_phy_size;
-       if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
-               return -EINVAL;
-       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
-                                           &obj_handle->umem_init_tab[ae]))
-               return -EINVAL;
-       /* set the highest ustore address referenced */
-       uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
-       aed = &obj_handle->ae_data[ae];
-       for (i = 0; i < aed->slice_num; i++) {
-               if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
-                       aed->ae_slices[i].encap_image->uwords_num = uaddr;
-       }
-       return 0;
-}
-
-static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
-                                  struct icp_qat_uof_initmem *init_mem)
-{
-       switch (init_mem->region) {
-       case ICP_QAT_UOF_LMEM_REGION:
-               if (qat_uclo_init_lmem_seg(handle, init_mem))
-                       return -EINVAL;
-               break;
-       case ICP_QAT_UOF_UMEM_REGION:
-               if (qat_uclo_init_umem_seg(handle, init_mem))
-                       return -EINVAL;
-               break;
-       default:
-               pr_err("QAT: initmem region error. region type=0x%x\n",
-                      init_mem->region);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
-                               struct icp_qat_uclo_encapme *image)
-{
-       unsigned int i;
-       struct icp_qat_uclo_encap_page *page;
-       struct icp_qat_uof_image *uof_image;
-       unsigned char ae;
-       unsigned int ustore_size;
-       unsigned int patt_pos;
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
-       u64 *fill_data;
-
-       uof_image = image->img_ptr;
-       fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
-                           GFP_KERNEL);
-       if (!fill_data)
-               return -ENOMEM;
-       for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
-               memcpy(&fill_data[i], &uof_image->fill_pattern,
-                      sizeof(u64));
-       page = image->page;
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               unsigned long ae_assigned = uof_image->ae_assigned;
-
-               if (!test_bit(ae, &ae_assigned))
-                       continue;
-
-               if (!test_bit(ae, &cfg_ae_mask))
-                       continue;
-
-               ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
-               patt_pos = page->beg_addr_p + page->micro_words_num;
-
-               qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
-                                 page->beg_addr_p, &fill_data[0]);
-               qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
-                                 ustore_size - patt_pos + 1,
-                                 &fill_data[page->beg_addr_p]);
-       }
-       kfree(fill_data);
-       return 0;
-}
-
-static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
-{
-       int i, ae;
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-
-       for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
-               if (initmem->num_in_bytes) {
-                       if (qat_uclo_init_ae_memory(handle, initmem))
-                               return -EINVAL;
-               }
-               initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
-                       (uintptr_t)initmem +
-                       sizeof(struct icp_qat_uof_initmem)) +
-                       (sizeof(struct icp_qat_uof_memvar_attr) *
-                       initmem->val_attr_num));
-       }
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               if (qat_hal_batch_wr_lm(handle, ae,
-                                       obj_handle->lm_init_tab[ae])) {
-                       pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
-                       return -EINVAL;
-               }
-               qat_uclo_cleanup_batch_init_list(handle,
-                                                &obj_handle->lm_init_tab[ae]);
-               qat_uclo_batch_wr_umem(handle, ae,
-                                      obj_handle->umem_init_tab[ae]);
-               qat_uclo_cleanup_batch_init_list(handle,
-                                                &obj_handle->umem_init_tab[ae]);
-       }
-       return 0;
-}
-
-static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
-                                char *chunk_id, void *cur)
-{
-       int i;
-       struct icp_qat_uof_chunkhdr *chunk_hdr =
-           (struct icp_qat_uof_chunkhdr *)
-           ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
-
-       for (i = 0; i < obj_hdr->num_chunks; i++) {
-               if ((cur < (void *)&chunk_hdr[i]) &&
-                   !strncmp(chunk_hdr[i].chunk_id, chunk_id,
-                            ICP_QAT_UOF_OBJID_LEN)) {
-                       return &chunk_hdr[i];
-               }
-       }
-       return NULL;
-}
-
-static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
-{
-       int i;
-       unsigned int topbit = 1 << 0xF;
-       unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
-
-       reg ^= inbyte << 0x8;
-       for (i = 0; i < 0x8; i++) {
-               if (reg & topbit)
-                       reg = (reg << 1) ^ 0x1021;
-               else
-                       reg <<= 1;
-       }
-       return reg & 0xFFFF;
-}
-
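qat_uclo_calc_checksum() is one byte-step of a bitwise CRC-16 with polynomial 0x1021 and zero seed (CRC-16/XMODEM); since the running value is masked to 16 bits, the reg >> 0x18 term is always zero and the step reduces to reg ^= ch << 8. A standalone sketch that folds a buffer the way qat_uclo_calc_str_checksum() does; "123456789" is the conventional check string:

#include <stdio.h>
#include <string.h>

static unsigned int crc16_ccitt_byte(unsigned int reg, int ch)
{
        int i;

        reg ^= (ch & 0xff) << 8;
        for (i = 0; i < 8; i++)
                reg = (reg & 0x8000) ? (reg << 1) ^ 0x1021 : reg << 1;
        return reg & 0xffff;
}

int main(void)
{
        const char *buf = "123456789";
        unsigned int chksum = 0;
        size_t i;

        for (i = 0; i < strlen(buf); i++)
                chksum = crc16_ccitt_byte(chksum, buf[i]);

        printf("crc = 0x%04x\n", chksum);       /* 0x31c3 for "123456789" */
        return 0;
}
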
-static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
-{
-       unsigned int chksum = 0;
-
-       if (ptr)
-               while (num--)
-                       chksum = qat_uclo_calc_checksum(chksum, *ptr++);
-       return chksum;
-}
-
-static struct icp_qat_uclo_objhdr *
-qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
-                  char *chunk_id)
-{
-       struct icp_qat_uof_filechunkhdr *file_chunk;
-       struct icp_qat_uclo_objhdr *obj_hdr;
-       char *chunk;
-       int i;
-
-       file_chunk = (struct icp_qat_uof_filechunkhdr *)
-               (buf + sizeof(struct icp_qat_uof_filehdr));
-       for (i = 0; i < file_hdr->num_chunks; i++) {
-               if (!strncmp(file_chunk->chunk_id, chunk_id,
-                            ICP_QAT_UOF_OBJID_LEN)) {
-                       chunk = buf + file_chunk->offset;
-                       if (file_chunk->checksum != qat_uclo_calc_str_checksum(
-                               chunk, file_chunk->size))
-                               break;
-                       obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
-                       if (!obj_hdr)
-                               break;
-                       obj_hdr->file_buff = chunk;
-                       obj_hdr->checksum = file_chunk->checksum;
-                       obj_hdr->size = file_chunk->size;
-                       return obj_hdr;
-               }
-               file_chunk++;
-       }
-       return NULL;
-}
-
-static int
-qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
-                           struct icp_qat_uof_image *image)
-{
-       struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
-       struct icp_qat_uof_objtable *neigh_reg_tab;
-       struct icp_qat_uof_code_page *code_page;
-
-       code_page = (struct icp_qat_uof_code_page *)
-                       ((char *)image + sizeof(struct icp_qat_uof_image));
-       uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
-                    code_page->uc_var_tab_offset);
-       imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
-                     code_page->imp_var_tab_offset);
-       imp_expr_tab = (struct icp_qat_uof_objtable *)
-                      (encap_uof_obj->beg_uof +
-                      code_page->imp_expr_tab_offset);
-       if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
-           imp_expr_tab->entry_num) {
-               pr_err("QAT: UOF can't contain imported variable to be parsed\n");
-               return -EINVAL;
-       }
-       neigh_reg_tab = (struct icp_qat_uof_objtable *)
-                       (encap_uof_obj->beg_uof +
-                       code_page->neigh_reg_tab_offset);
-       if (neigh_reg_tab->entry_num) {
-               pr_err("QAT: UOF can't contain neighbor register table\n");
-               return -EINVAL;
-       }
-       if (image->numpages > 1) {
-               pr_err("QAT: UOF can't contain multiple pages\n");
-               return -EINVAL;
-       }
-       if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
-               pr_err("QAT: UOF can't use shared control store feature\n");
-               return -EFAULT;
-       }
-       if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
-               pr_err("QAT: UOF can't use reloadable feature\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
-                                    *encap_uof_obj,
-                                    struct icp_qat_uof_image *img,
-                                    struct icp_qat_uclo_encap_page *page)
-{
-       struct icp_qat_uof_code_page *code_page;
-       struct icp_qat_uof_code_area *code_area;
-       struct icp_qat_uof_objtable *uword_block_tab;
-       struct icp_qat_uof_uword_block *uwblock;
-       int i;
-
-       code_page = (struct icp_qat_uof_code_page *)
-                       ((char *)img + sizeof(struct icp_qat_uof_image));
-       page->def_page = code_page->def_page;
-       page->page_region = code_page->page_region;
-       page->beg_addr_v = code_page->beg_addr_v;
-       page->beg_addr_p = code_page->beg_addr_p;
-       code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
-                                               code_page->code_area_offset);
-       page->micro_words_num = code_area->micro_words_num;
-       uword_block_tab = (struct icp_qat_uof_objtable *)
-                         (encap_uof_obj->beg_uof +
-                         code_area->uword_block_tab);
-       page->uwblock_num = uword_block_tab->entry_num;
-       uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
-                       sizeof(struct icp_qat_uof_objtable));
-       page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
-       for (i = 0; i < uword_block_tab->entry_num; i++)
-               page->uwblock[i].micro_words =
-               (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
-}
-
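-/*
- * Walk the IMAG chunks of the UOF, record each image's register, init
- * symbol and sbreak tables, then map its code page. Returns the number of
- * images mapped, or 0 on failure after freeing any pages already mapped.
- */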
-static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
-                              struct icp_qat_uclo_encapme *ae_uimage,
-                              int max_image)
-{
-       int i, j;
-       struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
-       struct icp_qat_uof_image *image;
-       struct icp_qat_uof_objtable *ae_regtab;
-       struct icp_qat_uof_objtable *init_reg_sym_tab;
-       struct icp_qat_uof_objtable *sbreak_tab;
-       struct icp_qat_uof_encap_obj *encap_uof_obj =
-                                       &obj_handle->encap_uof_obj;
-
-       for (j = 0; j < max_image; j++) {
-               chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
-                                               ICP_QAT_UOF_IMAG, chunk_hdr);
-               if (!chunk_hdr)
-                       break;
-               image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
-                                                    chunk_hdr->offset);
-               ae_regtab = (struct icp_qat_uof_objtable *)
-                          (image->reg_tab_offset +
-                          obj_handle->obj_hdr->file_buff);
-               ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
-               ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
-                       (((char *)ae_regtab) +
-                       sizeof(struct icp_qat_uof_objtable));
-               init_reg_sym_tab = (struct icp_qat_uof_objtable *)
-                                  (image->init_reg_sym_tab +
-                                  obj_handle->obj_hdr->file_buff);
-               ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
-               ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
-                       (((char *)init_reg_sym_tab) +
-                       sizeof(struct icp_qat_uof_objtable));
-               sbreak_tab = (struct icp_qat_uof_objtable *)
-                       (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
-               ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
-               ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
-                                     (((char *)sbreak_tab) +
-                                     sizeof(struct icp_qat_uof_objtable));
-               ae_uimage[j].img_ptr = image;
-               if (qat_uclo_check_image_compat(encap_uof_obj, image))
-                       goto out_err;
-               ae_uimage[j].page =
-                       kzalloc(sizeof(struct icp_qat_uclo_encap_page),
-                               GFP_KERNEL);
-               if (!ae_uimage[j].page)
-                       goto out_err;
-               qat_uclo_map_image_page(encap_uof_obj, image,
-                                       ae_uimage[j].page);
-       }
-       return j;
-out_err:
-       for (i = 0; i < j; i++)
-               kfree(ae_uimage[i].page);
-       return 0;
-}
-
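-/*
- * Bind every mapped image to the accel engines it is assigned to,
- * honouring both the hardware AE mask and the configured AE mask.
- */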
-static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
-{
-       int i, ae;
-       int mflag = 0;
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
-
-       for_each_set_bit(ae, &ae_mask, max_ae) {
-               if (!test_bit(ae, &cfg_ae_mask))
-                       continue;
-
-               for (i = 0; i < obj_handle->uimage_num; i++) {
-                       unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;
-
-                       if (!test_bit(ae, &ae_assigned))
-                               continue;
-                       mflag = 1;
-                       if (qat_uclo_init_ae_data(obj_handle, ae, i))
-                               return -EINVAL;
-               }
-       }
-       if (!mflag) {
-               pr_err("QAT: uimage uses AE not set\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static struct icp_qat_uof_strtable *
-qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
-                      char *tab_name, struct icp_qat_uof_strtable *str_table)
-{
-       struct icp_qat_uof_chunkhdr *chunk_hdr;
-
-       chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
-                                       obj_hdr->file_buff, tab_name, NULL);
-       if (chunk_hdr) {
-               int hdr_size;
-
-               memcpy(&str_table->table_len, obj_hdr->file_buff +
-                      chunk_hdr->offset, sizeof(str_table->table_len));
-               hdr_size = (char *)&str_table->strings - (char *)str_table;
-               str_table->strings = (uintptr_t)obj_hdr->file_buff +
-                                       chunk_hdr->offset + hdr_size;
-               return str_table;
-       }
-       return NULL;
-}
-
-static void
-qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
-                          struct icp_qat_uclo_init_mem_table *init_mem_tab)
-{
-       struct icp_qat_uof_chunkhdr *chunk_hdr;
-
-       chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
-                                       ICP_QAT_UOF_IMEM, NULL);
-       if (chunk_hdr) {
-               memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
-                       chunk_hdr->offset, sizeof(unsigned int));
-               init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
-               (encap_uof_obj->beg_uof + chunk_hdr->offset +
-               sizeof(unsigned int));
-       }
-}
-
-static unsigned int
-qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
-{
-       switch (handle->pci_dev->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
-               return ICP_QAT_AC_895XCC_DEV_TYPE;
-       case PCI_DEVICE_ID_INTEL_QAT_C62X:
-               return ICP_QAT_AC_C62X_DEV_TYPE;
-       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
-               return ICP_QAT_AC_C3XXX_DEV_TYPE;
-       case ADF_4XXX_PCI_DEVICE_ID:
-       case ADF_401XX_PCI_DEVICE_ID:
-               return ICP_QAT_AC_4XXX_A_DEV_TYPE;
-       default:
-               pr_err("QAT: unsupported device 0x%x\n",
-                      handle->pci_dev->device);
-               return 0;
-       }
-}
-
-static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
-{
-       unsigned int maj_ver, prod_type = obj_handle->prod_type;
-
-       if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
-               pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
-                      obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
-                      prod_type);
-               return -EINVAL;
-       }
-       maj_ver = obj_handle->prod_rev & 0xff;
-       if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
-           obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
-               pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
-               return -EINVAL;
-       }
-       return 0;
-}
-
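-/*
- * Dispatch a register write to the HAL helper matching the register
- * class; for the absolute variants the context mask is cleared first.
- */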
-static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
-                            unsigned char ae, unsigned char ctx_mask,
-                            enum icp_qat_uof_regtype reg_type,
-                            unsigned short reg_addr, unsigned int value)
-{
-       switch (reg_type) {
-       case ICP_GPA_ABS:
-       case ICP_GPB_ABS:
-               ctx_mask = 0;
-               fallthrough;
-       case ICP_GPA_REL:
-       case ICP_GPB_REL:
-               return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
-                                       reg_addr, value);
-       case ICP_SR_ABS:
-       case ICP_DR_ABS:
-       case ICP_SR_RD_ABS:
-       case ICP_DR_RD_ABS:
-               ctx_mask = 0;
-               fallthrough;
-       case ICP_SR_REL:
-       case ICP_DR_REL:
-       case ICP_SR_RD_REL:
-       case ICP_DR_RD_REL:
-               return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
-                                           reg_addr, value);
-       case ICP_SR_WR_ABS:
-       case ICP_DR_WR_ABS:
-               ctx_mask = 0;
-               fallthrough;
-       case ICP_SR_WR_REL:
-       case ICP_DR_WR_REL:
-               return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
-                                           reg_addr, value);
-       case ICP_NEIGH_REL:
-               return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
-       default:
-               pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
-               return -EFAULT;
-       }
-       return 0;
-}
-
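-/*
- * Apply an image's init-regsym records. The context mask is 0xff when the
- * image uses the maximum context mode and 0x55 (alternate contexts)
- * otherwise; INIT_EXPR records are rejected as unsupported.
- */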
-static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
-                                unsigned int ae,
-                                struct icp_qat_uclo_encapme *encap_ae)
-{
-       unsigned int i;
-       unsigned char ctx_mask;
-       struct icp_qat_uof_init_regsym *init_regsym;
-
-       if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
-           ICP_QAT_UCLO_MAX_CTX)
-               ctx_mask = 0xff;
-       else
-               ctx_mask = 0x55;
-
-       for (i = 0; i < encap_ae->init_regsym_num; i++) {
-               unsigned int exp_res;
-
-               init_regsym = &encap_ae->init_regsym[i];
-               exp_res = init_regsym->value;
-               switch (init_regsym->init_type) {
-               case ICP_QAT_UOF_INIT_REG:
-                       qat_uclo_init_reg(handle, ae, ctx_mask,
-                                         (enum icp_qat_uof_regtype)
-                                         init_regsym->reg_type,
-                                         (unsigned short)init_regsym->reg_addr,
-                                         exp_res);
-                       break;
-               case ICP_QAT_UOF_INIT_REG_CTX:
-                       /* check if ctx is appropriate for the ctxMode */
-                       if (!((1 << init_regsym->ctx) & ctx_mask)) {
-                               pr_err("QAT: invalid ctx num = 0x%x\n",
-                                      init_regsym->ctx);
-                               return -EINVAL;
-                       }
-                       qat_uclo_init_reg(handle, ae,
-                                         (unsigned char)
-                                         (1 << init_regsym->ctx),
-                                         (enum icp_qat_uof_regtype)
-                                         init_regsym->reg_type,
-                                         (unsigned short)init_regsym->reg_addr,
-                                         exp_res);
-                       break;
-               case ICP_QAT_UOF_INIT_EXPR:
-                       pr_err("QAT: INIT_EXPR feature not supported\n");
-                       return -EINVAL;
-               case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
-                       pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
-                       return -EINVAL;
-               default:
-                       break;
-               }
-       }
-       return 0;
-}
-
-static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       struct icp_qat_uclo_aedata *aed;
-       unsigned int s, ae;
-
-       if (obj_handle->global_inited)
-               return 0;
-       if (obj_handle->init_mem_tab.entry_num) {
-               if (qat_uclo_init_memory(handle)) {
-                       pr_err("QAT: initialize memory failed\n");
-                       return -EINVAL;
-               }
-       }
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               aed = &obj_handle->ae_data[ae];
-               for (s = 0; s < aed->slice_num; s++) {
-                       if (!aed->ae_slices[s].encap_image)
-                               continue;
-                       if (qat_uclo_init_reg_sym(handle, ae, aed->ae_slices[s].encap_image))
-                               return -EINVAL;
-               }
-       }
-       obj_handle->global_inited = 1;
-       return 0;
-}
-
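-/*
- * Program the AE's context, neighbour-register and local-memory modes
- * from the image's ae_mode word, skipping features the chip lacks.
- */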
-static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
-                            struct icp_qat_uclo_objhandle *obj_handle,
-                            unsigned char ae,
-                            struct icp_qat_uof_image *uof_image)
-{
-       unsigned char mode;
-       int ret;
-
-       mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
-       ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
-       if (ret) {
-               pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
-               return ret;
-       }
-       if (handle->chip_info->nn) {
-               mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
-               ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
-               if (ret) {
-                       pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
-                       return ret;
-               }
-       }
-       mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
-       ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
-       if (ret) {
-               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
-               return ret;
-       }
-       mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
-       ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
-       if (ret) {
-               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
-               return ret;
-       }
-       if (handle->chip_info->lm2lm3) {
-               mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
-               ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
-               if (ret) {
-                       pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
-                       return ret;
-               }
-               mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
-               ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
-               if (ret) {
-                       pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
-                       return ret;
-               }
-               mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
-               qat_hal_set_ae_tindex_mode(handle, ae, mode);
-       }
-       return 0;
-}
-
-static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uof_image *uof_image;
-       struct icp_qat_uclo_aedata *ae_data;
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
-       unsigned char ae, s;
-       int error;
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               if (!test_bit(ae, &cfg_ae_mask))
-                       continue;
-
-               ae_data = &obj_handle->ae_data[ae];
-               for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
-                                     ICP_QAT_UCLO_MAX_CTX); s++) {
-                       if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
-                               continue;
-                       uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
-                       error = qat_hal_set_modes(handle, obj_handle, ae,
-                                                 uof_image);
-                       if (error)
-                               return error;
-               }
-       }
-       return 0;
-}
-
-static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       struct icp_qat_uclo_encapme *image;
-       int a;
-
-       for (a = 0; a < obj_handle->uimage_num; a++) {
-               image = &obj_handle->ae_uimage[a];
-               image->uwords_num = image->page->beg_addr_p +
-                                       image->page->micro_words_num;
-       }
-}
-
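-/*
- * Top-level UOF parse: verify device compatibility, map the string table,
- * the images and the init-memory table, then program the AE modes.
- */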
-static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned int ae;
-
-       obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
-       obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
-                                            obj_handle->obj_hdr->file_buff;
-       obj_handle->uword_in_bytes = 6;
-       obj_handle->prod_type = qat_uclo_get_dev_type(handle);
-       obj_handle->prod_rev = PID_MAJOR_REV |
-                       (PID_MINOR_REV & handle->hal_handle->revision_id);
-       if (qat_uclo_check_uof_compat(obj_handle)) {
-               pr_err("QAT: UOF incompatible\n");
-               return -EINVAL;
-       }
-       obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
-                                       GFP_KERNEL);
-       if (!obj_handle->uword_buf)
-               return -ENOMEM;
-       obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
-       if (!obj_handle->obj_hdr->file_buff ||
-           !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
-                                   &obj_handle->str_table)) {
-               pr_err("QAT: UOF doesn't have effective images\n");
-               goto out_err;
-       }
-       obj_handle->uimage_num =
-               qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
-                                   ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
-       if (!obj_handle->uimage_num)
-               goto out_err;
-       if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
-               pr_err("QAT: Bad object\n");
-               goto out_check_uof_aemask_err;
-       }
-       qat_uclo_init_uword_num(handle);
-       qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
-                                  &obj_handle->init_mem_tab);
-       if (qat_uclo_set_ae_mode(handle))
-               goto out_check_uof_aemask_err;
-       return 0;
-out_check_uof_aemask_err:
-       for (ae = 0; ae < obj_handle->uimage_num; ae++)
-               kfree(obj_handle->ae_uimage[ae].page);
-out_err:
-       kfree(obj_handle->uword_buf);
-       return -EFAULT;
-}
-
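-/*
- * Record the SUOF file header fields after verifying the checksum, which
- * covers everything from the min_ver field to the end of the file.
- */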
-static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
-                                     struct icp_qat_suof_filehdr *suof_ptr,
-                                     int suof_size)
-{
-       unsigned int check_sum = 0;
-       unsigned int min_ver_offset = 0;
-       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
-
-       suof_handle->file_id = ICP_QAT_SUOF_FID;
-       suof_handle->suof_buf = (char *)suof_ptr;
-       suof_handle->suof_size = suof_size;
-       min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
-                                             min_ver);
-       check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
-                                              min_ver_offset);
-       if (check_sum != suof_ptr->check_sum) {
-               pr_err("QAT: incorrect SUOF checksum\n");
-               return -EINVAL;
-       }
-       suof_handle->check_sum = suof_ptr->check_sum;
-       suof_handle->min_ver = suof_ptr->min_ver;
-       suof_handle->maj_ver = suof_ptr->maj_ver;
-       suof_handle->fw_type = suof_ptr->fw_type;
-       return 0;
-}
-
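-/*
- * Locate the parts of a signed image (CSS header, public key, signature,
- * image body) inside the SUOF buffer and record them in the image header.
- */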
-static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
-                             struct icp_qat_suof_img_hdr *suof_img_hdr,
-                             struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
-{
-       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
-       struct icp_qat_simg_ae_mode *ae_mode;
-       struct icp_qat_suof_objhdr *suof_objhdr;
-
-       suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
-                                  suof_chunk_hdr->offset +
-                                  sizeof(*suof_objhdr));
-       suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
-                                 (suof_handle->suof_buf +
-                                  suof_chunk_hdr->offset))->img_length;
-
-       suof_img_hdr->css_header = suof_img_hdr->simg_buf;
-       suof_img_hdr->css_key = (suof_img_hdr->css_header +
-                                sizeof(struct icp_qat_css_hdr));
-       suof_img_hdr->css_signature = suof_img_hdr->css_key +
-                                     ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
-                                     ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
-       suof_img_hdr->css_simg = suof_img_hdr->css_signature +
-                                ICP_QAT_CSS_SIGNATURE_LEN(handle);
-
-       ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
-       suof_img_hdr->ae_mask = ae_mode->ae_mask;
-       suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
-       suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
-       suof_img_hdr->fw_type = ae_mode->fw_type;
-}
-
-static void
-qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
-                         struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
-{
-       char **sym_str = (char **)&suof_handle->sym_str;
-       unsigned int *sym_size = &suof_handle->sym_size;
-       struct icp_qat_suof_strtable *str_table_obj;
-
-       *sym_size = *(unsigned int *)(uintptr_t)
-                  (suof_chunk_hdr->offset + suof_handle->suof_buf);
-       *sym_str = (char *)(uintptr_t)
-                  (suof_handle->suof_buf + suof_chunk_hdr->offset +
-                  sizeof(str_table_obj->tab_length));
-}
-
-static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
-                                     struct icp_qat_suof_img_hdr *img_hdr)
-{
-       struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
-       unsigned int prod_rev, maj_ver, prod_type;
-
-       prod_type = qat_uclo_get_dev_type(handle);
-       img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
-       prod_rev = PID_MAJOR_REV |
-                        (PID_MINOR_REV & handle->hal_handle->revision_id);
-       if (img_ae_mode->dev_type != prod_type) {
-               pr_err("QAT: incompatible product type %x\n",
-                      img_ae_mode->dev_type);
-               return -EINVAL;
-       }
-       maj_ver = prod_rev & 0xff;
-       if (maj_ver > img_ae_mode->devmax_ver ||
-           maj_ver < img_ae_mode->devmin_ver) {
-               pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
-
-       kfree(sobj_handle->img_table.simg_hdr);
-       sobj_handle->img_table.simg_hdr = NULL;
-       kfree(handle->sobj_handle);
-       handle->sobj_handle = NULL;
-}
-
-static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
-                             unsigned int img_id, unsigned int num_simgs)
-{
-       struct icp_qat_suof_img_hdr img_header;
-
-       if (img_id != num_simgs - 1) {
-               memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
-                      sizeof(*suof_img_hdr));
-               memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
-                      sizeof(*suof_img_hdr));
-               memcpy(&suof_img_hdr[img_id], &img_header,
-                      sizeof(*suof_img_hdr));
-       }
-}
-
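-/*
- * Map every signed image chunk of the SUOF and, on chips without a shared
- * ustore, move the image assigned to AE0 to the end of the table so that
- * it is loaded last.
- */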
-static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
-                            struct icp_qat_suof_filehdr *suof_ptr,
-                            int suof_size)
-{
-       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
-       struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
-       struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
-       int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
-       unsigned int i = 0;
-       struct icp_qat_suof_img_hdr img_header;
-
-       if (!suof_ptr || suof_size == 0) {
-               pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
-               return -EINVAL;
-       }
-       if (qat_uclo_check_suof_format(suof_ptr))
-               return -EINVAL;
-       ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
-       if (ret)
-               return ret;
-       suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
-                        ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
-
-       qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
-       suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
-
-       if (suof_handle->img_table.num_simgs != 0) {
-               suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
-                                      sizeof(img_header),
-                                      GFP_KERNEL);
-               if (!suof_img_hdr)
-                       return -ENOMEM;
-               suof_handle->img_table.simg_hdr = suof_img_hdr;
-
-               for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
-                       qat_uclo_map_simg(handle, &suof_img_hdr[i],
-                                         &suof_chunk_hdr[1 + i]);
-                       ret = qat_uclo_check_simg_compat(handle,
-                                                        &suof_img_hdr[i]);
-                       if (ret)
-                               return ret;
-                       suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
-                       if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
-                               ae0_img = i;
-               }
-
-               if (!handle->chip_info->tgroup_share_ustore) {
-                       qat_uclo_tail_img(suof_img_hdr, ae0_img,
-                                         suof_handle->img_table.num_simgs);
-               }
-       }
-       return 0;
-}
-
-#define ADD_ADDR(high, low)  ((((u64)(high)) << 32) + (low))
-#define BITS_IN_DWORD 32
-
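-/*
- * Point the FCU at the descriptor's CSS header and poll the status CSR
- * until authentication completes, fails verification or times out.
- */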
-static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
-                           struct icp_qat_fw_auth_desc *desc)
-{
-       u32 fcu_sts, retry = 0;
-       u32 fcu_ctl_csr, fcu_sts_csr;
-       u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
-       u64 bus_addr;
-
-       bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
-                          - sizeof(struct icp_qat_auth_chunk);
-
-       fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
-       fcu_sts_csr = handle->chip_info->fcu_sts_csr;
-       fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
-       fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
-
-       SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
-       SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
-       SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
-
-       do {
-               msleep(FW_AUTH_WAIT_PERIOD);
-               fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
-               if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
-                       goto auth_fail;
-               if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
-                       if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
-                               return 0;
-       } while (retry++ < FW_AUTH_MAX_RETRY);
-auth_fail:
-       pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
-              fcu_sts & FCU_AUTH_STS_MASK, retry);
-       return -EINVAL;
-}
-
-static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle,
-                                 int imgid)
-{
-       struct icp_qat_suof_handle *sobj_handle;
-
-       if (!handle->chip_info->tgroup_share_ustore)
-               return false;
-
-       sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
-       if (handle->hal_handle->admin_ae_mask &
-           sobj_handle->img_table.simg_hdr[imgid].ae_mask)
-               return false;
-
-       return true;
-}
-
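-/*
- * Broadcast-load an authenticated image to all idle AEs named in the
- * descriptor mask; only chips with a shared ustore support this.
- */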
-static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
-                                     struct icp_qat_fw_auth_desc *desc)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned long desc_ae_mask = desc->ae_mask;
-       u32 fcu_sts, ae_broadcast_mask = 0;
-       u32 fcu_loaded_csr, ae_loaded;
-       u32 fcu_sts_csr, fcu_ctl_csr;
-       unsigned int ae, retry = 0;
-
-       if (handle->chip_info->tgroup_share_ustore) {
-               fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
-               fcu_sts_csr = handle->chip_info->fcu_sts_csr;
-               fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
-       } else {
-               pr_err("Chip 0x%x doesn't support broadcast load\n",
-                      handle->pci_dev->device);
-               return -EINVAL;
-       }
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
-                       pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
-                       return -EINVAL;
-               }
-
-               if (test_bit(ae, &desc_ae_mask))
-                       ae_broadcast_mask |= 1 << ae;
-       }
-
-       if (ae_broadcast_mask) {
-               SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
-                           ae_broadcast_mask);
-
-               SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD);
-
-               do {
-                       msleep(FW_AUTH_WAIT_PERIOD);
-                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
-                       fcu_sts &= FCU_AUTH_STS_MASK;
-
-                       if (fcu_sts == FCU_STS_LOAD_FAIL) {
-                               pr_err("Broadcast load failed: 0x%x)\n", fcu_sts);
-                               return -EINVAL;
-                       } else if (fcu_sts == FCU_STS_LOAD_DONE) {
-                               ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr);
-                               ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos;
-
-                               if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask)
-                                       break;
-                       }
-               } while (retry++ < FW_AUTH_MAX_RETRY);
-
-               if (retry > FW_AUTH_MAX_RETRY) {
-                       pr_err("QAT: broadcast load failed timeout %d\n", retry);
-                       return -EINVAL;
-               }
-       }
-       return 0;
-}
-
-static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
-                              struct icp_firml_dram_desc *dram_desc,
-                              unsigned int size)
-{
-       void *vptr;
-       dma_addr_t ptr;
-
-       vptr = dma_alloc_coherent(&handle->pci_dev->dev,
-                                 size, &ptr, GFP_KERNEL);
-       if (!vptr)
-               return -ENOMEM;
-       dram_desc->dram_base_addr_v = vptr;
-       dram_desc->dram_bus_addr = ptr;
-       dram_desc->dram_size = size;
-       return 0;
-}
-
-static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
-                              struct icp_firml_dram_desc *dram_desc)
-{
-       if (handle && dram_desc && dram_desc->dram_base_addr_v) {
-               dma_free_coherent(&handle->pci_dev->dev,
-                                 (size_t)(dram_desc->dram_size),
-                                 dram_desc->dram_base_addr_v,
-                                 dram_desc->dram_bus_addr);
-       }
-
-       if (dram_desc)
-               memset(dram_desc, 0, sizeof(*dram_desc));
-}
-
-static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
-                                  struct icp_qat_fw_auth_desc **desc)
-{
-       struct icp_firml_dram_desc dram_desc;
-
-       if (*desc) {
-               dram_desc.dram_base_addr_v = *desc;
-               dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
-                                          (*desc))->chunk_bus_addr;
-               dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
-                                      (*desc))->chunk_size;
-               qat_uclo_simg_free(handle, &dram_desc);
-       }
-}
-
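-/*
- * Sanity-check an AE or MMP image against its CSS header (header length,
- * total size, firmware type) and the per-type maximum image size.
- */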
-static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
-                               char *image, unsigned int size,
-                               unsigned int fw_type)
-{
-       char *fw_type_name = fw_type ? "MMP" : "AE";
-       unsigned int css_dword_size = sizeof(u32);
-
-       if (handle->chip_info->fw_auth) {
-               struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
-               unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
-
-               if ((css_hdr->header_len * css_dword_size) != header_len)
-                       goto err;
-               if ((css_hdr->size * css_dword_size) != size)
-                       goto err;
-               if (fw_type != css_hdr->fw_type)
-                       goto err;
-               if (size <= header_len)
-                       goto err;
-               size -= header_len;
-       }
-
-       if (fw_type == CSS_AE_FIRMWARE) {
-               if (size < sizeof(struct icp_qat_simg_ae_mode *) +
-                   ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
-                       goto err;
-               if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
-                       goto err;
-       } else if (fw_type == CSS_MMP_FIRMWARE) {
-               if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
-                       goto err;
-       } else {
-               pr_err("QAT: Unsupported firmware type\n");
-               return -EINVAL;
-       }
-       return 0;
-
-err:
-       pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
-       return -EINVAL;
-}
-
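-/*
- * Copy the firmware image into DMA-coherent memory in the layout the FCU
- * expects (CSS header, padded public key, signature, image body) and
- * build an auth descriptor pointing at each part by bus address.
- */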
-static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
-                               char *image, unsigned int size,
-                               struct icp_qat_fw_auth_desc **desc)
-{
-       struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
-       struct icp_qat_fw_auth_desc *auth_desc;
-       struct icp_qat_auth_chunk *auth_chunk;
-       u64 virt_addr,  bus_addr, virt_base;
-       unsigned int length, simg_offset = sizeof(*auth_chunk);
-       struct icp_qat_simg_ae_mode *simg_ae_mode;
-       struct icp_firml_dram_desc img_desc;
-
-       if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
-               pr_err("QAT: error, input image size overflow %d\n", size);
-               return -EINVAL;
-       }
-       length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
-                ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
-                size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
-       if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
-               pr_err("QAT: error, allocate continuous dram fail\n");
-               return -ENOMEM;
-       }
-
-       auth_chunk = img_desc.dram_base_addr_v;
-       auth_chunk->chunk_size = img_desc.dram_size;
-       auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
-       virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
-       bus_addr  = img_desc.dram_bus_addr + simg_offset;
-       auth_desc = img_desc.dram_base_addr_v;
-       auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
-       auth_desc->css_hdr_low = (unsigned int)bus_addr;
-       virt_addr = virt_base;
-
-       memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
-       /* pub key */
-       bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
-                          sizeof(*css_hdr);
-       virt_addr = virt_addr + sizeof(*css_hdr);
-
-       auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
-       auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
-
-       memcpy((void *)(uintptr_t)virt_addr,
-              (void *)(image + sizeof(*css_hdr)),
-              ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
-       /* padding */
-       memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
-              0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
-
-       /* exponent */
-       memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
-              ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
-              (void *)(image + sizeof(*css_hdr) +
-                       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
-              sizeof(unsigned int));
-
-       /* signature */
-       bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
-                           auth_desc->fwsk_pub_low) +
-                  ICP_QAT_CSS_FWSK_PUB_LEN(handle);
-       virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
-       auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
-       auth_desc->signature_low = (unsigned int)bus_addr;
-
-       memcpy((void *)(uintptr_t)virt_addr,
-              (void *)(image + sizeof(*css_hdr) +
-              ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
-              ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
-              ICP_QAT_CSS_SIGNATURE_LEN(handle));
-
-       bus_addr = ADD_ADDR(auth_desc->signature_high,
-                           auth_desc->signature_low) +
-                  ICP_QAT_CSS_SIGNATURE_LEN(handle);
-       virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
-
-       auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
-       auth_desc->img_low = (unsigned int)bus_addr;
-       auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
-       memcpy((void *)(uintptr_t)virt_addr,
-              (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
-              auth_desc->img_len);
-       virt_addr = virt_base;
-       /* AE firmware */
-       if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
-           CSS_AE_FIRMWARE) {
-               auth_desc->img_ae_mode_data_high = auth_desc->img_high;
-               auth_desc->img_ae_mode_data_low = auth_desc->img_low;
-               bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
-                                   auth_desc->img_ae_mode_data_low) +
-                          sizeof(struct icp_qat_simg_ae_mode);
-
-               auth_desc->img_ae_init_data_high = (unsigned int)
-                                                (bus_addr >> BITS_IN_DWORD);
-               auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
-               bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
-               auth_desc->img_ae_insts_high = (unsigned int)
-                                            (bus_addr >> BITS_IN_DWORD);
-               auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
-               virt_addr += sizeof(struct icp_qat_css_hdr);
-               virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
-               virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
-               simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr;
-               auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
-       } else {
-               auth_desc->img_ae_insts_high = auth_desc->img_high;
-               auth_desc->img_ae_insts_low = auth_desc->img_low;
-       }
-       *desc = auth_desc;
-       return 0;
-}
-
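-/*
- * Load an authenticated image one AE at a time, polling the FCU status
- * CSR until each engine reports the load done or the retries run out.
- */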
-static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
-                           struct icp_qat_fw_auth_desc *desc)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       u32 fcu_sts_csr, fcu_ctl_csr;
-       u32 loaded_aes, loaded_csr;
-       unsigned int i;
-       u32 fcu_sts;
-
-       fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
-       fcu_sts_csr = handle->chip_info->fcu_sts_csr;
-       loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
-
-       for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) {
-               int retry = 0;
-
-               if (!((desc->ae_mask >> i) & 0x1))
-                       continue;
-               if (qat_hal_check_ae_active(handle, i)) {
-                       pr_err("QAT: AE %d is active\n", i);
-                       return -EINVAL;
-               }
-               SET_CAP_CSR(handle, fcu_ctl_csr,
-                           (FCU_CTRL_CMD_LOAD |
-                           (1 << FCU_CTRL_BROADCAST_POS) |
-                           (i << FCU_CTRL_AE_POS)));
-
-               do {
-                       msleep(FW_AUTH_WAIT_PERIOD);
-                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
-                       if ((fcu_sts & FCU_AUTH_STS_MASK) ==
-                           FCU_STS_LOAD_DONE) {
-                               loaded_aes = GET_CAP_CSR(handle, loaded_csr);
-                               loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos;
-                               if (loaded_aes & (1 << i))
-                                       break;
-                       }
-               } while (retry++ < FW_AUTH_MAX_RETRY);
-               if (retry > FW_AUTH_MAX_RETRY) {
-                       pr_err("QAT: firmware load failed timeout %x\n", retry);
-                       return -EINVAL;
-               }
-       }
-       return 0;
-}
-
-static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
-                                void *addr_ptr, int mem_size)
-{
-       struct icp_qat_suof_handle *suof_handle;
-
-       suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
-       if (!suof_handle)
-               return -ENOMEM;
-       handle->sobj_handle = suof_handle;
-       if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
-               qat_uclo_del_suof(handle);
-               pr_err("QAT: map SUOF failed\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
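-/*
- * Write the MMP image: on parts with firmware authentication it goes
- * through the FCU, otherwise it is copied straight into SRAM.
- */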
-int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
-                      void *addr_ptr, int mem_size)
-{
-       struct icp_qat_fw_auth_desc *desc = NULL;
-       int status = 0;
-       int ret;
-
-       ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
-       if (ret)
-               return ret;
-
-       if (handle->chip_info->fw_auth) {
-               status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
-               if (!status)
-                       status = qat_uclo_auth_fw(handle, desc);
-               qat_uclo_ummap_auth_fw(handle, &desc);
-       } else {
-               if (handle->chip_info->mmp_sram_size < mem_size) {
-                       pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
-                       return -EFBIG;
-               }
-               qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
-       }
-       return status;
-}
-
-static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
-                               void *addr_ptr, int mem_size)
-{
-       struct icp_qat_uof_filehdr *filehdr;
-       struct icp_qat_uclo_objhandle *objhdl;
-
-       objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
-       if (!objhdl)
-               return -ENOMEM;
-       objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
-       if (!objhdl->obj_buf)
-               goto out_objbuf_err;
-       filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
-       if (qat_uclo_check_uof_format(filehdr))
-               goto out_objhdr_err;
-       objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
-                                            ICP_QAT_UOF_OBJS);
-       if (!objhdl->obj_hdr) {
-               pr_err("QAT: object file chunk is null\n");
-               goto out_objhdr_err;
-       }
-       handle->obj_handle = objhdl;
-       if (qat_uclo_parse_uof_obj(handle))
-               goto out_overlay_obj_err;
-       return 0;
-
-out_overlay_obj_err:
-       handle->obj_handle = NULL;
-       kfree(objhdl->obj_hdr);
-out_objhdr_err:
-       kfree(objhdl->obj_buf);
-out_objbuf_err:
-       kfree(objhdl);
-       return -ENOMEM;
-}
-
-static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
-                                    struct icp_qat_mof_file_hdr *mof_ptr,
-                                    u32 mof_size)
-{
-       struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
-       unsigned int min_ver_offset;
-       unsigned int checksum;
-
-       mobj_handle->file_id = ICP_QAT_MOF_FID;
-       mobj_handle->mof_buf = (char *)mof_ptr;
-       mobj_handle->mof_size = mof_size;
-
-       min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr,
-                                            min_ver);
-       checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
-                                             min_ver_offset);
-       if (checksum != mof_ptr->checksum) {
-               pr_err("QAT: incorrect MOF checksum\n");
-               return -EINVAL;
-       }
-
-       mobj_handle->checksum = mof_ptr->checksum;
-       mobj_handle->min_ver = mof_ptr->min_ver;
-       mobj_handle->maj_ver = mof_ptr->maj_ver;
-       return 0;
-}
-
-static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
-
-       kfree(mobj_handle->obj_table.obj_hdr);
-       mobj_handle->obj_table.obj_hdr = NULL;
-       kfree(handle->mobj_handle);
-       handle->mobj_handle = NULL;
-}
-
-static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
-                                       char *obj_name, char **obj_ptr,
-                                       unsigned int *obj_size)
-{
-       struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
-       unsigned int i;
-
-       for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
-               if (!strncmp(obj_hdr[i].obj_name, obj_name,
-                            ICP_QAT_SUOF_OBJ_NAME_LEN)) {
-                       *obj_ptr  = obj_hdr[i].obj_buf;
-                       *obj_size = obj_hdr[i].obj_size;
-                       return 0;
-               }
-       }
-
-       pr_err("QAT: object %s is not found inside MOF\n", obj_name);
-       return -EINVAL;
-}
-
-static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
-                                    struct icp_qat_mof_objhdr *mobj_hdr,
-                                    struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
-{
-       u8 *obj;
-
-       if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG,
-                    ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
-               obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset;
-       } else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG,
-                           ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
-               obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
-       } else {
-               pr_err("QAT: unsupported chunk id\n");
-               return -EINVAL;
-       }
-       mobj_hdr->obj_buf = obj;
-       mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
-       mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str;
-       return 0;
-}
-
-static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
-{
-       struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
-       struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
-       struct icp_qat_mof_obj_hdr *uobj_hdr;
-       struct icp_qat_mof_obj_hdr *sobj_hdr;
-       struct icp_qat_mof_objhdr *mobj_hdr;
-       unsigned int uobj_chunk_num = 0;
-       unsigned int sobj_chunk_num = 0;
-       unsigned int *valid_chunk;
-       int ret, i;
-
-       uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
-       sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
-       if (uobj_hdr)
-               uobj_chunk_num = uobj_hdr->num_chunks;
-       if (sobj_hdr)
-               sobj_chunk_num = sobj_hdr->num_chunks;
-
-       mobj_hdr = kcalloc(uobj_chunk_num + sobj_chunk_num,
-                          sizeof(*mobj_hdr), GFP_KERNEL);
-       if (!mobj_hdr)
-               return -ENOMEM;
-
-       mobj_handle->obj_table.obj_hdr = mobj_hdr;
-       valid_chunk = &mobj_handle->obj_table.num_objs;
-       uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
-                        ((uintptr_t)uobj_hdr + sizeof(*uobj_hdr));
-       sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
-                       ((uintptr_t)sobj_hdr + sizeof(*sobj_hdr));
-
-       /* map UOF objects */
-       for (i = 0; i < uobj_chunk_num; i++) {
-               ret = qat_uclo_map_obj_from_mof(mobj_handle,
-                                               &mobj_hdr[*valid_chunk],
-                                               &uobj_chunkhdr[i]);
-               if (ret)
-                       return ret;
-               (*valid_chunk)++;
-       }
-
-       /* map SUOF objects */
-       for (i = 0; i < sobj_chunk_num; i++) {
-               ret = qat_uclo_map_obj_from_mof(mobj_handle,
-                                               &mobj_hdr[*valid_chunk],
-                                               &sobj_chunkhdr[i]);
-               if (ret)
-                       return ret;
-               (*valid_chunk)++;
-       }
-
-       if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
-               pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
-                                    struct icp_qat_mof_chunkhdr *mof_chunkhdr)
-{
-       char **sym_str = (char **)&mobj_handle->sym_str;
-       unsigned int *sym_size = &mobj_handle->sym_size;
-       struct icp_qat_mof_str_table *str_table_obj;
-
-       *sym_size = *(unsigned int *)(uintptr_t)
-                   (mof_chunkhdr->offset + mobj_handle->mof_buf);
-       *sym_str = (char *)(uintptr_t)
-                  (mobj_handle->mof_buf + mof_chunkhdr->offset +
-                   sizeof(str_table_obj->tab_len));
-}
-
-static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
-                                  struct icp_qat_mof_chunkhdr *mof_chunkhdr)
-{
-       char *chunk_id = mof_chunkhdr->chunk_id;
-
-       if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
-               qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
-       else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
-               mobj_handle->uobjs_hdr = mobj_handle->mof_buf +
-                                        mof_chunkhdr->offset;
-       else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
-               mobj_handle->sobjs_hdr = mobj_handle->mof_buf +
-                                        mof_chunkhdr->offset;
-}
-
-static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
-{
-       int maj = mof_hdr->maj_ver & 0xff;
-       int min = mof_hdr->min_ver & 0xff;
-
-       if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
-               pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
-               return -EINVAL;
-       }
-
-       if (mof_hdr->num_chunks <= 0x1) {
-               pr_err("QAT: MOF chunk amount is incorrect\n");
-               return -EINVAL;
-       }
-       if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
-               pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
-                      maj, min);
-               return -EINVAL;
-       }
-       return 0;
-}
-
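-/*
- * Map a MOF container and look up the named UOF/SUOF object inside it.
- * Plain UOF and SUOF files are passed through unchanged.
- */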
-static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
-                               struct icp_qat_mof_file_hdr *mof_ptr,
-                               u32 mof_size, char *obj_name, char **obj_ptr,
-                               unsigned int *obj_size)
-{
-       struct icp_qat_mof_chunkhdr *mof_chunkhdr;
-       unsigned int file_id = mof_ptr->file_id;
-       struct icp_qat_mof_handle *mobj_handle;
-       unsigned short chunks_num;
-       unsigned int i;
-       int ret;
-
-       if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) {
-               if (obj_ptr)
-                       *obj_ptr = (char *)mof_ptr;
-               if (obj_size)
-                       *obj_size = mof_size;
-               return 0;
-       }
-       if (qat_uclo_check_mof_format(mof_ptr))
-               return -EINVAL;
-
-       mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL);
-       if (!mobj_handle)
-               return -ENOMEM;
-
-       handle->mobj_handle = mobj_handle;
-       ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
-       if (ret)
-               return ret;
-
-       mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr);
-       chunks_num = mof_ptr->num_chunks;
-
-       /* Parse MOF file chunks */
-       for (i = 0; i < chunks_num; i++)
-               qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
-
-       /* All sym_objs, uobjs and sobjs should be available */
-       if (!mobj_handle->sym_str ||
-           (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
-               return -EINVAL;
-
-       ret = qat_uclo_map_objs_from_mof(mobj_handle);
-       if (ret)
-               return ret;
-
-       /* Seek the specified UOF object in the MOF */
-       return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name,
-                                           obj_ptr, obj_size);
-}
-
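-/*
- * Entry point for loading a firmware object: resolve it from a MOF when a
- * name is given, then map it as SUOF (authenticated parts) or UOF.
- */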
-int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
-                    void *addr_ptr, u32 mem_size, char *obj_name)
-{
-       char *obj_addr;
-       u32 obj_size;
-       int ret;
-
-       BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
-                    (sizeof(handle->hal_handle->ae_mask) * 8));
-
-       if (!handle || !addr_ptr || mem_size < 24)
-               return -EINVAL;
-
-       if (obj_name) {
-               ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name,
-                                          &obj_addr, &obj_size);
-               if (ret)
-                       return ret;
-       } else {
-               obj_addr = addr_ptr;
-               obj_size = mem_size;
-       }
-
-       return (handle->chip_info->fw_auth) ?
-                       qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
-                       qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
-}
-
-void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned int a;
-
-       if (handle->mobj_handle)
-               qat_uclo_del_mof(handle);
-       if (handle->sobj_handle)
-               qat_uclo_del_suof(handle);
-       if (!obj_handle)
-               return;
-
-       kfree(obj_handle->uword_buf);
-       for (a = 0; a < obj_handle->uimage_num; a++)
-               kfree(obj_handle->ae_uimage[a].page);
-
-       for (a = 0; a < handle->hal_handle->ae_max_num; a++)
-               qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
-
-       kfree(obj_handle->obj_hdr);
-       kfree(obj_handle->obj_buf);
-       kfree(obj_handle);
-       handle->obj_handle = NULL;
-}
-
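-/*
- * Fetch the 44-bit micro-word for the given address from the page's uword
- * blocks, falling back to the fill pattern for gaps and invalid words.
- */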
-static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
-                                struct icp_qat_uclo_encap_page *encap_page,
-                                u64 *uword, unsigned int addr_p,
-                                unsigned int raddr, u64 fill)
-{
-       unsigned int i, addr;
-       u64 uwrd = 0;
-
-       if (!encap_page) {
-               *uword = fill;
-               return;
-       }
-       addr = (encap_page->page_region) ? raddr : addr_p;
-       for (i = 0; i < encap_page->uwblock_num; i++) {
-               if (addr >= encap_page->uwblock[i].start_addr &&
-                   addr <= encap_page->uwblock[i].start_addr +
-                   encap_page->uwblock[i].words_num - 1) {
-                       addr -= encap_page->uwblock[i].start_addr;
-                       addr *= obj_handle->uword_in_bytes;
-                       memcpy(&uwrd, (void *)(((uintptr_t)
-                              encap_page->uwblock[i].micro_words) + addr),
-                              obj_handle->uword_in_bytes);
-                       uwrd = uwrd & GENMASK_ULL(43, 0);
-               }
-       }
-       *uword = uwrd;
-       if (*uword == INVLD_UWORD)
-               *uword = fill;
-}
-
-static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
-                                       struct icp_qat_uclo_encap_page
-                                       *encap_page, unsigned int ae)
-{
-       unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       u64 fill_pat;
-
-       /* load the page starting at appropriate ustore address */
-       /* get fill-pattern from an image -- they are all the same */
-       memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
-              sizeof(u64));
-       uw_physical_addr = encap_page->beg_addr_p;
-       uw_relative_addr = 0;
-       words_num = encap_page->micro_words_num;
-       while (words_num) {
-               if (words_num < UWORD_CPYBUF_SIZE)
-                       cpylen = words_num;
-               else
-                       cpylen = UWORD_CPYBUF_SIZE;
-
-               /* load the buffer */
-               for (i = 0; i < cpylen; i++)
-                       qat_uclo_fill_uwords(obj_handle, encap_page,
-                                            &obj_handle->uword_buf[i],
-                                            uw_physical_addr + i,
-                                            uw_relative_addr + i, fill_pat);
-
-               /* copy the buffer to ustore */
-               qat_hal_wr_uwords(handle, (unsigned char)ae,
-                                 uw_physical_addr, cpylen,
-                                 obj_handle->uword_buf);
-
-               uw_physical_addr += cpylen;
-               uw_relative_addr += cpylen;
-               words_num -= cpylen;
-       }
-}
-
-static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
-                                   struct icp_qat_uof_image *image)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
-       unsigned long ae_assigned = image->ae_assigned;
-       struct icp_qat_uclo_aedata *aed;
-       unsigned int ctx_mask, s;
-       struct icp_qat_uclo_page *page;
-       unsigned char ae;
-       int ctx;
-
-       if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
-               ctx_mask = 0xff;
-       else
-               ctx_mask = 0x55;
-       /* load the default page and set assigned CTX PC
-        * to the entrypoint address */
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               if (!test_bit(ae, &cfg_ae_mask))
-                       continue;
-
-               if (!test_bit(ae, &ae_assigned))
-                       continue;
-
-               aed = &obj_handle->ae_data[ae];
-               /* find the slice to which this image is assigned */
-               for (s = 0; s < aed->slice_num; s++) {
-                       if (image->ctx_assigned &
-                           aed->ae_slices[s].ctx_mask_assigned)
-                               break;
-               }
-               if (s >= aed->slice_num)
-                       continue;
-               page = aed->ae_slices[s].page;
-               if (!page->encap_page->def_page)
-                       continue;
-               qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
-
-               page = aed->ae_slices[s].page;
-               for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
-                       aed->ae_slices[s].cur_page[ctx] =
-                                       (ctx_mask & (1 << ctx)) ? page : NULL;
-               qat_hal_set_live_ctx(handle, (unsigned char)ae,
-                                    image->ctx_assigned);
-               qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
-                              image->entry_address);
-       }
-}
-
-static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned int i;
-       struct icp_qat_fw_auth_desc *desc = NULL;
-       struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
-       struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
-       int ret;
-
-       for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
-               ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
-                                          simg_hdr[i].simg_len,
-                                          CSS_AE_FIRMWARE);
-               if (ret)
-                       return ret;
-
-               if (qat_uclo_map_auth_fw(handle,
-                                        (char *)simg_hdr[i].simg_buf,
-                                        (unsigned int)
-                                        simg_hdr[i].simg_len,
-                                        &desc))
-                       goto wr_err;
-               if (qat_uclo_auth_fw(handle, desc))
-                       goto wr_err;
-               if (qat_uclo_is_broadcast(handle, i)) {
-                       if (qat_uclo_broadcast_load_fw(handle, desc))
-                               goto wr_err;
-               } else {
-                       if (qat_uclo_load_fw(handle, desc))
-                               goto wr_err;
-               }
-               qat_uclo_ummap_auth_fw(handle, &desc);
-       }
-       return 0;
-wr_err:
-       qat_uclo_ummap_auth_fw(handle, &desc);
-       return -EINVAL;
-}
-
-static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned int i;
-
-       if (qat_uclo_init_globals(handle))
-               return -EINVAL;
-       for (i = 0; i < obj_handle->uimage_num; i++) {
-               if (!obj_handle->ae_uimage[i].img_ptr)
-                       return -EINVAL;
-               if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
-                       return -EINVAL;
-               qat_uclo_wr_uimage_page(handle,
-                                       obj_handle->ae_uimage[i].img_ptr);
-       }
-       return 0;
-}
-
-int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
-{
-       return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) :
-                                  qat_uclo_wr_uof_img(handle);
-}
-
-int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
-                            unsigned int cfg_ae_mask)
-{
-       if (!cfg_ae_mask)
-               return -EINVAL;
-
-       handle->cfg_ae_mask = cfg_ae_mask;
-       return 0;
-}
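
For reference, the heart of the deleted loader's ustore fill path is a 44-bit mask: each raw micro-word is truncated with GENMASK_ULL(43, 0) and, if it matches the invalid-word sentinel, replaced by the image's fill pattern. A minimal standalone model of that step — the INVLD_UWORD value here is a placeholder for this sketch, not the driver's actual definition:

#include <stdint.h>
#include <stdio.h>

#define UWORD_MASK  ((1ULL << 44) - 1)  /* same value as GENMASK_ULL(43, 0) */
#define INVLD_UWORD UWORD_MASK          /* placeholder sentinel, assumed */

/* Truncate a raw 64-bit word to a 44-bit micro-word; an invalid word is
 * replaced by the fill pattern, mirroring qat_uclo_fill_uwords() above. */
static uint64_t fill_uword(uint64_t raw, uint64_t fill)
{
        uint64_t uwrd = raw & UWORD_MASK;

        return uwrd == INVLD_UWORD ? fill : uwrd;
}

int main(void)
{
        /* An all-ones word masks to the sentinel, so the fill wins. */
        printf("%#llx\n", (unsigned long long)fill_uword(~0ULL, 0xA5));
        return 0;
}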
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
deleted file mode 100644
index 38d6f8e..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
-qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
deleted file mode 100644
index bc80bb4..0000000
+++ /dev/null
@@ -1,252 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include "adf_dh895xcc_hw_data.h"
-#include "icp_qat_hw.h"
-
-#define ADF_DH895XCC_VF_MSK    0xFFFFFFFF
-
-/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
-       0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
-       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
-       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
-};
-
-static struct adf_hw_device_class dh895xcc_class = {
-       .name = ADF_DH895XCC_DEVICE_NAME,
-       .type = DEV_DH895XCC,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       u32 fuses = self->fuses;
-
-       return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
-                        ADF_DH895XCC_ACCELERATORS_MASK;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       u32 fuses = self->fuses;
-
-       return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCC_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCC_ETR_BAR;
-}
-
-static u32 get_sram_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCC_SRAM_BAR;
-}
-
-static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
-       u32 capabilities;
-       u32 legfuses;
-
-       capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
-                      ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
-                      ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
-                      ICP_ACCEL_CAPABILITIES_CIPHER |
-                      ICP_ACCEL_CAPABILITIES_COMPRESSION;
-
-       /* Read accelerator capabilities mask */
-       pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
-
-       /* A set bit in legfuses means the feature is OFF in this SKU */
-       if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
-       if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
-
-       return capabilities;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
-           >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
-
-       switch (sku) {
-       case ADF_DH895XCC_FUSECTL_SKU_1:
-               return DEV_SKU_1;
-       case ADF_DH895XCC_FUSECTL_SKU_2:
-               return DEV_SKU_2;
-       case ADF_DH895XCC_FUSECTL_SKU_3:
-               return DEV_SKU_3;
-       case ADF_DH895XCC_FUSECTL_SKU_4:
-               return DEV_SKU_4;
-       default:
-               return DEV_SKU_UNKNOWN;
-       }
-       return DEV_SKU_UNKNOWN;
-}
-
-static const u32 *adf_get_arbiter_mapping(void)
-{
-       return thrd_to_arb_map;
-}
-
-static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
-{
-       /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
-       if (vf_mask & 0xFFFF) {
-               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
-                         & ~ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
-       }
-
-       /* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
-       if (vf_mask >> 16) {
-               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
-                         & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
-       }
-}
-
-static void disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       u32 val;
-
-       /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
-       val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
-             | ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
-
-       /* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
-       val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
-             | ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
-}
-
-static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       u32 sources, pending, disabled;
-       u32 errsou3, errmsk3;
-       u32 errsou5, errmsk5;
-
-       /* Get the interrupt sources triggered by VFs */
-       errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
-       errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
-       sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
-                 | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
-
-       if (!sources)
-               return 0;
-
-       /* Get the already disabled interrupts */
-       errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
-       errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
-       disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
-                  | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
-
-       pending = sources & ~disabled;
-       if (!pending)
-               return 0;
-
-       /* Due to HW limitations, when disabling the interrupts, we can't
-        * just disable the requested sources, as this would lead to missed
-        * interrupts if the sources change just before the writes to ERRMSK3
-        * and ERRMSK5.
-        * To work around it, disable all sources and re-enable only those
-        * that were neither pending nor already disabled. Re-enabling will
-        * trigger a new interrupt for any source that has changed in the
-        * meantime.
-        */
-       errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
-       errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
-
-       errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
-       errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
-
-       /* Return the sources of the (new) interrupt(s) */
-       return pending;
-}
-
-static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
-{
-       adf_gen2_cfg_iov_thds(accel_dev, enable,
-                             ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS,
-                             ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS);
-}
-
-void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &dh895xcc_class;
-       hw_data->instance_id = dh895xcc_class.instances++;
-       hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_isr_resource_alloc;
-       hw_data->free_irq = adf_isr_resource_free;
-       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_accel_cap = get_accel_cap;
-       hw_data->get_num_accels = adf_gen2_get_num_accels;
-       hw_data->get_num_aes = adf_gen2_get_num_aes;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_admin_info = adf_gen2_get_admin_info;
-       hw_data->get_arb_info = adf_gen2_get_arb_info;
-       hw_data->get_sram_bar_id = get_sram_bar_id;
-       hw_data->get_sku = get_sku;
-       hw_data->fw_name = ADF_DH895XCC_FW;
-       hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
-       hw_data->init_admin_comms = adf_init_admin_comms;
-       hw_data->exit_admin_comms = adf_exit_admin_comms;
-       hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->send_admin_init = adf_send_admin_init;
-       hw_data->init_arb = adf_init_arb;
-       hw_data->exit_arb = adf_exit_arb;
-       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
-       hw_data->enable_ints = adf_gen2_enable_ints;
-       hw_data->reset_device = adf_reset_sbr;
-       hw_data->disable_iov = adf_disable_sriov;
-       hw_data->dev_config = adf_gen2_dev_config;
-
-       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
-       hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
-       hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
-       hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-}
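
The comment in disable_pending_vf2pf_interrupts() above describes a two-step ERRMSK update: mask every source first, then re-enable only those that were neither raised nor already masked, so anything that fired in between re-triggers on re-enable. A standalone model of just that mask arithmetic, collapsed to a single fake register with made-up values:

#include <stdint.h>
#include <stdio.h>

static uint32_t errmsk;  /* stands in for ERRMSK3/ERRMSK5; set bit = masked */

static uint32_t disable_pending(uint32_t sources)
{
        uint32_t disabled = errmsk;
        uint32_t pending = sources & ~disabled;

        if (!pending)
                return 0;

        /* Step 1: mask everything, so no source is lost mid-update. */
        errmsk = ~0u;

        /* Step 2: keep the raised and previously masked bits masked;
         * everything else is re-enabled and may re-trigger. */
        errmsk = sources | disabled;

        return pending;
}

int main(void)
{
        errmsk = 0x1;                                    /* VF0 already masked */
        printf("%#x\n", (unsigned)disable_pending(0x3)); /* VF0+VF1 raised -> 0x2 */
        return 0;
}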
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
deleted file mode 100644
index 7b674bb..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_DH895x_HW_DATA_H_
-#define ADF_DH895x_HW_DATA_H_
-
-/* PCIe configuration space */
-#define ADF_DH895XCC_SRAM_BAR 0
-#define ADF_DH895XCC_PMISC_BAR 1
-#define ADF_DH895XCC_ETR_BAR 2
-#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
-#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
-#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
-#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
-#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
-#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
-#define ADF_DH895XCC_MAX_ACCELERATORS 6
-#define ADF_DH895XCC_MAX_ACCELENGINES 12
-#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
-#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
-#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
-#define ADF_DH895XCC_ETR_MAX_BANKS 32
-
-/* Masks for VF2PF interrupts */
-#define ADF_DH895XCC_ERR_REG_VF2PF_L(vf_src)   (((vf_src) & 0x01FFFE00) >> 9)
-#define ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask)  (((vf_mask) & 0xFFFF) << 9)
-#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src)   (((vf_src) & 0x0000FFFF) << 16)
-#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask)  ((vf_mask) >> 16)
-
-/* AE to function mapping */
-#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
-#define ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS 12
-
-/* FW names */
-#define ADF_DH895XCC_FW "qat_895xcc.bin"
-#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
-
-void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
-#endif
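
The VF2PF macros above just move a 16-bit VF bitmap in and out of the register layout: the lower VFs occupy bits 24:9 of ERRSOU3/ERRMSK3. A standalone round-trip check using the exact macro definitions from this header:

#include <stdint.h>
#include <stdio.h>

/* Copied verbatim from the header above. */
#define ERR_REG_VF2PF_L(vf_src)   (((vf_src) & 0x01FFFE00) >> 9)
#define ERR_MSK_VF2PF_L(vf_mask)  (((vf_mask) & 0xFFFF) << 9)

int main(void)
{
        uint32_t vf_mask = 0x3;                   /* VFs 0 and 1 */
        uint32_t reg = ERR_MSK_VF2PF_L(vf_mask);  /* register layout: 0x600 */

        /* Back from register layout to a plain VF bitmap: prints 0x3. */
        printf("%#x\n", (unsigned)ERR_REG_VF2PF_L(reg));
        return 0;
}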
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
deleted file mode 100644
index ebeb17b..0000000
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_dh895xcc_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_DH895XCC_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-       .sriov_configure = adf_sriov_configure,
-       .err_handler = &adf_err_handler,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
-                       adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       adf_devmgr_rm_dev(accel_dev, NULL);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
-               /* If the accelerator is connected to a node with no memory
-                * there is no point in using the accelerator since the remote
-                * memory transaction will be very slow. */
-               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
-               return -EINVAL;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /* Add accel device to accel table.
-        * This should be called before adf_cleanup_accel is called */
-       if (adf_devmgr_add_dev(accel_dev, NULL)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_dh895xcc(accel_dev->hw_device);
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
-       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
-                             &hw_data->fuses);
-
-       /* Get Accelerators and Accelerators Engines masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-       /* If the device has no acceleration engines then ignore it. */
-       if (!hw_data->accel_mask || !hw_data->ae_mask ||
-           ((~hw_data->ae_mask) & 0x01)) {
-               dev_err(&pdev->dev, "No acceleration units found");
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       pcie_set_readrq(pdev, 1024);
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Get accelerator capabilities mask */
-       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
-
-       /* Find and map all the device's BARS */
-       i = 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-
-       adf_enable_aer(accel_dev);
-
-       if (pci_save_state(pdev)) {
-               dev_err(&pdev->dev, "Failed to save pci state\n");
-               ret = -ENOMEM;
-               goto out_err_disable_aer;
-       }
-
-       ret = hw_data->dev_config(accel_dev);
-       if (ret)
-               goto out_err_disable_aer;
-
-       ret = adf_dev_init(accel_dev);
-       if (ret)
-               goto out_err_dev_shutdown;
-
-       ret = adf_dev_start(accel_dev);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
-       adf_dev_shutdown(accel_dev);
-out_err_disable_aer:
-       adf_disable_aer(accel_dev);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-       adf_disable_aer(accel_dev);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_FIRMWARE(ADF_DH895XCC_FW);
-MODULE_FIRMWARE(ADF_DH895XCC_MMP);
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
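
The probe path just deleted is a textbook goto-unwind ladder: every failure jumps to a label that tears down exactly the steps completed so far, in reverse order of initialisation. A compilable toy of the shape, with hypothetical stand-ins for the real PCI calls:

#include <stdio.h>

/* Hypothetical stand-ins; the comments name the real calls they mimic. */
static int step_enable(void)  { puts("enable");  return 0;  }
static int step_regions(void) { puts("regions"); return -1; } /* forced failure */
static void undo_enable(void) { puts("disable"); }

static int probe_shape(void)
{
        int ret;

        ret = step_enable();            /* cf. pci_enable_device() */
        if (ret)
                goto out;
        ret = step_regions();           /* cf. pci_request_regions() */
        if (ret)
                goto err_disable;
        return 0;

err_disable:
        undo_enable();                  /* cf. pci_disable_device() */
out:
        return ret;
}

int main(void)
{
        return probe_shape() ? 1 : 0;
}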
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile
deleted file mode 100644
index 0153c85..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
-qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
deleted file mode 100644
index 70e56cc..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include <adf_pfvf_vf_msg.h>
-#include "adf_dh895xccvf_hw_data.h"
-
-static struct adf_hw_device_class dh895xcciov_class = {
-       .name = ADF_DH895XCCVF_DEVICE_NAME,
-       .type = DEV_DH895XCCVF,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_ACCELERATORS_MASK;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_ACCELENGINES_MASK;
-}
-
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_MAX_ACCELERATORS;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_MAX_ACCELENGINES;
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_ETR_BAR;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       return DEV_SKU_VF;
-}
-
-static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
-{
-       return 0;
-}
-
-static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
-{
-}
-
-void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &dh895xcciov_class;
-       hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
-       hw_data->free_irq = adf_vf_isr_resource_free;
-       hw_data->enable_error_correction = adf_vf_void_noop;
-       hw_data->init_admin_comms = adf_vf_int_noop;
-       hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_notify_init;
-       hw_data->init_arb = adf_vf_int_noop;
-       hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_num_accels = get_num_accels;
-       hw_data->get_num_aes = get_num_aes;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_sku = get_sku;
-       hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->dev_class->instances++;
-       hw_data->dev_config = adf_gen2_dev_config;
-       adf_devmgr_update_class_index(hw_data);
-       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-       adf_devmgr_update_class_index(hw_data);
-}
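
One pattern worth flagging in the deleted VF code above: hooks that only matter on the PF (admin comms, arbiter setup, error correction) are wired to int/void no-op callbacks instead of being left NULL, so shared code can invoke them unconditionally. A tiny standalone illustration with a hypothetical ops table:

#include <stdio.h>

struct hw_ops {                         /* hypothetical, for illustration */
        int  (*init_arb)(void);
        void (*exit_arb)(void);
};

static int  int_noop(void)  { return 0; }   /* cf. adf_vf_int_noop() */
static void void_noop(void) { }             /* cf. adf_vf_void_noop() */

static const struct hw_ops vf_ops = {
        .init_arb = int_noop,           /* a VF has no arbiter to program */
        .exit_arb = void_noop,
};

int main(void)
{
        /* Callers need no NULL checks before using the hooks. */
        if (vf_ops.init_arb())
                return 1;
        vf_ops.exit_arb();
        puts("ok");
        return 0;
}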
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
deleted file mode 100644
index 6973fa9..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#ifndef ADF_DH895XVF_HW_DATA_H_
-#define ADF_DH895XVF_HW_DATA_H_
-
-#define ADF_DH895XCCIOV_PMISC_BAR 1
-#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
-#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
-#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
-#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
-#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
-#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
-#define ADF_DH895XCCIOV_ETR_BAR 0
-#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
-
-void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
-#endif
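
The ring constants above describe the VF's single bank: a TX mask of 0xFF marks rings 0-7 as request rings, and the RX offset of 8 is the distance to the matching response ring. A small model of that pairing, assuming the usual QAT request/response convention:

#include <stdio.h>

#define TX_RINGS_MASK   0xFF  /* rings 0-7 carry requests */
#define RX_RINGS_OFFSET 8     /* response ring = request ring + 8 (assumed) */

int main(void)
{
        unsigned int tx;

        for (tx = 0; tx < 8; tx++)
                if ((1u << tx) & TX_RINGS_MASK)
                        printf("tx ring %u -> rx ring %u\n",
                               tx, tx + RX_RINGS_OFFSET);
        return 0;
}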
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
deleted file mode 100644
index c1485e7..0000000
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_dh895xccvf_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_DH895XCCVF_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       struct adf_accel_dev *pf;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
-                       adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
-       adf_devmgr_rm_dev(accel_dev, pf);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_dev *pf;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       accel_dev->is_vf = true;
-       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /* Add accel device to accel table */
-       if (adf_devmgr_add_dev(accel_dev, pf)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
-
-       /* Get Accelerators and Accelerators Engines masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Find and map all the device's BARS */
-       i = 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-       /* Completion for VF2PF request/response message exchange */
-       init_completion(&accel_dev->vf.msg_received);
-
-       ret = adf_dev_init(accel_dev);
-       if (ret)
-               goto out_err_dev_shutdown;
-
-       ret = adf_dev_start(accel_dev);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_stop(accel_dev);
-out_err_dev_shutdown:
-       adf_dev_shutdown(accel_dev);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_flush_vf_wq(accel_dev);
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-       adf_clean_vf_map(true);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
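
The VF probe above also initialises accel_dev->vf.msg_received, the completion that pairs a VF2PF request with its PF response. A kernel-side sketch of that handshake shape (not a standalone program; the CSR write is elided):

#include <linux/completion.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(msg_received);

/* Interrupt/bottom-half side: the PF's response has arrived. */
static void on_response(void)
{
        complete(&msg_received);
}

/* Requester side: arm the completion, send, then wait with a timeout. */
static int send_and_wait(unsigned long timeout_jiffies)
{
        reinit_completion(&msg_received);
        /* ... write the request to the PFVF CSR here ... */
        if (!wait_for_completion_timeout(&msg_received, timeout_jiffies))
                return -ETIMEDOUT;
        return 0;
}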
index 74deca4f96e06d9085adc6592630ef157cb7736f..fce49c0dee3e2dd73e6b359b506c2bedfb794195 100644
@@ -5,6 +5,7 @@
 
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
+#include <linux/interconnect.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
@@ -22,6 +23,8 @@
 #define QCE_MAJOR_VERSION5     0x05
 #define QCE_QUEUE_LENGTH       1
 
+#define QCE_DEFAULT_MEM_BANDWIDTH      393600
+
 static const struct qce_algo_ops *qce_ops[] = {
 #ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
        &skcipher_ops,
@@ -206,22 +209,30 @@ static int qce_crypto_probe(struct platform_device *pdev)
        if (ret < 0)
                return ret;
 
-       qce->core = devm_clk_get(qce->dev, "core");
+       qce->core = devm_clk_get_optional(qce->dev, "core");
        if (IS_ERR(qce->core))
                return PTR_ERR(qce->core);
 
-       qce->iface = devm_clk_get(qce->dev, "iface");
+       qce->iface = devm_clk_get_optional(qce->dev, "iface");
        if (IS_ERR(qce->iface))
                return PTR_ERR(qce->iface);
 
-       qce->bus = devm_clk_get(qce->dev, "bus");
+       qce->bus = devm_clk_get_optional(qce->dev, "bus");
        if (IS_ERR(qce->bus))
                return PTR_ERR(qce->bus);
 
-       ret = clk_prepare_enable(qce->core);
+       qce->mem_path = devm_of_icc_get(qce->dev, "memory");
+       if (IS_ERR(qce->mem_path))
+               return PTR_ERR(qce->mem_path);
+
+       ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH, QCE_DEFAULT_MEM_BANDWIDTH);
        if (ret)
                return ret;
 
+       ret = clk_prepare_enable(qce->core);
+       if (ret)
+               goto err_mem_path_disable;
+
        ret = clk_prepare_enable(qce->iface);
        if (ret)
                goto err_clks_core;
@@ -260,6 +271,9 @@ err_clks_iface:
        clk_disable_unprepare(qce->iface);
 err_clks_core:
        clk_disable_unprepare(qce->core);
+err_mem_path_disable:
+       icc_set_bw(qce->mem_path, 0, 0);
+
        return ret;
 }
 
@@ -279,6 +293,7 @@ static int qce_crypto_remove(struct platform_device *pdev)
 static const struct of_device_id qce_crypto_of_match[] = {
        { .compatible = "qcom,crypto-v5.1", },
        { .compatible = "qcom,crypto-v5.4", },
+       { .compatible = "qcom,qce", },
        {}
 };
 MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
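
The qce hunks above are scattered; reassembled, the new probe order is: acquire the "memory" interconnect path, vote bandwidth, then enable clocks, and drop the vote last on the error path. The sequence as it reads after the patch, with the calls exactly as shown in the hunks:

        /* devm_* ties the path's lifetime to the device. */
        qce->mem_path = devm_of_icc_get(qce->dev, "memory");
        if (IS_ERR(qce->mem_path))
                return PTR_ERR(qce->mem_path);

        /* Vote for DDR bandwidth before the block is clocked. */
        ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH,
                         QCE_DEFAULT_MEM_BANDWIDTH);
        if (ret)
                return ret;

        ret = clk_prepare_enable(qce->core);
        if (ret)
                goto err_mem_path_disable;

        /* ... iface and bus clocks, then the rest of probe ... */

err_mem_path_disable:
        /* Drop the vote: zero average and zero peak bandwidth. */
        icc_set_bw(qce->mem_path, 0, 0);
        return ret;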
index 085774cdf641b191d7428f81df55944d39697b5e..228fcd69ec511078e258662e66ed2c35db5612fd 100644
@@ -35,6 +35,7 @@ struct qce_device {
        void __iomem *base;
        struct device *dev;
        struct clk *core, *iface, *bus;
+       struct icc_path *mem_path;
        struct qce_dma_data dma;
        int burst_size;
        unsigned int pipe_pair_id;
index f4bc06c24ad8fd99379920b6fe60682b70a204e8..df5f9d675c572ea9238c6133cf4f3d15a3d14c2d 100644
@@ -1037,7 +1037,7 @@ static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
 
 static void sa_aes_dma_in_callback(void *data)
 {
-       struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+       struct sa_rx_data *rxd = data;
        struct skcipher_request *req;
        u32 *result;
        __be32 *mdptr;
@@ -1351,7 +1351,7 @@ static int sa_decrypt(struct skcipher_request *req)
 
 static void sa_sha_dma_in_callback(void *data)
 {
-       struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+       struct sa_rx_data *rxd = data;
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int authsize;
@@ -1689,7 +1689,7 @@ static void sa_sha_cra_exit(struct crypto_tfm *tfm)
 
 static void sa_aead_dma_in_callback(void *data)
 {
-       struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+       struct sa_rx_data *rxd = data;
        struct aead_request *req;
        struct crypto_aead *tfm;
        unsigned int start;
index dd4c703cd85592528c6f73df10f6da6d93b8ef85..4c799df3e8838bd585695a0a1ed967d43bf735d9 100644
@@ -1035,7 +1035,7 @@ static int sahara_sha_process(struct ahash_request *req)
 
 static int sahara_queue_manage(void *data)
 {
-       struct sahara_dev *dev = (struct sahara_dev *)data;
+       struct sahara_dev *dev = data;
        struct crypto_async_request *async_req;
        struct crypto_async_request *backlog;
        int ret = 0;
@@ -1270,7 +1270,7 @@ static struct ahash_alg sha_v4_algs[] = {
 
 static irqreturn_t sahara_irq_handler(int irq, void *data)
 {
-       struct sahara_dev *dev = (struct sahara_dev *)data;
+       struct sahara_dev *dev = data;
        unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
        unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
 
index 7bf805563ac2922cf80c932737d56f05833f6837..f0df32382719c8b65ff5e0243350ab41d2f0f728 100644
@@ -7,7 +7,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/crypto.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
@@ -68,7 +67,7 @@
 #define HASH_MASK_DATA_INPUT           BIT(1)
 
 /* Context swap register */
-#define HASH_CSR_REGISTER_NUMBER       53
+#define HASH_CSR_REGISTER_NUMBER       54
 
 /* Status Flags */
 #define HASH_SR_DATA_INPUT_READY       BIT(0)
@@ -96,7 +95,7 @@
 #define HASH_FLAGS_SHA1                        BIT(19)
 #define HASH_FLAGS_SHA224              BIT(20)
 #define HASH_FLAGS_SHA256              BIT(21)
-#define HASH_FLAGS_ERRORS              BIT(22)
+#define HASH_FLAGS_EMPTY               BIT(22)
 #define HASH_FLAGS_HMAC                        BIT(23)
 
 #define HASH_OP_UPDATE                 1
@@ -127,15 +126,24 @@ struct stm32_hash_ctx {
        int                     keylen;
 };
 
+struct stm32_hash_state {
+       u32                     flags;
+
+       u16                     bufcnt;
+       u16                     buflen;
+
+       u8 buffer[HASH_BUFLEN] __aligned(4);
+
+       /* hash state */
+       u32                     hw_context[3 + HASH_CSR_REGISTER_NUMBER];
+};
+
 struct stm32_hash_request_ctx {
        struct stm32_hash_dev   *hdev;
-       unsigned long           flags;
        unsigned long           op;
 
        u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
        size_t                  digcnt;
-       size_t                  bufcnt;
-       size_t                  buflen;
 
        /* DMA */
        struct scatterlist      *sg;
@@ -149,10 +157,7 @@ struct stm32_hash_request_ctx {
 
        u8                      data_type;
 
-       u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
-
-       /* Export Context */
-       u32                     *hw_context;
+       struct stm32_hash_state state;
 };
 
 struct stm32_hash_algs_info {
@@ -183,7 +188,6 @@ struct stm32_hash_dev {
        struct ahash_request    *req;
        struct crypto_engine    *engine;
 
-       int                     err;
        unsigned long           flags;
 
        struct dma_chan         *dma_lch;
@@ -270,11 +274,12 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
        struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct stm32_hash_state *state = &rctx->state;
 
        u32 reg = HASH_CR_INIT;
 
        if (!(hdev->flags & HASH_FLAGS_INIT)) {
-               switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+               switch (state->flags & HASH_FLAGS_ALGO_MASK) {
                case HASH_FLAGS_MD5:
                        reg |= HASH_CR_ALGO_MD5;
                        break;
@@ -299,20 +304,13 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
 
                reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
 
-               if (rctx->flags & HASH_FLAGS_HMAC) {
+               if (state->flags & HASH_FLAGS_HMAC) {
                        hdev->flags |= HASH_FLAGS_HMAC;
                        reg |= HASH_CR_MODE;
                        if (ctx->keylen > HASH_LONG_KEY)
                                reg |= HASH_CR_LKEY;
                }
 
-               /*
-                * On the Ux500 we need to set a special flag to indicate that
-                * the message is zero length.
-                */
-               if (hdev->pdata->ux500 && bufcnt == 0)
-                       reg |= HASH_CR_UX500_EMPTYMSG;
-
                if (!hdev->polled)
                        stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
 
@@ -326,11 +324,12 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
 
 static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
 {
+       struct stm32_hash_state *state = &rctx->state;
        size_t count;
 
-       while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
+       while ((state->bufcnt < state->buflen) && rctx->total) {
                count = min(rctx->sg->length - rctx->offset, rctx->total);
-               count = min(count, rctx->buflen - rctx->bufcnt);
+               count = min_t(size_t, count, state->buflen - state->bufcnt);
 
                if (count <= 0) {
                        if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
@@ -341,10 +340,10 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
                        }
                }
 
-               scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
-                                        rctx->offset, count, 0);
+               scatterwalk_map_and_copy(state->buffer + state->bufcnt,
+                                        rctx->sg, rctx->offset, count, 0);
 
-               rctx->bufcnt += count;
+               state->bufcnt += count;
                rctx->offset += count;
                rctx->total -= count;
 
@@ -361,13 +360,23 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
 static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
                               const u8 *buf, size_t length, int final)
 {
+       struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+       struct stm32_hash_state *state = &rctx->state;
        unsigned int count, len32;
        const u32 *buffer = (const u32 *)buf;
        u32 reg;
 
-       if (final)
+       if (final) {
                hdev->flags |= HASH_FLAGS_FINAL;
 
+               /* Do not process empty messages if hw is buggy. */
+               if (!(hdev->flags & HASH_FLAGS_INIT) && !length &&
+                   hdev->pdata->broken_emptymsg) {
+                       state->flags |= HASH_FLAGS_EMPTY;
+                       return 0;
+               }
+       }
+
        len32 = DIV_ROUND_UP(length, sizeof(u32));
 
        dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
@@ -413,36 +422,48 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
 static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+       struct stm32_hash_state *state = &rctx->state;
+       u32 *preg = state->hw_context;
        int bufcnt, err = 0, final;
+       int i;
 
-       dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
+       dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
 
-       final = (rctx->flags & HASH_FLAGS_FINUP);
+       final = state->flags & HASH_FLAGS_FINAL;
 
-       while ((rctx->total >= rctx->buflen) ||
-              (rctx->bufcnt + rctx->total >= rctx->buflen)) {
+       while ((rctx->total >= state->buflen) ||
+              (state->bufcnt + rctx->total >= state->buflen)) {
                stm32_hash_append_sg(rctx);
-               bufcnt = rctx->bufcnt;
-               rctx->bufcnt = 0;
-               err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
+               bufcnt = state->bufcnt;
+               state->bufcnt = 0;
+               err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
+               if (err)
+                       return err;
        }
 
        stm32_hash_append_sg(rctx);
 
        if (final) {
-               bufcnt = rctx->bufcnt;
-               rctx->bufcnt = 0;
-               err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 1);
-
-               /* If we have an IRQ, wait for that, else poll for completion */
-               if (hdev->polled) {
-                       if (stm32_hash_wait_busy(hdev))
-                               return -ETIMEDOUT;
-                       hdev->flags |= HASH_FLAGS_OUTPUT_READY;
-                       err = 0;
-               }
+               bufcnt = state->bufcnt;
+               state->bufcnt = 0;
+               return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
        }
 
+       if (!(hdev->flags & HASH_FLAGS_INIT))
+               return 0;
+
+       if (stm32_hash_wait_busy(hdev))
+               return -ETIMEDOUT;
+
+       if (!hdev->pdata->ux500)
+               *preg++ = stm32_hash_read(hdev, HASH_IMR);
+       *preg++ = stm32_hash_read(hdev, HASH_STR);
+       *preg++ = stm32_hash_read(hdev, HASH_CR);
+       for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+               *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+
+       state->flags |= HASH_FLAGS_INIT;
+
        return err;
 }
 
@@ -584,10 +605,10 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
 static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+       u32 *buffer = (void *)rctx->state.buffer;
        struct scatterlist sg[1], *tsg;
        int err = 0, len = 0, reg, ncp = 0;
        unsigned int i;
-       u32 *buffer = (void *)rctx->buffer;
 
        rctx->sg = hdev->req->src;
        rctx->total = hdev->req->nbytes;
@@ -615,7 +636,7 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 
                                ncp = sg_pcopy_to_buffer(
                                        rctx->sg, rctx->nents,
-                                       rctx->buffer, sg->length - len,
+                                       rctx->state.buffer, sg->length - len,
                                        rctx->total - sg->length + len);
 
                                sg->length = len;
@@ -726,47 +747,52 @@ static int stm32_hash_init(struct ahash_request *req)
        struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+       struct stm32_hash_state *state = &rctx->state;
 
        rctx->hdev = hdev;
 
-       rctx->flags = HASH_FLAGS_CPU;
+       state->flags = HASH_FLAGS_CPU;
 
        rctx->digcnt = crypto_ahash_digestsize(tfm);
        switch (rctx->digcnt) {
        case MD5_DIGEST_SIZE:
-               rctx->flags |= HASH_FLAGS_MD5;
+               state->flags |= HASH_FLAGS_MD5;
                break;
        case SHA1_DIGEST_SIZE:
-               rctx->flags |= HASH_FLAGS_SHA1;
+               state->flags |= HASH_FLAGS_SHA1;
                break;
        case SHA224_DIGEST_SIZE:
-               rctx->flags |= HASH_FLAGS_SHA224;
+               state->flags |= HASH_FLAGS_SHA224;
                break;
        case SHA256_DIGEST_SIZE:
-               rctx->flags |= HASH_FLAGS_SHA256;
+               state->flags |= HASH_FLAGS_SHA256;
                break;
        default:
                return -EINVAL;
        }
 
-       rctx->bufcnt = 0;
-       rctx->buflen = HASH_BUFLEN;
+       rctx->state.bufcnt = 0;
+       rctx->state.buflen = HASH_BUFLEN;
        rctx->total = 0;
        rctx->offset = 0;
        rctx->data_type = HASH_DATA_8_BITS;
 
-       memset(rctx->buffer, 0, HASH_BUFLEN);
-
        if (ctx->flags & HASH_FLAGS_HMAC)
-               rctx->flags |= HASH_FLAGS_HMAC;
+               state->flags |= HASH_FLAGS_HMAC;
 
-       dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
+       dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);
 
        return 0;
 }
 
 static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
 {
+       struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+       struct stm32_hash_state *state = &rctx->state;
+
+       if (!(state->flags & HASH_FLAGS_CPU))
+               return stm32_hash_dma_send(hdev);
+
        return stm32_hash_update_cpu(hdev);
 }
 
@@ -774,26 +800,15 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
 {
        struct ahash_request *req = hdev->req;
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
-       int err;
-       int buflen = rctx->bufcnt;
+       struct stm32_hash_state *state = &rctx->state;
+       int buflen = state->bufcnt;
 
-       rctx->bufcnt = 0;
+       if (state->flags & HASH_FLAGS_FINUP)
+               return stm32_hash_update_req(hdev);
 
-       if (!(rctx->flags & HASH_FLAGS_CPU))
-               err = stm32_hash_dma_send(hdev);
-       else
-               err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
+       state->bufcnt = 0;
 
-       /* If we have an IRQ, wait for that, else poll for completion */
-       if (hdev->polled) {
-               if (stm32_hash_wait_busy(hdev))
-                       return -ETIMEDOUT;
-               hdev->flags |= HASH_FLAGS_OUTPUT_READY;
-               /* Caller will call stm32_hash_finish_req() */
-               err = 0;
-       }
-
-       return err;
+       return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
 }
 
 static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
@@ -828,14 +843,15 @@ static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
 static void stm32_hash_copy_hash(struct ahash_request *req)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+       struct stm32_hash_state *state = &rctx->state;
        struct stm32_hash_dev *hdev = rctx->hdev;
        __be32 *hash = (void *)rctx->digest;
        unsigned int i, hashsize;
 
-       if (hdev->pdata->broken_emptymsg && !req->nbytes)
+       if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
                return stm32_hash_emptymsg_fallback(req);
 
-       switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+       switch (state->flags & HASH_FLAGS_ALGO_MASK) {
        case HASH_FLAGS_MD5:
                hashsize = MD5_DIGEST_SIZE;
                break;
@@ -882,13 +898,6 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
        if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
                stm32_hash_copy_hash(req);
                err = stm32_hash_finish(req);
-               hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
-                                HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
-                                HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
-                                HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
-                                HASH_FLAGS_HMAC_KEY);
-       } else {
-               rctx->flags |= HASH_FLAGS_ERRORS;
        }
 
        pm_runtime_mark_last_busy(hdev->dev);
@@ -897,73 +906,70 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
        crypto_finalize_hash_request(hdev->engine, req, err);
 }
 
-static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
-                             struct stm32_hash_request_ctx *rctx)
-{
-       pm_runtime_get_sync(hdev->dev);
-
-       if (!(HASH_FLAGS_INIT & hdev->flags)) {
-               stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
-               stm32_hash_write(hdev, HASH_STR, 0);
-               stm32_hash_write(hdev, HASH_DIN, 0);
-               stm32_hash_write(hdev, HASH_IMR, 0);
-               hdev->err = 0;
-       }
-
-       return 0;
-}
-
-static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
-static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
-
 static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
                                   struct ahash_request *req)
 {
        return crypto_transfer_hash_request_to_engine(hdev->engine, req);
 }
 
-static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
+static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
 {
        struct ahash_request *req = container_of(areq, struct ahash_request,
                                                 base);
        struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+       struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
-       struct stm32_hash_request_ctx *rctx;
+       struct stm32_hash_state *state = &rctx->state;
+       int err = 0;
 
        if (!hdev)
                return -ENODEV;
 
-       hdev->req = req;
-
-       rctx = ahash_request_ctx(req);
-
        dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
                rctx->op, req->nbytes);
 
-       return stm32_hash_hw_init(hdev, rctx);
-}
+       pm_runtime_get_sync(hdev->dev);
 
-static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
-{
-       struct ahash_request *req = container_of(areq, struct ahash_request,
-                                                base);
-       struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
-       struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
-       struct stm32_hash_request_ctx *rctx;
-       int err = 0;
+       hdev->req = req;
+       hdev->flags = 0;
+
+       if (state->flags & HASH_FLAGS_INIT) {
+               u32 *preg = rctx->state.hw_context;
+               u32 reg;
+               int i;
+
+               if (!hdev->pdata->ux500)
+                       stm32_hash_write(hdev, HASH_IMR, *preg++);
+               stm32_hash_write(hdev, HASH_STR, *preg++);
+               stm32_hash_write(hdev, HASH_CR, *preg);
+               reg = *preg++ | HASH_CR_INIT;
+               stm32_hash_write(hdev, HASH_CR, reg);
 
-       if (!hdev)
-               return -ENODEV;
+               for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+                       stm32_hash_write(hdev, HASH_CSR(i), *preg++);
 
-       hdev->req = req;
+               hdev->flags |= HASH_FLAGS_INIT;
 
-       rctx = ahash_request_ctx(req);
+               if (state->flags & HASH_FLAGS_HMAC)
+                       hdev->flags |= HASH_FLAGS_HMAC |
+                                      HASH_FLAGS_HMAC_KEY;
+       }
 
        if (rctx->op == HASH_OP_UPDATE)
                err = stm32_hash_update_req(hdev);
        else if (rctx->op == HASH_OP_FINAL)
                err = stm32_hash_final_req(hdev);
 
+       /* If we have an IRQ, wait for that, else poll for completion */
+       if (err == -EINPROGRESS && hdev->polled) {
+               if (stm32_hash_wait_busy(hdev)) {
+                       err = -ETIMEDOUT;
+               } else {
+                       hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+                       err = 0;
+               }
+       }
+
        if (err != -EINPROGRESS)
        /* done task will not finish it, so do it here */
                stm32_hash_finish_req(req, err);
@@ -985,15 +991,16 @@ static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
 static int stm32_hash_update(struct ahash_request *req)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+       struct stm32_hash_state *state = &rctx->state;
 
-       if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
+       if (!req->nbytes || !(state->flags & HASH_FLAGS_CPU))
                return 0;
 
        rctx->total = req->nbytes;
        rctx->sg = req->src;
        rctx->offset = 0;
 
-       if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
+       if ((state->bufcnt + rctx->total < state->buflen)) {
                stm32_hash_append_sg(rctx);
                return 0;
        }
@@ -1004,8 +1011,9 @@ static int stm32_hash_update(struct ahash_request *req)
 static int stm32_hash_final(struct ahash_request *req)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+       struct stm32_hash_state *state = &rctx->state;
 
-       rctx->flags |= HASH_FLAGS_FINUP;
+       state->flags |= HASH_FLAGS_FINAL;
 
        return stm32_hash_enqueue(req, HASH_OP_FINAL);
 }
@@ -1015,25 +1023,21 @@ static int stm32_hash_finup(struct ahash_request *req)
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
        struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
-       int err1, err2;
+       struct stm32_hash_state *state = &rctx->state;
 
-       rctx->flags |= HASH_FLAGS_FINUP;
+       if (!req->nbytes)
+               goto out;
 
-       if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
-               rctx->flags &= ~HASH_FLAGS_CPU;
-
-       err1 = stm32_hash_update(req);
-
-       if (err1 == -EINPROGRESS || err1 == -EBUSY)
-               return err1;
+       state->flags |= HASH_FLAGS_FINUP;
+       rctx->total = req->nbytes;
+       rctx->sg = req->src;
+       rctx->offset = 0;
 
-       /*
-        * final() has to be always called to cleanup resources
-        * even if update() failed, except EINPROGRESS
-        */
-       err2 = stm32_hash_final(req);
+       if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
+               state->flags &= ~HASH_FLAGS_CPU;
 
-       return err1 ?: err2;
+out:
+       return stm32_hash_final(req);
 }
 
 static int stm32_hash_digest(struct ahash_request *req)
@@ -1044,35 +1048,8 @@ static int stm32_hash_digest(struct ahash_request *req)
 static int stm32_hash_export(struct ahash_request *req, void *out)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
-       struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
-       struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
-       u32 *preg;
-       unsigned int i;
-       int ret;
 
-       pm_runtime_get_sync(hdev->dev);
-
-       ret = stm32_hash_wait_busy(hdev);
-       if (ret)
-               return ret;
-
-       rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
-                                        sizeof(u32),
-                                        GFP_KERNEL);
-
-       preg = rctx->hw_context;
-
-       if (!hdev->pdata->ux500)
-               *preg++ = stm32_hash_read(hdev, HASH_IMR);
-       *preg++ = stm32_hash_read(hdev, HASH_STR);
-       *preg++ = stm32_hash_read(hdev, HASH_CR);
-       for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
-               *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
-
-       pm_runtime_mark_last_busy(hdev->dev);
-       pm_runtime_put_autosuspend(hdev->dev);
-
-       memcpy(out, rctx, sizeof(*rctx));
+       memcpy(out, &rctx->state, sizeof(rctx->state));
 
        return 0;
 }
@@ -1080,32 +1057,9 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
 static int stm32_hash_import(struct ahash_request *req, const void *in)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
-       struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
-       struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
-       const u32 *preg = in;
-       u32 reg;
-       unsigned int i;
-
-       memcpy(rctx, in, sizeof(*rctx));
-
-       preg = rctx->hw_context;
-
-       pm_runtime_get_sync(hdev->dev);
-
-       if (!hdev->pdata->ux500)
-               stm32_hash_write(hdev, HASH_IMR, *preg++);
-       stm32_hash_write(hdev, HASH_STR, *preg++);
-       stm32_hash_write(hdev, HASH_CR, *preg);
-       reg = *preg++ | HASH_CR_INIT;
-       stm32_hash_write(hdev, HASH_CR, reg);
-
-       for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
-               stm32_hash_write(hdev, HASH_CSR(i), *preg++);
-
-       pm_runtime_mark_last_busy(hdev->dev);
-       pm_runtime_put_autosuspend(hdev->dev);
 
-       kfree(rctx->hw_context);
+       stm32_hash_init(req);
+       memcpy(&rctx->state, in, sizeof(rctx->state));
 
        return 0;
 }
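
With this rework, export/import no longer touches the hardware at all: stm32_hash_export() is a plain memcpy of the self-contained struct stm32_hash_state (which now also carries the saved CSR snapshot in hw_context), and stm32_hash_import() re-runs stm32_hash_init() before copying the state back. A minimal caller-side sketch of the export/import contract this relies on — "tfm", "req", "sgl" and "digest" are illustrative placeholders, not names from the patch:

    /* Sketch: suspend and resume an in-progress hash via export/import.
     * Error handling is elided; the state buffer is sized from the
     * algorithm's advertised statesize (sizeof(struct stm32_hash_state)
     * after this patch).
     */
    void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);

    ahash_request_set_crypt(req, sgl, NULL, len);
    crypto_ahash_init(req);
    crypto_ahash_update(req);         /* feed a first chunk of data */
    crypto_ahash_export(req, state);  /* now just a memcpy of the state */

    /* ...later, possibly from a fresh request... */
    crypto_ahash_import(req, state);  /* init + memcpy back */
    ahash_request_set_crypt(req, NULL, digest, 0);
    crypto_ahash_final(req);
    kfree(state);
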
@@ -1162,8 +1116,6 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
                ctx->flags |= HASH_FLAGS_HMAC;
 
        ctx->enginectx.op.do_one_request = stm32_hash_one_request;
-       ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
-       ctx->enginectx.op.unprepare_request = NULL;
 
        return stm32_hash_init_fallback(tfm);
 }
@@ -1255,7 +1207,7 @@ static struct ahash_alg algs_md5[] = {
                .import = stm32_hash_import,
                .halg = {
                        .digestsize = MD5_DIGEST_SIZE,
-                       .statesize = sizeof(struct stm32_hash_request_ctx),
+                       .statesize = sizeof(struct stm32_hash_state),
                        .base = {
                                .cra_name = "md5",
                                .cra_driver_name = "stm32-md5",
@@ -1282,7 +1234,7 @@ static struct ahash_alg algs_md5[] = {
                .setkey = stm32_hash_setkey,
                .halg = {
                        .digestsize = MD5_DIGEST_SIZE,
-                       .statesize = sizeof(struct stm32_hash_request_ctx),
+                       .statesize = sizeof(struct stm32_hash_state),
                        .base = {
                                .cra_name = "hmac(md5)",
                                .cra_driver_name = "stm32-hmac-md5",
@@ -1311,7 +1263,7 @@ static struct ahash_alg algs_sha1[] = {
                .import = stm32_hash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
-                       .statesize = sizeof(struct stm32_hash_request_ctx),
+                       .statesize = sizeof(struct stm32_hash_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "stm32-sha1",
@@ -1338,7 +1290,7 @@ static struct ahash_alg algs_sha1[] = {
                .setkey = stm32_hash_setkey,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
-                       .statesize = sizeof(struct stm32_hash_request_ctx),
+                       .statesize = sizeof(struct stm32_hash_state),
                        .base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "stm32-hmac-sha1",
@@ -1367,7 +1319,7 @@ static struct ahash_alg algs_sha224[] = {
                .import = stm32_hash_import,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
-                       .statesize = sizeof(struct stm32_hash_request_ctx),
+                       .statesize = sizeof(struct stm32_hash_state),
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "stm32-sha224",
@@ -1394,7 +1346,7 @@ static struct ahash_alg algs_sha224[] = {
                .import = stm32_hash_import,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
-                       .statesize = sizeof(struct stm32_hash_request_ctx),
+                       .statesize = sizeof(struct stm32_hash_state),
                        .base = {
                                .cra_name = "hmac(sha224)",
                                .cra_driver_name = "stm32-hmac-sha224",
@@ -1423,7 +1375,7 @@ static struct ahash_alg algs_sha256[] = {
                .import = stm32_hash_import,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
-                       .statesize = sizeof(struct stm32_hash_request_ctx),
+                       .statesize = sizeof(struct stm32_hash_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "stm32-sha256",
@@ -1450,7 +1402,7 @@ static struct ahash_alg algs_sha256[] = {
                .setkey = stm32_hash_setkey,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
-                       .statesize = sizeof(struct stm32_hash_request_ctx),
+                       .statesize = sizeof(struct stm32_hash_state),
                        .base = {
                                .cra_name = "hmac(sha256)",
                                .cra_driver_name = "stm32-hmac-sha256",
@@ -1616,8 +1568,7 @@ static int stm32_hash_probe(struct platform_device *pdev)
        if (!hdev)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hdev->io_base = devm_ioremap_resource(dev, res);
+       hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(hdev->io_base))
                return PTR_ERR(hdev->io_base);
 
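
The probe-path change above is the stock conversion to the combined helper; it behaves exactly like the removed two-call sequence. The generic before/after shape, for reference:

    /* Before: fetch the resource, then map it. */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);

    /* After: one call does both and still hands back the resource. */
    base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
    if (IS_ERR(base))
            return PTR_ERR(base);
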
index 25eb4e8fd22fdde6b3fbf0603757ec2fa38beaa1..4b4323bbf268d5f1991c5178d058b5ffe428a865 100644 (file)
@@ -566,9 +566,12 @@ config I2C_DESIGNWARE_PLATFORM
 
 config I2C_DESIGNWARE_AMDPSP
        bool "AMD PSP I2C semaphore support"
-       depends on X86_MSR
        depends on ACPI
+       depends on CRYPTO_DEV_SP_PSP
+       depends on PCI
        depends on I2C_DESIGNWARE_PLATFORM
+       depends on (I2C_DESIGNWARE_PLATFORM=y && CRYPTO_DEV_CCP_DD=y) || \
+                  (I2C_DESIGNWARE_PLATFORM=m && CRYPTO_DEV_CCP_DD)
        help
          This driver enables managed host access to the selected I2C bus shared
          between AMD CPU and AMD PSP.
index 8f36167bce6244fd5dfc74f7612334bead5fe904..63454b06e5da17b648c6db3e28d4a6b76b2977f0 100644 (file)
@@ -1,41 +1,21 @@
 // SPDX-License-Identifier: GPL-2.0
 
-#include <linux/bitfield.h>
-#include <linux/bits.h>
 #include <linux/i2c.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/psp-sev.h>
-#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/psp-platform-access.h>
+#include <linux/psp.h>
 #include <linux/workqueue.h>
 
-#include <asm/msr.h>
-
 #include "i2c-designware-core.h"
 
-#define MSR_AMD_PSP_ADDR       0xc00110a2
-#define PSP_MBOX_OFFSET                0x10570
-#define PSP_CMD_TIMEOUT_US     (500 * USEC_PER_MSEC)
-
 #define PSP_I2C_RESERVATION_TIME_MS 100
 
-#define PSP_I2C_REQ_BUS_CMD            0x64
 #define PSP_I2C_REQ_RETRY_CNT          400
 #define PSP_I2C_REQ_RETRY_DELAY_US     (25 * USEC_PER_MSEC)
 #define PSP_I2C_REQ_STS_OK             0x0
 #define PSP_I2C_REQ_STS_BUS_BUSY       0x1
 #define PSP_I2C_REQ_STS_INV_PARAM      0x3
 
-#define PSP_MBOX_FIELDS_STS            GENMASK(15, 0)
-#define PSP_MBOX_FIELDS_CMD            GENMASK(23, 16)
-#define PSP_MBOX_FIELDS_RESERVED       GENMASK(29, 24)
-#define PSP_MBOX_FIELDS_RECOVERY       BIT(30)
-#define PSP_MBOX_FIELDS_READY          BIT(31)
-
-struct psp_req_buffer_hdr {
-       u32 total_size;
-       u32 status;
-};
-
 enum psp_i2c_req_type {
        PSP_I2C_REQ_ACQUIRE,
        PSP_I2C_REQ_RELEASE,
@@ -47,118 +27,13 @@ struct psp_i2c_req {
        enum psp_i2c_req_type type;
 };
 
-struct psp_mbox {
-       u32 cmd_fields;
-       u64 i2c_req_addr;
-} __packed;
-
 static DEFINE_MUTEX(psp_i2c_access_mutex);
 static unsigned long psp_i2c_sem_acquired;
-static void __iomem *mbox_iomem;
 static u32 psp_i2c_access_count;
 static bool psp_i2c_mbox_fail;
 static struct device *psp_i2c_dev;
 
-/*
- * Implementation of PSP-x86 i2c-arbitration mailbox introduced for AMD Cezanne
- * family of SoCs.
- */
-
-static int psp_get_mbox_addr(unsigned long *mbox_addr)
-{
-       unsigned long long psp_mmio;
-
-       if (rdmsrl_safe(MSR_AMD_PSP_ADDR, &psp_mmio))
-               return -EIO;
-
-       *mbox_addr = (unsigned long)(psp_mmio + PSP_MBOX_OFFSET);
-
-       return 0;
-}
-
-static int psp_mbox_probe(void)
-{
-       unsigned long mbox_addr;
-       int ret;
-
-       ret = psp_get_mbox_addr(&mbox_addr);
-       if (ret)
-               return ret;
-
-       mbox_iomem = ioremap(mbox_addr, sizeof(struct psp_mbox));
-       if (!mbox_iomem)
-               return -ENOMEM;
-
-       return 0;
-}
-
-/* Recovery field should be equal 0 to start sending commands */
-static int psp_check_mbox_recovery(struct psp_mbox __iomem *mbox)
-{
-       u32 tmp;
-
-       tmp = readl(&mbox->cmd_fields);
-
-       return FIELD_GET(PSP_MBOX_FIELDS_RECOVERY, tmp);
-}
-
-static int psp_wait_cmd(struct psp_mbox __iomem *mbox)
-{
-       u32 tmp, expected;
-
-       /* Expect mbox_cmd to be cleared and ready bit to be set by PSP */
-       expected = FIELD_PREP(PSP_MBOX_FIELDS_READY, 1);
-
-       /*
-        * Check for readiness of PSP mailbox in a tight loop in order to
-        * process further as soon as command was consumed.
-        */
-       return readl_poll_timeout(&mbox->cmd_fields, tmp, (tmp == expected),
-                                 0, PSP_CMD_TIMEOUT_US);
-}
-
-/* Status equal to 0 means that PSP succeed processing command */
-static u32 psp_check_mbox_sts(struct psp_mbox __iomem *mbox)
-{
-       u32 cmd_reg;
-
-       cmd_reg = readl(&mbox->cmd_fields);
-
-       return FIELD_GET(PSP_MBOX_FIELDS_STS, cmd_reg);
-}
-
-static int psp_send_cmd(struct psp_i2c_req *req)
-{
-       struct psp_mbox __iomem *mbox = mbox_iomem;
-       phys_addr_t req_addr;
-       u32 cmd_reg;
-
-       if (psp_check_mbox_recovery(mbox))
-               return -EIO;
-
-       if (psp_wait_cmd(mbox))
-               return -EBUSY;
-
-       /*
-        * Fill mailbox with address of command-response buffer, which will be
-        * used for sending i2c requests as well as reading status returned by
-        * PSP. Use physical address of buffer, since PSP will map this region.
-        */
-       req_addr = __psp_pa((void *)req);
-       writeq(req_addr, &mbox->i2c_req_addr);
-
-       /* Write command register to trigger processing */
-       cmd_reg = FIELD_PREP(PSP_MBOX_FIELDS_CMD, PSP_I2C_REQ_BUS_CMD);
-       writel(cmd_reg, &mbox->cmd_fields);
-
-       if (psp_wait_cmd(mbox))
-               return -ETIMEDOUT;
-
-       if (psp_check_mbox_sts(mbox))
-               return -EIO;
-
-       return 0;
-}
+static int (*_psp_send_i2c_req)(struct psp_i2c_req *req);
 
 /* Helper to verify status returned by PSP */
 static int check_i2c_req_sts(struct psp_i2c_req *req)
@@ -179,22 +54,36 @@ static int check_i2c_req_sts(struct psp_i2c_req *req)
        }
 }
 
-static int psp_send_check_i2c_req(struct psp_i2c_req *req)
+/*
+ * Errors in the x86-PSP i2c-arbitration protocol may occur at two levels:
+ * 1. mailbox communication - PSP is not operational or some IO errors with
+ *    basic communication have happened.
+ * 2. i2c-requests - PSP refuses to grant i2c arbitration to x86 for too long.
+ *
+ * In order to distinguish between these in the error handling code, all
+ * mailbox communication errors on the first level (from CCP symbols) are
+ * passed up, and only -EIO triggers a check of the second level.
+ */
+static int psp_send_i2c_req_cezanne(struct psp_i2c_req *req)
 {
-       /*
-        * Errors in x86-PSP i2c-arbitration protocol may occur at two levels:
-        * 1. mailbox communication - PSP is not operational or some IO errors
-        * with basic communication had happened;
-        * 2. i2c-requests - PSP refuses to grant i2c arbitration to x86 for too
-        * long.
-        * In order to distinguish between these two in error handling code, all
-        * errors on the first level (returned by psp_send_cmd) are shadowed by
-        * -EIO.
-        */
-       if (psp_send_cmd(req))
-               return -EIO;
+       int ret;
+
+       ret = psp_send_platform_access_msg(PSP_I2C_REQ_BUS_CMD, (struct psp_request *)req);
+       if (ret == -EIO)
+               return check_i2c_req_sts(req);
 
-       return check_i2c_req_sts(req);
+       return ret;
+}
+
+static int psp_send_i2c_req_doorbell(struct psp_i2c_req *req)
+{
+       int ret;
+
+       ret = psp_ring_platform_doorbell(req->type, &req->hdr.status);
+       if (ret == -EIO)
+               return check_i2c_req_sts(req);
+
+       return ret;
 }
 
 static int psp_send_i2c_req(enum psp_i2c_req_type i2c_req_type)
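
Both new transport callbacks follow the same two-level shape: the CCP helper's return value covers the mailbox/doorbell transport itself, and only -EIO (command consumed, status written back) causes the PSP's own verdict in the request buffer to be inspected. A condensed, illustrative skeleton of that pattern — send_via_transport() is a placeholder, not a real symbol:

    static int psp_send_i2c_req_example(struct psp_i2c_req *req)
    {
            int ret;

            /* Level 1: mailbox/doorbell transport, via the CCP helpers. */
            ret = send_via_transport(req);      /* placeholder */
            if (ret == -EIO)
                    /* Level 2: PSP processed the command; read its status. */
                    return check_i2c_req_sts(req);

            /* Transport-level error (or success) is passed up unchanged. */
            return ret;
    }
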
@@ -208,11 +97,11 @@ static int psp_send_i2c_req(enum psp_i2c_req_type i2c_req_type)
        if (!req)
                return -ENOMEM;
 
-       req->hdr.total_size = sizeof(*req);
+       req->hdr.payload_size = sizeof(*req);
        req->type = i2c_req_type;
 
        start = jiffies;
-       ret = read_poll_timeout(psp_send_check_i2c_req, status,
+       ret = read_poll_timeout(_psp_send_i2c_req, status,
                                (status != -EBUSY),
                                PSP_I2C_REQ_RETRY_DELAY_US,
                                PSP_I2C_REQ_RETRY_CNT * PSP_I2C_REQ_RETRY_DELAY_US,
@@ -387,7 +276,10 @@ static const struct i2c_lock_operations i2c_dw_psp_lock_ops = {
 
 int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev)
 {
-       int ret;
+       struct pci_dev *rdev;
+
+       if (!IS_REACHABLE(CONFIG_CRYPTO_DEV_CCP_DD))
+               return -ENODEV;
 
        if (!dev)
                return -ENODEV;
@@ -399,11 +291,18 @@ int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev)
        if (psp_i2c_dev)
                return -EEXIST;
 
-       psp_i2c_dev = dev->dev;
+       /* Cezanne uses platform mailbox, Mendocino and later use doorbell */
+       rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+       if (rdev->device == 0x1630)
+               _psp_send_i2c_req = psp_send_i2c_req_cezanne;
+       else
+               _psp_send_i2c_req = psp_send_i2c_req_doorbell;
+       pci_dev_put(rdev);
 
-       ret = psp_mbox_probe();
-       if (ret)
-               return ret;
+       if (psp_check_platform_access_status())
+               return -EPROBE_DEFER;
+
+       psp_i2c_dev = dev->dev;
 
        dev_info(psp_i2c_dev, "I2C bus managed by AMD PSP\n");
 
@@ -417,9 +316,3 @@ int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev)
 
        return 0;
 }
-
-/* Unmap area used as a mailbox with PSP */
-void i2c_dw_amdpsp_remove_lock_support(struct dw_i2c_dev *dev)
-{
-       iounmap(mbox_iomem);
-}
index 050d8c63ad3c5464da845a7116a5e577cfb6611e..c5d87aae39c666db553afe3f57c18b27288f462d 100644 (file)
@@ -383,7 +383,6 @@ int i2c_dw_baytrail_probe_lock_support(struct dw_i2c_dev *dev);
 
 #if IS_ENABLED(CONFIG_I2C_DESIGNWARE_AMDPSP)
 int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev);
-void i2c_dw_amdpsp_remove_lock_support(struct dw_i2c_dev *dev);
 #endif
 
 int i2c_dw_validate_speed(struct dw_i2c_dev *dev);
index 74182db03a88b36946b6f897e5de1db62c32b222..89ad88c547544b30cd6d961ef92e203145c7052b 100644 (file)
@@ -214,7 +214,6 @@ static const struct i2c_dw_semaphore_callbacks i2c_dw_semaphore_cb_table[] = {
 #ifdef CONFIG_I2C_DESIGNWARE_AMDPSP
        {
                .probe = i2c_dw_amdpsp_probe_lock_support,
-               .remove = i2c_dw_amdpsp_remove_lock_support,
        },
 #endif
        {}
index cec6e70f0ac92f30013c27669616e430f20b751a..e8cd9aaa346750fd7e47a2a861813ecb512b51b5 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/tee_drv.h>
 #include <linux/psp-tee.h>
 #include <linux/slab.h>
-#include <linux/psp-sev.h>
+#include <linux/psp.h>
 #include "amdtee_if.h"
 #include "amdtee_private.h"
 
index f87f96a291c99d68619664ecbfebbd6bbce6de5c..f0303126f199d2a922397a0d80e53d2625ba0088 100644 (file)
@@ -5,7 +5,7 @@
 
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
-#include <linux/psp-sev.h>
+#include <linux/psp.h>
 #include "amdtee_private.h"
 
 static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
index e4bc96528902e51432aabf66dd7e8595618ea22e..574cffc90730f5f3296262375a08474dab6ec59d 100644 (file)
@@ -8,6 +8,9 @@
  */
 #ifndef _CRYPTO_ACOMP_H
 #define _CRYPTO_ACOMP_H
+
+#include <linux/atomic.h>
+#include <linux/container_of.h>
 #include <linux/crypto.h>
 
 #define CRYPTO_ACOMP_ALLOC_OUTPUT      0x00000001
@@ -53,37 +56,35 @@ struct crypto_acomp {
        struct crypto_tfm base;
 };
 
-/**
- * struct acomp_alg - asynchronous compression algorithm
- *
- * @compress:  Function performs a compress operation
- * @decompress:        Function performs a de-compress operation
- * @dst_free:  Frees destination buffer if allocated inside the algorithm
- * @init:      Initialize the cryptographic transformation object.
- *             This function is used to initialize the cryptographic
- *             transformation object. This function is called only once at
- *             the instantiation time, right after the transformation context
- *             was allocated. In case the cryptographic hardware has some
- *             special requirements which need to be handled by software, this
- *             function shall check for the precise requirement of the
- *             transformation and put any software fallbacks in place.
- * @exit:      Deinitialize the cryptographic transformation object. This is a
- *             counterpart to @init, used to remove various changes set in
- *             @init.
- *
- * @reqsize:   Context size for (de)compression requests
- * @base:      Common crypto API algorithm data structure
+/*
+ * struct crypto_istat_compress - statistics for compress algorithm
+ * @compress_cnt:      number of compress requests
+ * @compress_tlen:     total data size handled by compress requests
+ * @decompress_cnt:    number of decompress requests
+ * @decompress_tlen:   total data size handled by decompress requests
+ * @err_cnt:           number of errors for compress requests
  */
-struct acomp_alg {
-       int (*compress)(struct acomp_req *req);
-       int (*decompress)(struct acomp_req *req);
-       void (*dst_free)(struct scatterlist *dst);
-       int (*init)(struct crypto_acomp *tfm);
-       void (*exit)(struct crypto_acomp *tfm);
-       unsigned int reqsize;
-       struct crypto_alg base;
+struct crypto_istat_compress {
+       atomic64_t compress_cnt;
+       atomic64_t compress_tlen;
+       atomic64_t decompress_cnt;
+       atomic64_t decompress_tlen;
+       atomic64_t err_cnt;
 };
 
+#ifdef CONFIG_CRYPTO_STATS
+#define COMP_ALG_COMMON_STATS struct crypto_istat_compress stat;
+#else
+#define COMP_ALG_COMMON_STATS
+#endif
+
+#define COMP_ALG_COMMON {                      \
+       COMP_ALG_COMMON_STATS                   \
+                                               \
+       struct crypto_alg base;                 \
+}
+struct comp_alg_common COMP_ALG_COMMON;
+
 /**
  * DOC: Asynchronous Compression API
  *
@@ -131,9 +132,10 @@ static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
        return &tfm->base;
 }
 
-static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+static inline struct comp_alg_common *__crypto_comp_alg_common(
+       struct crypto_alg *alg)
 {
-       return container_of(alg, struct acomp_alg, base);
+       return container_of(alg, struct comp_alg_common, base);
 }
 
 static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
@@ -141,9 +143,10 @@ static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
        return container_of(tfm, struct crypto_acomp, base);
 }
 
-static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+static inline struct comp_alg_common *crypto_comp_alg_common(
+       struct crypto_acomp *tfm)
 {
-       return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+       return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
 }
 
 static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
@@ -219,7 +222,8 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
 {
        req->base.complete = cmpl;
        req->base.data = data;
-       req->base.flags = flgs;
+       req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT;
+       req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT;
 }
 
 /**
@@ -246,10 +250,32 @@ static inline void acomp_request_set_params(struct acomp_req *req,
        req->slen = slen;
        req->dlen = dlen;
 
+       req->flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
        if (!req->dst)
                req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
 }
 
+static inline struct crypto_istat_compress *comp_get_stat(
+       struct comp_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
+static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&comp_get_stat(alg)->err_cnt);
+
+       return err;
+}
+
 /**
  * crypto_acomp_compress() -- Invoke asynchronous compress operation
  *
@@ -262,14 +288,18 @@ static inline void acomp_request_set_params(struct acomp_req *req,
 static inline int crypto_acomp_compress(struct acomp_req *req)
 {
        struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-       struct crypto_alg *alg = tfm->base.__crt_alg;
-       unsigned int slen = req->slen;
-       int ret;
-
-       crypto_stats_get(alg);
-       ret = tfm->compress(req);
-       crypto_stats_compress(slen, ret, alg);
-       return ret;
+       struct comp_alg_common *alg;
+
+       alg = crypto_comp_alg_common(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_compress *istat = comp_get_stat(alg);
+
+               atomic64_inc(&istat->compress_cnt);
+               atomic64_add(req->slen, &istat->compress_tlen);
+       }
+
+       return crypto_comp_errstat(alg, tfm->compress(req));
 }
 
 /**
@@ -284,14 +314,18 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
 static inline int crypto_acomp_decompress(struct acomp_req *req)
 {
        struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-       struct crypto_alg *alg = tfm->base.__crt_alg;
-       unsigned int slen = req->slen;
-       int ret;
-
-       crypto_stats_get(alg);
-       ret = tfm->decompress(req);
-       crypto_stats_decompress(slen, ret, alg);
-       return ret;
+       struct comp_alg_common *alg;
+
+       alg = crypto_comp_alg_common(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_compress *istat = comp_get_stat(alg);
+
+               atomic64_inc(&istat->decompress_cnt);
+               atomic64_add(req->slen, &istat->decompress_tlen);
+       }
+
+       return crypto_comp_errstat(alg, tfm->decompress(req));
 }
 
 #endif
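
Two conventions in the rewritten wrappers are worth calling out. First, acomp_request_set_params() now derives CRYPTO_ACOMP_ALLOC_OUTPUT purely from dst being NULL, and acomp_request_set_callback() preserves that bit across callback updates. Second, the new istat counters are bumped before the operation runs, while err_cnt only counts real failures — -EINPROGRESS and -EBUSY just mean the request was queued. A hedged usage sketch (names illustrative, error handling elided):

    /* Sketch: one-shot compression, letting the algorithm allocate the
     * destination buffer (dst == NULL sets the ALLOC_OUTPUT flag in
     * acomp_request_set_params() above).
     */
    struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
    struct acomp_req *req = acomp_request_alloc(tfm);
    DECLARE_CRYPTO_WAIT(wait);
    int err;

    acomp_request_set_params(req, src_sg, NULL, src_len, dst_len);
    acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                               crypto_req_done, &wait);

    /* compress_cnt/compress_tlen are incremented here when
     * CONFIG_CRYPTO_STATS=y; err_cnt only on a real failure. */
    err = crypto_wait_req(crypto_acomp_compress(req), &wait);
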
index 4a2b7e6e0c1fa7cd22dd6bc7c3302932a7625598..35e45b854a6fa4940f099eadde9228c1b3a1105d 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef _CRYPTO_AEAD_H
 #define _CRYPTO_AEAD_H
 
+#include <linux/atomic.h>
 #include <linux/container_of.h>
 #include <linux/crypto.h>
 #include <linux/slab.h>
@@ -100,6 +101,22 @@ struct aead_request {
        void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
+/*
+ * struct crypto_istat_aead - statistics for AEAD algorithm
+ * @encrypt_cnt:       number of encrypt requests
+ * @encrypt_tlen:      total data size handled by encrypt requests
+ * @decrypt_cnt:       number of decrypt requests
+ * @decrypt_tlen:      total data size handled by decrypt requests
+ * @err_cnt:           number of errors for AEAD requests
+ */
+struct crypto_istat_aead {
+       atomic64_t encrypt_cnt;
+       atomic64_t encrypt_tlen;
+       atomic64_t decrypt_cnt;
+       atomic64_t decrypt_tlen;
+       atomic64_t err_cnt;
+};
+
 /**
  * struct aead_alg - AEAD cipher definition
  * @maxauthsize: Set the maximum authentication tag size supported by the
@@ -118,6 +135,7 @@ struct aead_request {
  * @setkey: see struct skcipher_alg
  * @encrypt: see struct skcipher_alg
  * @decrypt: see struct skcipher_alg
+ * @stat: statistics for AEAD algorithm
  * @ivsize: see struct skcipher_alg
  * @chunksize: see struct skcipher_alg
  * @init: Initialize the cryptographic transformation object. This function
@@ -144,6 +162,10 @@ struct aead_alg {
        int (*init)(struct crypto_aead *tfm);
        void (*exit)(struct crypto_aead *tfm);
 
+#ifdef CONFIG_CRYPTO_STATS
+       struct crypto_istat_aead stat;
+#endif
+
        unsigned int ivsize;
        unsigned int maxauthsize;
        unsigned int chunksize;
index 734c213918bda2d6fb6866fcf3133c846a04fede..f35fd653e4e538c9f53a44a2b6b936091238e8bf 100644 (file)
@@ -7,6 +7,8 @@
  */
 #ifndef _CRYPTO_AKCIPHER_H
 #define _CRYPTO_AKCIPHER_H
+
+#include <linux/atomic.h>
 #include <linux/crypto.h>
 
 /**
@@ -52,6 +54,26 @@ struct crypto_akcipher {
        struct crypto_tfm base;
 };
 
+/*
+ * struct crypto_istat_akcipher - statistics for akcipher algorithm
+ * @encrypt_cnt:       number of encrypt requests
+ * @encrypt_tlen:      total data size handled by encrypt requests
+ * @decrypt_cnt:       number of decrypt requests
+ * @decrypt_tlen:      total data size handled by decrypt requests
+ * @verify_cnt:                number of verify operations
+ * @sign_cnt:          number of sign requests
+ * @err_cnt:           number of errors for akcipher requests
+ */
+struct crypto_istat_akcipher {
+       atomic64_t encrypt_cnt;
+       atomic64_t encrypt_tlen;
+       atomic64_t decrypt_cnt;
+       atomic64_t decrypt_tlen;
+       atomic64_t verify_cnt;
+       atomic64_t sign_cnt;
+       atomic64_t err_cnt;
+};
+
 /**
  * struct akcipher_alg - generic public key algorithm
  *
@@ -88,6 +110,7 @@ struct crypto_akcipher {
  * @exit:      Deinitialize the cryptographic transformation object. This is a
  *             counterpart to @init, used to remove various changes set in
  *             @init.
+ * @stat:      Statistics for akcipher algorithm
  *
  * @base:      Common crypto API algorithm data structure
  */
@@ -104,6 +127,10 @@ struct akcipher_alg {
        int (*init)(struct crypto_akcipher *tfm);
        void (*exit)(struct crypto_akcipher *tfm);
 
+#ifdef CONFIG_CRYPTO_STATS
+       struct crypto_istat_akcipher stat;
+#endif
+
        struct crypto_alg base;
 };
 
@@ -275,6 +302,27 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
        return alg->max_size(tfm);
 }
 
+static inline struct crypto_istat_akcipher *akcipher_get_stat(
+       struct akcipher_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
+static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&akcipher_get_stat(alg)->err_cnt);
+
+       return err;
+}
+
 /**
  * crypto_akcipher_encrypt() - Invoke public key encrypt operation
  *
@@ -289,14 +337,15 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
-       struct crypto_alg *calg = tfm->base.__crt_alg;
-       unsigned int src_len = req->src_len;
-       int ret;
-
-       crypto_stats_get(calg);
-       ret = alg->encrypt(req);
-       crypto_stats_akcipher_encrypt(src_len, ret, calg);
-       return ret;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
+
+               atomic64_inc(&istat->encrypt_cnt);
+               atomic64_add(req->src_len, &istat->encrypt_tlen);
+       }
+
+       return crypto_akcipher_errstat(alg, alg->encrypt(req));
 }
 
 /**
@@ -313,14 +362,15 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
-       struct crypto_alg *calg = tfm->base.__crt_alg;
-       unsigned int src_len = req->src_len;
-       int ret;
-
-       crypto_stats_get(calg);
-       ret = alg->decrypt(req);
-       crypto_stats_akcipher_decrypt(src_len, ret, calg);
-       return ret;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
+
+               atomic64_inc(&istat->decrypt_cnt);
+               atomic64_add(req->src_len, &istat->decrypt_tlen);
+       }
+
+       return crypto_akcipher_errstat(alg, alg->decrypt(req));
 }
 
 /**
@@ -337,13 +387,11 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
-       struct crypto_alg *calg = tfm->base.__crt_alg;
-       int ret;
 
-       crypto_stats_get(calg);
-       ret = alg->sign(req);
-       crypto_stats_akcipher_sign(ret, calg);
-       return ret;
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&akcipher_get_stat(alg)->sign_cnt);
+
+       return crypto_akcipher_errstat(alg, alg->sign(req));
 }
 
 /**
@@ -364,13 +412,11 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
-       struct crypto_alg *calg = tfm->base.__crt_alg;
-       int ret;
 
-       crypto_stats_get(calg);
-       ret = alg->verify(req);
-       crypto_stats_akcipher_verify(ret, calg);
-       return ret;
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&akcipher_get_stat(alg)->verify_cnt);
+
+       return crypto_akcipher_errstat(alg, alg->verify(req));
 }
 
 /**
index fede394ae2ab501601a071f8cdc1edc210a5d913..016d5a302b84a987a72fd40cc9466c3e9fc06a5d 100644 (file)
@@ -7,15 +7,12 @@
 #ifndef _CRYPTO_ALGAPI_H
 #define _CRYPTO_ALGAPI_H
 
+#include <crypto/utils.h>
 #include <linux/align.h>
 #include <linux/cache.h>
 #include <linux/crypto.h>
-#include <linux/kconfig.h>
-#include <linux/list.h>
 #include <linux/types.h>
 
-#include <asm/unaligned.h>
-
 /*
  * Maximum values for blocksize and alignmask, used to allocate
  * static buffers that are big enough for any combination of
 
 #define CRYPTO_DMA_PADDING ((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
 
+/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so those aliases are retained as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name)      \
+               __MODULE_INFO(alias, alias_userspace, name);    \
+               __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+
 struct crypto_aead;
 struct crypto_instance;
 struct module;
 struct notifier_block;
 struct rtattr;
+struct scatterlist;
 struct seq_file;
 struct sk_buff;
 
@@ -50,6 +61,9 @@ struct crypto_type {
        void (*show)(struct seq_file *m, struct crypto_alg *alg);
        int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
        void (*free)(struct crypto_instance *inst);
+#ifdef CONFIG_CRYPTO_STATS
+       int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
+#endif
 
        unsigned int type;
        unsigned int maskclear;
@@ -119,6 +133,14 @@ struct crypto_attr_type {
        u32 mask;
 };
 
+/*
+ * Algorithm registration interface.
+ */
+int crypto_register_alg(struct crypto_alg *alg);
+void crypto_unregister_alg(struct crypto_alg *alg);
+int crypto_register_algs(struct crypto_alg *algs, int count);
+void crypto_unregister_algs(struct crypto_alg *algs, int count);
+
 void crypto_mod_put(struct crypto_alg *alg);
 
 int crypto_register_template(struct crypto_template *tmpl);
@@ -156,47 +178,6 @@ static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
 }
 
 void crypto_inc(u8 *a, unsigned int size);
-void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
-
-static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
-{
-       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
-           __builtin_constant_p(size) &&
-           (size % sizeof(unsigned long)) == 0) {
-               unsigned long *d = (unsigned long *)dst;
-               unsigned long *s = (unsigned long *)src;
-               unsigned long l;
-
-               while (size > 0) {
-                       l = get_unaligned(d) ^ get_unaligned(s++);
-                       put_unaligned(l, d++);
-                       size -= sizeof(unsigned long);
-               }
-       } else {
-               __crypto_xor(dst, dst, src, size);
-       }
-}
-
-static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
-                                 unsigned int size)
-{
-       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
-           __builtin_constant_p(size) &&
-           (size % sizeof(unsigned long)) == 0) {
-               unsigned long *d = (unsigned long *)dst;
-               unsigned long *s1 = (unsigned long *)src1;
-               unsigned long *s2 = (unsigned long *)src2;
-               unsigned long l;
-
-               while (size > 0) {
-                       l = get_unaligned(s1++) ^ get_unaligned(s2++);
-                       put_unaligned(l, d++);
-                       size -= sizeof(unsigned long);
-               }
-       } else {
-               __crypto_xor(dst, src1, src2, size);
-       }
-}
 
 static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
 {
@@ -275,23 +256,6 @@ static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
        return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
 }
 
-noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
-
-/**
- * crypto_memneq - Compare two areas of memory without leaking
- *                timing information.
- *
- * @a: One area of memory
- * @b: Another area of memory
- * @size: The size of the area.
- *
- * Returns 0 when data is equal, 1 otherwise.
- */
-static inline int crypto_memneq(const void *a, const void *b, size_t size)
-{
-       return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
-}
-
 int crypto_register_notifier(struct notifier_block *nb);
 int crypto_unregister_notifier(struct notifier_block *nb);
 
@@ -308,4 +272,9 @@ static inline void crypto_request_complete(struct crypto_async_request *req,
        req->complete(req->data, err);
 }
 
+static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
+{
+       return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
+}
+
 #endif /* _CRYPTO_ALGAPI_H */
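
With crypto_register_alg() and friends now declared here, a base-algorithm driver needs only algapi.h for registration. The usual bulk pattern looks like this — array contents and names are illustrative:

    static struct crypto_alg my_algs[] = {
            /* ... algorithm definitions ... */
    };

    static int __init my_driver_init(void)
    {
            return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
    }

    static void __exit my_driver_exit(void)
    {
            crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
    }
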
index f5841992dc9b162556993b0ea41fa42b9c4a44a1..e69542d86a2b5d50bee3c4c194bae115a8fd608a 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef _CRYPTO_HASH_H
 #define _CRYPTO_HASH_H
 
+#include <linux/atomic.h>
 #include <linux/crypto.h>
 #include <linux/string.h>
 
@@ -22,8 +23,27 @@ struct crypto_ahash;
  * crypto_unregister_shash().
  */
 
-/**
+/*
+ * struct crypto_istat_hash - statistics for hash algorithm
+ * @hash_cnt:          number of hash requests
+ * @hash_tlen:         total data size hashed
+ * @err_cnt:           number of errors for hash requests
+ */
+struct crypto_istat_hash {
+       atomic64_t hash_cnt;
+       atomic64_t hash_tlen;
+       atomic64_t err_cnt;
+};
+
+#ifdef CONFIG_CRYPTO_STATS
+#define HASH_ALG_COMMON_STAT struct crypto_istat_hash stat;
+#else
+#define HASH_ALG_COMMON_STAT
+#endif
+
+/*
  * struct hash_alg_common - define properties of message digest
+ * @stat: Statistics for hash algorithm.
  * @digestsize: Size of the result of the transformation. A buffer of this size
  *             must be available to the @final and @finup calls, so they can
  *             store the resulting hash into it. For various predefined sizes,
@@ -39,12 +59,15 @@ struct crypto_ahash;
  *       The hash_alg_common data structure now adds the hash-specific
  *       information.
  */
-struct hash_alg_common {
-       unsigned int digestsize;
-       unsigned int statesize;
-
-       struct crypto_alg base;
-};
+#define HASH_ALG_COMMON {              \
+       HASH_ALG_COMMON_STAT            \
+                                       \
+       unsigned int digestsize;        \
+       unsigned int statesize;         \
+                                       \
+       struct crypto_alg base;         \
+}
+struct hash_alg_common HASH_ALG_COMMON;
 
 struct ahash_request {
        struct crypto_async_request base;
@@ -129,6 +152,7 @@ struct ahash_request {
  * @exit_tfm: Deinitialize the cryptographic transformation object.
  *           This is a counterpart to @init_tfm, used to remove
  *           various changes set in @init_tfm.
+ * @clone_tfm: Copy transform into new object, may allocate memory.
  * @halg: see struct hash_alg_common
  */
 struct ahash_alg {
@@ -143,6 +167,7 @@ struct ahash_alg {
                      unsigned int keylen);
        int (*init_tfm)(struct crypto_ahash *tfm);
        void (*exit_tfm)(struct crypto_ahash *tfm);
+       int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);
 
        struct hash_alg_common halg;
 };
@@ -160,8 +185,6 @@ struct shash_desc {
  */
 #define HASH_MAX_DESCSIZE      (sizeof(struct shash_desc) + 360)
 
-#define HASH_MAX_STATESIZE     512
-
 #define SHASH_DESC_ON_STACK(shash, ctx)                                             \
        char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
                __aligned(__alignof__(struct shash_desc));                   \
@@ -188,12 +211,16 @@ struct shash_desc {
  * @exit_tfm: Deinitialize the cryptographic transformation object.
  *           This is a counterpart to @init_tfm, used to remove
  *           various changes set in @init_tfm.
+ * @clone_tfm: Copy transform into new object, may allocate memory.
  * @digestsize: see struct ahash_alg
  * @statesize: see struct ahash_alg
  * @descsize: Size of the operational state for the message digest. This state
  *           size is the memory size that needs to be allocated for
  *           shash_desc.__ctx
+ * @stat: Statistics for hash algorithm.
  * @base: internally used
+ * @halg: see struct hash_alg_common
+ * @HASH_ALG_COMMON: see struct hash_alg_common
  */
 struct shash_alg {
        int (*init)(struct shash_desc *desc);
@@ -210,16 +237,17 @@ struct shash_alg {
                      unsigned int keylen);
        int (*init_tfm)(struct crypto_shash *tfm);
        void (*exit_tfm)(struct crypto_shash *tfm);
+       int (*clone_tfm)(struct crypto_shash *dst, struct crypto_shash *src);
 
        unsigned int descsize;
 
-       /* These fields must match hash_alg_common. */
-       unsigned int digestsize
-               __attribute__ ((aligned(__alignof__(struct hash_alg_common))));
-       unsigned int statesize;
-
-       struct crypto_alg base;
+       union {
+               struct HASH_ALG_COMMON;
+               struct hash_alg_common halg;
+       };
 };
+#undef HASH_ALG_COMMON
+#undef HASH_ALG_COMMON_STAT
 
 struct crypto_ahash {
        int (*init)(struct ahash_request *req);
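
The HASH_ALG_COMMON macro spells out the shared field list exactly once, so struct hash_alg_common and the tail of struct shash_alg are layout-identical by construction rather than by the old "these fields must match" comment and alignment attribute. Stripped of crypto specifics, the anonymous-struct-in-union idiom looks like this (generic names, C11 anonymous members assumed):

    #define COMMON_FIELDS {                 \
            unsigned int digestsize;        \
            unsigned int statesize;         \
            struct crypto_alg base;         \
    }

    struct common COMMON_FIELDS;            /* the standalone struct */

    struct specific {
            unsigned int descsize;          /* type-specific members first */
            union {
                    struct COMMON_FIELDS;   /* same fields, inlined anonymously */
                    struct common c;        /* or addressable as one member */
            };
    };
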
@@ -273,6 +301,8 @@ static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
 struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask);
 
+struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *tfm);
+
 static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
 {
        return &tfm->base;
@@ -535,6 +565,27 @@ static inline int crypto_ahash_init(struct ahash_request *req)
        return tfm->init(req);
 }
 
+static inline struct crypto_istat_hash *hash_get_stat(
+       struct hash_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
+static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&hash_get_stat(alg)->err_cnt);
+
+       return err;
+}
+
 /**
  * crypto_ahash_update() - add data to message digest for processing
  * @req: ahash_request handle that was previously initialized with the
@@ -549,14 +600,12 @@ static inline int crypto_ahash_init(struct ahash_request *req)
 static inline int crypto_ahash_update(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct crypto_alg *alg = tfm->base.__crt_alg;
-       unsigned int nbytes = req->nbytes;
-       int ret;
+       struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_add(req->nbytes, &hash_get_stat(alg)->hash_tlen);
 
-       crypto_stats_get(alg);
-       ret = crypto_ahash_reqtfm(req)->update(req);
-       crypto_stats_ahash_update(nbytes, ret, alg);
-       return ret;
+       return crypto_hash_errstat(alg, tfm->update(req));
 }
 
 /**
@@ -718,6 +767,8 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
 struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
                                        u32 mask);
 
+struct crypto_shash *crypto_clone_shash(struct crypto_shash *tfm);
+
 int crypto_has_shash(const char *alg_name, u32 type, u32 mask);
 
 static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
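
crypto_clone_ahash() and crypto_clone_shash(), declared above alongside the new @clone_tfm hooks, duplicate a transform — including an hmac key already programmed into it — without a second setkey(), which matters when the original key material is no longer around. A hedged sketch, assuming "base_tfm" is a previously allocated and keyed hmac ahash:

    struct crypto_ahash *clone;

    clone = crypto_clone_ahash(base_tfm);
    if (IS_ERR(clone))
            return PTR_ERR(clone);

    /* The clone hashes independently, with the same algorithm and key. */
    /* ... use it from another context ... */

    crypto_free_ahash(clone);
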
index 978b57a3f4f0e29de2913973d4b5c6d9d635d239..4ac46bafba9d7c94699f8af2c0e77bbe12a19bd2 100644 (file)
 #include <crypto/acompress.h>
 #include <crypto/algapi.h>
 
+/**
+ * struct acomp_alg - asynchronous compression algorithm
+ *
+ * @compress:  Function performs a compress operation
+ * @decompress:        Function performs a de-compress operation
+ * @dst_free:  Frees destination buffer if allocated inside the algorithm
+ * @init:      Initialize the cryptographic transformation object.
+ *             This function is used to initialize the cryptographic
+ *             transformation object. This function is called only once at
+ *             the instantiation time, right after the transformation context
+ *             was allocated. In case the cryptographic hardware has some
+ *             special requirements which need to be handled by software, this
+ *             function shall check for the precise requirement of the
+ *             transformation and put any software fallbacks in place.
+ * @exit:      Deinitialize the cryptographic transformation object. This is a
+ *             counterpart to @init, used to remove various changes set in
+ *             @init.
+ *
+ * @reqsize:   Context size for (de)compression requests
+ * @stat:      Statistics for compress algorithm
+ * @base:      Common crypto API algorithm data structure
+ * @calg:      Common algorithm data structure shared with scomp
+ */
+struct acomp_alg {
+       int (*compress)(struct acomp_req *req);
+       int (*decompress)(struct acomp_req *req);
+       void (*dst_free)(struct scatterlist *dst);
+       int (*init)(struct crypto_acomp *tfm);
+       void (*exit)(struct crypto_acomp *tfm);
+
+       unsigned int reqsize;
+
+       union {
+               struct COMP_ALG_COMMON;
+               struct comp_alg_common calg;
+       };
+};
+
 /*
  * Transform internal helpers.
  */
@@ -31,11 +69,6 @@ static inline void acomp_request_complete(struct acomp_req *req,
        crypto_request_complete(&req->base, err);
 }
 
-static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
-{
-       return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
-}
-
 static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
 {
        struct acomp_req *req;
index 0b259dbb97af05e8b2070988ee91dca934a63da5..37edf3f4e8af21e8372ece9472937fe24ca13844 100644 (file)
@@ -133,8 +133,6 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
 int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
 int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
 
-int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
-
 static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
 {
        return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
index 252cc949d4ee5f3ca8881d8b29101ef50e857800..858fe3965ae347ef19d498bd91997e54766e302e 100644 (file)
@@ -9,10 +9,13 @@
 #ifndef _CRYPTO_SCOMP_INT_H
 #define _CRYPTO_SCOMP_INT_H
 
+#include <crypto/acompress.h>
 #include <crypto/algapi.h>
 
 #define SCOMP_SCRATCH_SIZE     131072
 
+struct acomp_req;
+
 struct crypto_scomp {
        struct crypto_tfm base;
 };
@@ -24,7 +27,9 @@ struct crypto_scomp {
  * @free_ctx:  Function frees context allocated with alloc_ctx
  * @compress:  Function performs a compress operation
  * @decompress:        Function performs a de-compress operation
+ * @stat:      Statistics for compress algorithm
  * @base:      Common crypto API algorithm data structure
+ * @calg:      Common algorithm data structure shared with acomp
  */
 struct scomp_alg {
        void *(*alloc_ctx)(struct crypto_scomp *tfm);
@@ -35,7 +40,11 @@ struct scomp_alg {
        int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
                          unsigned int slen, u8 *dst, unsigned int *dlen,
                          void *ctx);
-       struct crypto_alg base;
+
+       union {
+               struct COMP_ALG_COMMON;
+               struct comp_alg_common calg;
+       };
 };
 
 static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
@@ -90,10 +99,6 @@ static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
                                                 ctx);
 }
 
-int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
-struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
-void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
-
 /**
  * crypto_register_scomp() -- Register synchronous compression algorithm
  *
index 33ff32878802a3db9a2c303b29c4a0ae4ce18df3..1988e24a0d1db6411573f64cd8499181b96cfcda 100644 (file)
@@ -8,7 +8,11 @@
 
 #ifndef _CRYPTO_KPP_
 #define _CRYPTO_KPP_
+
+#include <linux/atomic.h>
+#include <linux/container_of.h>
 #include <linux/crypto.h>
+#include <linux/slab.h>
 
 /**
  * struct kpp_request
@@ -47,6 +51,20 @@ struct crypto_kpp {
        struct crypto_tfm base;
 };
 
+/*
+ * struct crypto_istat_kpp - statistics for KPP algorithm
+ * @setsecret_cnt:             number of setsecret operations
+ * @generate_public_key_cnt:   number of generate_public_key operations
+ * @compute_shared_secret_cnt: number of compute_shared_secret operations
+ * @err_cnt:                   number of errors for KPP requests
+ */
+struct crypto_istat_kpp {
+       atomic64_t setsecret_cnt;
+       atomic64_t generate_public_key_cnt;
+       atomic64_t compute_shared_secret_cnt;
+       atomic64_t err_cnt;
+};
+
 /**
  * struct kpp_alg - generic key-agreement protocol primitives
  *
@@ -69,6 +87,7 @@ struct crypto_kpp {
  * @exit:              Undo everything @init did.
  *
  * @base:              Common crypto API algorithm data structure
+ * @stat:              Statistics for KPP algorithm
  */
 struct kpp_alg {
        int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
@@ -81,6 +100,10 @@ struct kpp_alg {
        int (*init)(struct crypto_kpp *tfm);
        void (*exit)(struct crypto_kpp *tfm);
 
+#ifdef CONFIG_CRYPTO_STATS
+       struct crypto_istat_kpp stat;
+#endif
+
        struct crypto_alg base;
 };
 
@@ -268,6 +291,26 @@ struct kpp_secret {
        unsigned short len;
 };
 
+static inline struct crypto_istat_kpp *kpp_get_stat(struct kpp_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
+static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&kpp_get_stat(alg)->err_cnt);
+
+       return err;
+}
+
 /**
  * crypto_kpp_set_secret() - Invoke kpp operation
  *
@@ -287,13 +330,11 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
                                        const void *buffer, unsigned int len)
 {
        struct kpp_alg *alg = crypto_kpp_alg(tfm);
-       struct crypto_alg *calg = tfm->base.__crt_alg;
-       int ret;
 
-       crypto_stats_get(calg);
-       ret = alg->set_secret(tfm, buffer, len);
-       crypto_stats_kpp_set_secret(calg, ret);
-       return ret;
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&kpp_get_stat(alg)->setsecret_cnt);
+
+       return crypto_kpp_errstat(alg, alg->set_secret(tfm, buffer, len));
 }
 
 /**
@@ -313,13 +354,11 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
 {
        struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
        struct kpp_alg *alg = crypto_kpp_alg(tfm);
-       struct crypto_alg *calg = tfm->base.__crt_alg;
-       int ret;
 
-       crypto_stats_get(calg);
-       ret = alg->generate_public_key(req);
-       crypto_stats_kpp_generate_public_key(calg, ret);
-       return ret;
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&kpp_get_stat(alg)->generate_public_key_cnt);
+
+       return crypto_kpp_errstat(alg, alg->generate_public_key(req));
 }
 
 /**
@@ -336,13 +375,11 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
 {
        struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
        struct kpp_alg *alg = crypto_kpp_alg(tfm);
-       struct crypto_alg *calg = tfm->base.__crt_alg;
-       int ret;
 
-       crypto_stats_get(calg);
-       ret = alg->compute_shared_secret(req);
-       crypto_stats_kpp_compute_shared_secret(calg, ret);
-       return ret;
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&kpp_get_stat(alg)->compute_shared_secret_cnt);
+
+       return crypto_kpp_errstat(alg, alg->compute_shared_secret(req));
 }
 
 /**
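
The rewritten wrappers above replace the old crypto_stats_*() calls with
inline accounting: each operation counter is bumped before the call, and
crypto_kpp_errstat() counts only real failures, treating -EINPROGRESS and
-EBUSY as in-flight async results rather than errors.  With
CONFIG_CRYPTO_STATS=n, IS_ENABLED() folds to zero and the compiler discards
the branch, so the NULL from kpp_get_stat() is never dereferenced.  A
hypothetical caller, not from this patch, showing how the async codes
propagate:

static int start_dh_exchange(struct kpp_request *req)
{
        int err = crypto_kpp_generate_public_key(req);

        if (err == -EINPROGRESS || err == -EBUSY)
                return 0;       /* async: the completion callback fires later */

        return err;             /* real failures were already counted in err_cnt */
}
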
index 17bb3673d3c1700359515c3d79f7d0da531908f8..6abe5102e5fb1004a928c554db692ff7d93b92a9 100644 (file)
@@ -9,10 +9,26 @@
 #ifndef _CRYPTO_RNG_H
 #define _CRYPTO_RNG_H
 
+#include <linux/atomic.h>
+#include <linux/container_of.h>
 #include <linux/crypto.h>
 
 struct crypto_rng;
 
+/*
+ * struct crypto_istat_rng: statistics for RNG algorithm
+ * @generate_cnt:      number of RNG generate requests
+ * @generate_tlen:     total size of data generated by the RNG
+ * @seed_cnt:          number of times the RNG was seeded
+ * @err_cnt:           number of errors for RNG requests
+ */
+struct crypto_istat_rng {
+       atomic64_t generate_cnt;
+       atomic64_t generate_tlen;
+       atomic64_t seed_cnt;
+       atomic64_t err_cnt;
+};
+
 /**
  * struct rng_alg - random number generator definition
  *
@@ -30,6 +46,7 @@ struct crypto_rng;
 *             size of the seed is defined with @seedsize.
  * @set_ent:   Set entropy that would otherwise be obtained from
  *             entropy source.  Internal use only.
+ * @stat:      Statistics for rng algorithm
  * @seedsize:  The seed size required for a random number generator
  *             initialization defined with this variable. Some
 *             random number generators do not require a seed
@@ -46,6 +63,10 @@ struct rng_alg {
        void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
                        unsigned int len);
 
+#ifdef CONFIG_CRYPTO_STATS
+       struct crypto_istat_rng stat;
+#endif
+
        unsigned int seedsize;
 
        struct crypto_alg base;
@@ -94,6 +115,11 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
        return &tfm->base;
 }
 
+static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg)
+{
+       return container_of(alg, struct rng_alg, base);
+}
+
 /**
  * crypto_rng_alg - obtain name of RNG
  * @tfm: cipher handle
@@ -104,8 +130,7 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
  */
 static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
 {
-       return container_of(crypto_rng_tfm(tfm)->__crt_alg,
-                           struct rng_alg, base);
+       return __crypto_rng_alg(crypto_rng_tfm(tfm)->__crt_alg);
 }
 
 /**
@@ -119,6 +144,26 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
        crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
 }
 
+static inline struct crypto_istat_rng *rng_get_stat(struct rng_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
+static inline int crypto_rng_errstat(struct rng_alg *alg, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&rng_get_stat(alg)->err_cnt);
+
+       return err;
+}
+
 /**
  * crypto_rng_generate() - get random number
  * @tfm: cipher handle
@@ -137,13 +182,17 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
                                      const u8 *src, unsigned int slen,
                                      u8 *dst, unsigned int dlen)
 {
-       struct crypto_alg *alg = tfm->base.__crt_alg;
-       int ret;
+       struct rng_alg *alg = crypto_rng_alg(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_rng *istat = rng_get_stat(alg);
+
+               atomic64_inc(&istat->generate_cnt);
+               atomic64_add(dlen, &istat->generate_tlen);
+       }
 
-       crypto_stats_get(alg);
-       ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
-       crypto_stats_rng_generate(alg, dlen, ret);
-       return ret;
+       return crypto_rng_errstat(alg,
+                                 alg->generate(tfm, src, slen, dst, dlen));
 }
 
 /**
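
crypto_rng_generate() now performs its own accounting before dispatching to
the algorithm.  A minimal usage sketch built on the existing allocation API;
get_random_block() is a hypothetical helper, not part of this patch:

static int get_random_block(u8 *buf, unsigned int len)
{
        struct crypto_rng *rng;
        int err;

        rng = crypto_alloc_rng("stdrng", 0, 0);
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        /* bumps generate_cnt/generate_tlen, then calls alg->generate() */
        err = crypto_rng_generate(rng, NULL, 0, buf, len);

        crypto_free_rng(rng);
        return err;
}
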
index 39f5b67c3069808a78282528e2ca486d1f1b4ba0..080d1ba3611d8d924f82178c2fc4cad4d54b425a 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef _CRYPTO_SKCIPHER_H
 #define _CRYPTO_SKCIPHER_H
 
+#include <linux/atomic.h>
 #include <linux/container_of.h>
 #include <linux/crypto.h>
 #include <linux/slab.h>
@@ -48,6 +49,22 @@ struct crypto_sync_skcipher {
        struct crypto_skcipher base;
 };
 
+/*
+ * struct crypto_istat_cipher - statistics for cipher algorithm
+ * @encrypt_cnt:       number of encrypt requests
+ * @encrypt_tlen:      total data size handled by encrypt requests
+ * @decrypt_cnt:       number of decrypt requests
+ * @decrypt_tlen:      total data size handled by decrypt requests
+ * @err_cnt:           number of errors for cipher requests
+ */
+struct crypto_istat_cipher {
+       atomic64_t encrypt_cnt;
+       atomic64_t encrypt_tlen;
+       atomic64_t decrypt_cnt;
+       atomic64_t decrypt_tlen;
+       atomic64_t err_cnt;
+};
+
 /**
  * struct skcipher_alg - symmetric key cipher definition
  * @min_keysize: Minimum key size supported by the transformation. This is the
@@ -101,6 +118,7 @@ struct crypto_sync_skcipher {
  * @walksize: Equal to the chunk size except in cases where the algorithm is
  *           considerably more efficient if it can operate on multiple chunks
  *           in parallel. Should be a multiple of chunksize.
+ * @stat: Statistics for cipher algorithm
  * @base: Definition of a generic crypto algorithm.
  *
  * All fields except @ivsize are mandatory and must be filled.
@@ -119,6 +137,10 @@ struct skcipher_alg {
        unsigned int chunksize;
        unsigned int walksize;
 
+#ifdef CONFIG_CRYPTO_STATS
+       struct crypto_istat_cipher stat;
+#endif
+
        struct crypto_alg base;
 };
 
diff --git a/include/crypto/utils.h b/include/crypto/utils.h
new file mode 100644 (file)
index 0000000..acbb917
--- /dev/null
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic utilities
+ *
+ * Copyright (c) 2023 Herbert Xu <[email protected]>
+ */
+#ifndef _CRYPTO_UTILS_H
+#define _CRYPTO_UTILS_H
+
+#include <asm/unaligned.h>
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
+
+static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+{
+       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+           __builtin_constant_p(size) &&
+           (size % sizeof(unsigned long)) == 0) {
+               unsigned long *d = (unsigned long *)dst;
+               unsigned long *s = (unsigned long *)src;
+               unsigned long l;
+
+               while (size > 0) {
+                       l = get_unaligned(d) ^ get_unaligned(s++);
+                       put_unaligned(l, d++);
+                       size -= sizeof(unsigned long);
+               }
+       } else {
+               __crypto_xor(dst, dst, src, size);
+       }
+}
+
+static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
+                                 unsigned int size)
+{
+       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+           __builtin_constant_p(size) &&
+           (size % sizeof(unsigned long)) == 0) {
+               unsigned long *d = (unsigned long *)dst;
+               unsigned long *s1 = (unsigned long *)src1;
+               unsigned long *s2 = (unsigned long *)src2;
+               unsigned long l;
+
+               while (size > 0) {
+                       l = get_unaligned(s1++) ^ get_unaligned(s2++);
+                       put_unaligned(l, d++);
+                       size -= sizeof(unsigned long);
+               }
+       } else {
+               __crypto_xor(dst, src1, src2, size);
+       }
+}
+
+noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
+
+/**
+ * crypto_memneq - Compare two areas of memory without leaking
+ *                timing information.
+ *
+ * @a: One area of memory
+ * @b: Another area of memory
+ * @size: The size of the area.
+ *
+ * Returns 0 when data is equal, 1 otherwise.
+ */
+static inline int crypto_memneq(const void *a, const void *b, size_t size)
+{
+       return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
+}
+
+#endif /* _CRYPTO_UTILS_H */
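
A short usage sketch for the new header (unwhiten_and_check() is hypothetical,
not from this patch).  Because the length is a compile-time constant and a
multiple of sizeof(unsigned long), crypto_xor_cpy() takes the inlined
word-at-a-time path on architectures with efficient unaligned access:

static int unwhiten_and_check(u8 out[16], const u8 in[16],
                              const u8 pad[16], const u8 expect[16])
{
        crypto_xor_cpy(out, in, pad, 16);       /* out = in ^ pad */

        if (crypto_memneq(out, expect, 16))     /* constant-time compare */
                return -EBADMSG;

        return 0;
}
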
index bb1d9b0e16471ce720620cc54e1675e80d340e7f..fa310ac1db59779dde363297f91dba4e6450fcd5 100644 (file)
 #ifndef _LINUX_CRYPTO_H
 #define _LINUX_CRYPTO_H
 
-#include <linux/atomic.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/bug.h>
+#include <linux/completion.h>
 #include <linux/refcount.h>
 #include <linux/slab.h>
-#include <linux/completion.h>
-
-/*
- * Autoloaded crypto modules should only use a prefixed name to avoid allowing
- * arbitrary modules to be loaded. Loading from userspace may still need the
- * unprefixed names, so retains those aliases as well.
- * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
- * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
- * expands twice on the same line. Instead, use a separate base name for the
- * alias.
- */
-#define MODULE_ALIAS_CRYPTO(name)      \
-               __MODULE_INFO(alias, alias_userspace, name);    \
-               __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+#include <linux/types.h>
 
 /*
  * Algorithm masks and types.
 
 #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
 
-struct scatterlist;
-struct crypto_async_request;
 struct crypto_tfm;
 struct crypto_type;
+struct module;
 
 typedef void (*crypto_completion_t)(void *req, int err);
 
@@ -275,116 +258,6 @@ struct compress_alg {
                              unsigned int slen, u8 *dst, unsigned int *dlen);
 };
 
-#ifdef CONFIG_CRYPTO_STATS
-/*
- * struct crypto_istat_aead - statistics for AEAD algorithm
- * @encrypt_cnt:       number of encrypt requests
- * @encrypt_tlen:      total data size handled by encrypt requests
- * @decrypt_cnt:       number of decrypt requests
- * @decrypt_tlen:      total data size handled by decrypt requests
- * @err_cnt:           number of error for AEAD requests
- */
-struct crypto_istat_aead {
-       atomic64_t encrypt_cnt;
-       atomic64_t encrypt_tlen;
-       atomic64_t decrypt_cnt;
-       atomic64_t decrypt_tlen;
-       atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_akcipher - statistics for akcipher algorithm
- * @encrypt_cnt:       number of encrypt requests
- * @encrypt_tlen:      total data size handled by encrypt requests
- * @decrypt_cnt:       number of decrypt requests
- * @decrypt_tlen:      total data size handled by decrypt requests
- * @verify_cnt:                number of verify operation
- * @sign_cnt:          number of sign requests
- * @err_cnt:           number of error for akcipher requests
- */
-struct crypto_istat_akcipher {
-       atomic64_t encrypt_cnt;
-       atomic64_t encrypt_tlen;
-       atomic64_t decrypt_cnt;
-       atomic64_t decrypt_tlen;
-       atomic64_t verify_cnt;
-       atomic64_t sign_cnt;
-       atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_cipher - statistics for cipher algorithm
- * @encrypt_cnt:       number of encrypt requests
- * @encrypt_tlen:      total data size handled by encrypt requests
- * @decrypt_cnt:       number of decrypt requests
- * @decrypt_tlen:      total data size handled by decrypt requests
- * @err_cnt:           number of error for cipher requests
- */
-struct crypto_istat_cipher {
-       atomic64_t encrypt_cnt;
-       atomic64_t encrypt_tlen;
-       atomic64_t decrypt_cnt;
-       atomic64_t decrypt_tlen;
-       atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_compress - statistics for compress algorithm
- * @compress_cnt:      number of compress requests
- * @compress_tlen:     total data size handled by compress requests
- * @decompress_cnt:    number of decompress requests
- * @decompress_tlen:   total data size handled by decompress requests
- * @err_cnt:           number of error for compress requests
- */
-struct crypto_istat_compress {
-       atomic64_t compress_cnt;
-       atomic64_t compress_tlen;
-       atomic64_t decompress_cnt;
-       atomic64_t decompress_tlen;
-       atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_hash - statistics for has algorithm
- * @hash_cnt:          number of hash requests
- * @hash_tlen:         total data size hashed
- * @err_cnt:           number of error for hash requests
- */
-struct crypto_istat_hash {
-       atomic64_t hash_cnt;
-       atomic64_t hash_tlen;
-       atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_kpp - statistics for KPP algorithm
- * @setsecret_cnt:             number of setsecrey operation
- * @generate_public_key_cnt:   number of generate_public_key operation
- * @compute_shared_secret_cnt: number of compute_shared_secret operation
- * @err_cnt:                   number of error for KPP requests
- */
-struct crypto_istat_kpp {
-       atomic64_t setsecret_cnt;
-       atomic64_t generate_public_key_cnt;
-       atomic64_t compute_shared_secret_cnt;
-       atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_rng: statistics for RNG algorithm
- * @generate_cnt:      number of RNG generate requests
- * @generate_tlen:     total data size of generated data by the RNG
- * @seed_cnt:          number of times the RNG was seeded
- * @err_cnt:           number of error for RNG requests
- */
-struct crypto_istat_rng {
-       atomic64_t generate_cnt;
-       atomic64_t generate_tlen;
-       atomic64_t seed_cnt;
-       atomic64_t err_cnt;
-};
-#endif /* CONFIG_CRYPTO_STATS */
-
 #define cra_cipher     cra_u.cipher
 #define cra_compress   cra_u.compress
 
@@ -462,15 +335,6 @@ struct crypto_istat_rng {
  * @cra_refcnt: internally used
  * @cra_destroy: internally used
  *
- * @stats: union of all possible crypto_istat_xxx structures
- * @stats.aead:                statistics for AEAD algorithm
- * @stats.akcipher:    statistics for akcipher algorithm
- * @stats.cipher:      statistics for cipher algorithm
- * @stats.compress:    statistics for compress algorithm
- * @stats.hash:                statistics for hash algorithm
- * @stats.rng:         statistics for rng algorithm
- * @stats.kpp:         statistics for KPP algorithm
- *
  * The struct crypto_alg describes a generic Crypto API algorithm and is common
  * for all of the transformations. Any variable not documented here shall not
  * be used by a cipher implementation as it is internal to the Crypto API.
@@ -502,81 +366,8 @@ struct crypto_alg {
        void (*cra_destroy)(struct crypto_alg *alg);
        
        struct module *cra_module;
-
-#ifdef CONFIG_CRYPTO_STATS
-       union {
-               struct crypto_istat_aead aead;
-               struct crypto_istat_akcipher akcipher;
-               struct crypto_istat_cipher cipher;
-               struct crypto_istat_compress compress;
-               struct crypto_istat_hash hash;
-               struct crypto_istat_rng rng;
-               struct crypto_istat_kpp kpp;
-       } stats;
-#endif /* CONFIG_CRYPTO_STATS */
-
 } CRYPTO_MINALIGN_ATTR;
 
-#ifdef CONFIG_CRYPTO_STATS
-void crypto_stats_init(struct crypto_alg *alg);
-void crypto_stats_get(struct crypto_alg *alg);
-void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
-void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
-void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
-void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
-void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
-void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
-void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
-void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
-void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
-void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
-void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
-void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
-void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
-#else
-static inline void crypto_stats_init(struct crypto_alg *alg)
-{}
-static inline void crypto_stats_get(struct crypto_alg *alg)
-{}
-static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
-{}
-static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
-{}
-#endif
 /*
  * A helper struct for waiting for completion of async crypto ops
  */
@@ -616,14 +407,6 @@ static inline void crypto_init_wait(struct crypto_wait *wait)
        init_completion(&wait->completion);
 }
 
-/*
- * Algorithm registration interface.
- */
-int crypto_register_alg(struct crypto_alg *alg);
-void crypto_unregister_alg(struct crypto_alg *alg);
-int crypto_register_algs(struct crypto_alg *algs, int count);
-void crypto_unregister_algs(struct crypto_alg *algs, int count);
-
 /*
  * Algorithm query interface.
  */
@@ -636,6 +419,7 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
  */
 
 struct crypto_tfm {
+       refcount_t refcnt;
 
        u32 crt_flags;
 
@@ -664,8 +448,6 @@ static inline void crypto_free_tfm(struct crypto_tfm *tfm)
        return crypto_destroy_tfm(tfm, tfm);
 }
 
-int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
-
 /*
  * Transform helpers which query the underlying algorithm.
  */
@@ -679,16 +461,6 @@ static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
        return tfm->__crt_alg->cra_driver_name;
 }
 
-static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
-{
-       return tfm->__crt_alg->cra_priority;
-}
-
-static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
-{
-       return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
-}
-
 static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
 {
        return tfm->__crt_alg->cra_blocksize;
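
The refcnt field added to struct crypto_tfm above is what lets a tfm be
shared by the cloning API introduced in this series.  A rough sketch of the
acquire side, assuming a helper along these lines (the series' actual
crypto_tfm_get() may differ):

static inline struct crypto_tfm *tfm_get(struct crypto_tfm *tfm)
{
        /* fails if the tfm is already being torn down (refcnt hit zero) */
        return refcount_inc_not_zero(&tfm->refcnt) ? tfm : NULL;
}
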
diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h
new file mode 100644 (file)
index 0000000..75da8f5
--- /dev/null
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PSP_PLATFORM_ACCESS_H
+#define __PSP_PLATFORM_ACCESS_H
+
+#include <linux/psp.h>
+
+enum psp_platform_access_msg {
+       PSP_CMD_NONE = 0x0,
+       PSP_I2C_REQ_BUS_CMD = 0x64,
+};
+
+struct psp_req_buffer_hdr {
+       u32 payload_size;
+       u32 status;
+} __packed;
+
+struct psp_request {
+       struct psp_req_buffer_hdr header;
+       void *buf;
+} __packed;
+
+/**
+ * psp_send_platform_access_msg() - Send a message to control platform features
+ *
+ * This function is intended to be used by drivers outside of ccp to communicate
+ * with the platform.
+ *
+ * Returns:
+ *  0:           success
+ *  -%EBUSY:     mailbox in recovery or in use
+ *  -%ENODEV:    driver not bound with PSP device
+ *  -%ETIMEDOUT: request timed out
+ *  -%EIO:       unknown error (see kernel log)
+ */
+int psp_send_platform_access_msg(enum psp_platform_access_msg, struct psp_request *req);
+
+/**
+ * psp_ring_platform_doorbell() - Ring platform doorbell
+ *
+ * This function is intended to be used by drivers outside of ccp to ring the
+ * platform doorbell with a message.
+ *
+ * Returns:
+ *  0:           success
+ *  -%EBUSY:     mailbox in recovery or in use
+ *  -%ENODEV:    driver not bound with PSP device
+ *  -%ETIMEDOUT: request timed out
+ *  -%EIO:       the error will be stored in the result argument
+ */
+int psp_ring_platform_doorbell(int msg, u32 *result);
+
+/**
+ * psp_check_platform_access_status() - Checks whether platform features are ready
+ *
+ * This function is intended to be used by drivers outside of ccp to determine
+ * if platform features have initialized.
+ *
+ * Returns:
+ * 0          platform features are ready
+ * -%ENODEV   platform features are not ready or present
+ */
+int psp_check_platform_access_status(void);
+
+#endif /* __PSP_PLATFORM_ACCESS_H */
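
A hedged sketch of a caller outside ccp using the doorbell interface declared
above, e.g. an I2C bus-arbitration request; the surrounding driver code is
hypothetical:

static int request_i2c_bus_from_psp(void)
{
        u32 result = 0;
        int err;

        err = psp_ring_platform_doorbell(PSP_I2C_REQ_BUS_CMD, &result);
        if (err == -EIO)
                pr_err("PSP doorbell error: 0x%x\n", result);

        return err;
}
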
index 1595088c428b4c0c5c7bc4e473382686ce79196e..7fd17e82bab43ff4409cf9e4c8827ff35c9df4d9 100644 (file)
 
 #include <uapi/linux/psp-sev.h>
 
-#ifdef CONFIG_X86
-#include <linux/mem_encrypt.h>
-
-#define __psp_pa(x)    __sme_pa(x)
-#else
-#define __psp_pa(x)    __pa(x)
-#endif
-
 #define SEV_FW_BLOB_MAX_SIZE   0x4000  /* 16KB */
 
 /**
diff --git a/include/linux/psp.h b/include/linux/psp.h
new file mode 100644 (file)
index 0000000..92e60ae
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PSP_H
+#define __PSP_H
+
+#ifdef CONFIG_X86
+#include <linux/mem_encrypt.h>
+
+#define __psp_pa(x)    __sme_pa(x)
+#else
+#define __psp_pa(x)    __pa(x)
+#endif
+
+/*
+ * Fields and bits used by most PSP mailboxes
+ *
+ * Note: Some mailboxes (such as SEV) have extra bits or different meanings
+ * and should include an appropriate local definition in their source file.
+ */
+#define PSP_CMDRESP_STS                GENMASK(15, 0)
+#define PSP_CMDRESP_CMD                GENMASK(23, 16)
+#define PSP_CMDRESP_RESERVED   GENMASK(29, 24)
+#define PSP_CMDRESP_RECOVERY   BIT(30)
+#define PSP_CMDRESP_RESP       BIT(31)
+
+#define PSP_DRBL_MSG           PSP_CMDRESP_CMD
+#define PSP_DRBL_RING          BIT(0)
+
+#endif /* __PSP_H */
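
The GENMASK()/BIT() definitions above describe a single 32-bit mailbox
register.  A small sketch of unpacking it with FIELD_GET() from
<linux/bitfield.h>; cmd_done() is a hypothetical helper:

static bool cmd_done(u32 reg, u16 *status)
{
        *status = FIELD_GET(PSP_CMDRESP_STS, reg);      /* low 16 bits */

        return reg & PSP_CMDRESP_RESP;                  /* bit 31: response ready */
}
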
index e007b8a4b73887caf8b3b6903939039f14506d8d..222d60195de66fec5721dace5d2b304936d2a937 100644 (file)
@@ -491,7 +491,7 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
                return;
 
        /* Ensure at least one thread when size < min_chunk. */
-       nworks = max(job->size / job->min_chunk, 1ul);
+       nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
        nworks = min(nworks, job->max_threads);
 
        if (nworks == 1) {
@@ -967,7 +967,7 @@ static const struct sysfs_ops padata_sysfs_ops = {
        .store = padata_sysfs_store,
 };
 
-static struct kobj_type padata_attr_type = {
+static const struct kobj_type padata_attr_type = {
        .sysfs_ops = &padata_sysfs_ops,
        .default_groups = padata_default_groups,
        .release = padata_sysfs_release,
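
The first padata hunk above fixes the worker count for jobs whose alignment
is coarser than their minimum chunk size; a worked example with hypothetical
numbers:

/*
 * job->size = 100, job->min_chunk = 10, job->align = 50
 *
 *   old: nworks = max(100 / 10, 1ul)          -> 10 workers, most with no
 *                                                 aligned chunk to process
 *   new: nworks = max(100 / max(10, 50), 1ul) ->  2 workers, both useful
 */
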
index 53230ab1b19576382f02f5de75bbe6da9cf859b6..c852c7151b0a65c5afd990c880df5cd308f92f29 100644 (file)
@@ -6,7 +6,7 @@
  */
 
 #include <asm/unaligned.h>
-#include <crypto/algapi.h>
+#include <crypto/utils.h>
 #include <linux/module.h>
 
 /*