// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include "adf_common_drv.h"
#include "adf_gen2_hw_data.h"
#include "icp_qat_hw.h"
#include <linux/pci.h>

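/* Return the number of accelerators set in the device's accelerator mask. */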
u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
{
        if (!self || !self->accel_mask)
                return 0;

        return hweight16(self->accel_mask);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);

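/*
 * Return the number of Accelerator Engines (AEs) set in the device's
 * AE mask.
 */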
u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
{
        if (!self || !self->ae_mask)
                return 0;

        return hweight32(self->ae_mask);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);

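/*
 * Enable ECC error detection and correction on every enabled Accelerator
 * Engine, then enable error reporting in the shared memory uncorrectable
 * and correctable error CSRs of every enabled accelerator.
 */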
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
        unsigned long accel_mask = hw_data->accel_mask;
        unsigned long ae_mask = hw_data->ae_mask;
        unsigned int val, i;

        /* Enable Accel Engine error detection & correction */
        for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
                val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i));
                val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
                ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i), val);
                val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i));
                val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
                ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i), val);
        }

        /* Enable shared memory error detection & correction */
        for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
                val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_UERRSSMSH(i));
                val |= ADF_GEN2_ERRSSMSH_EN;
                ADF_CSR_WR(pmisc_addr, ADF_GEN2_UERRSSMSH(i), val);
                val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_CERRSSMSH(i));
                val |= ADF_GEN2_ERRSSMSH_EN;
                ADF_CSR_WR(pmisc_addr, ADF_GEN2_CERRSSMSH(i), val);
        }
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);

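/*
 * Set or clear the Valid bit in every AE thread to PCIe function mapping
 * register, for both the group A and group B registers.
 */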
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
                           int num_a_regs, int num_b_regs)
{
        void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
        u32 reg;
        int i;

        /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
        for (i = 0; i < num_a_regs; i++) {
                reg = READ_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i);
                if (enable)
                        reg |= AE2FUNCTION_MAP_VALID;
                else
                        reg &= ~AE2FUNCTION_MAP_VALID;
                WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i, reg);
        }

        /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group B */
        for (i = 0; i < num_b_regs; i++) {
                reg = READ_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i);
                if (enable)
                        reg |= AE2FUNCTION_MAP_VALID;
                else
                        reg &= ~AE2FUNCTION_MAP_VALID;
                WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i, reg);
        }
}
EXPORT_SYMBOL_GPL(adf_gen2_cfg_iov_thds);

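/* Report the offsets of the admin mailbox and admin message CSRs. */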
void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info)
{
        admin_csrs_info->mailbox_offset = ADF_MAILBOX_BASE_OFFSET;
        admin_csrs_info->admin_msg_ur = ADF_ADMINMSGUR_OFFSET;
        admin_csrs_info->admin_msg_lr = ADF_ADMINMSGLR_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_admin_info);

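/* Report the arbiter configuration value and the arbiter CSR offsets. */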
void adf_gen2_get_arb_info(struct arb_info *arb_info)
{
        arb_info->arb_cfg = ADF_ARB_CONFIG;
        arb_info->arb_offset = ADF_ARB_OFFSET;
        arb_info->wt2sam_offset = ADF_ARB_WRK_2_SER_MAP_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info);

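/*
 * Enable bundle and miscellaneous interrupts. When VFs are configured
 * (accel_dev->pf.vf_info is set) no bundle interrupt bits are enabled for
 * the PF; otherwise one bit per ring bank is set.
 */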
void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
{
        void __iomem *addr = adf_get_pmisc_base(accel_dev);
        u32 val;

        val = accel_dev->pf.vf_info ? 0 : BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1;

        /* Enable bundle and misc interrupts */
        ADF_CSR_WR(addr, ADF_GEN2_SMIAPF0_MASK_OFFSET, val);
        ADF_CSR_WR(addr, ADF_GEN2_SMIAPF1_MASK_OFFSET, ADF_GEN2_SMIA1_MASK);
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);

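/*
 * Thin wrappers around the GEN2 ring CSR access macros, exposed through
 * struct adf_hw_csr_ops below so that common transport code does not need
 * to know the generation-specific CSR layout.
 */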
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
{
        return BUILD_RING_BASE_ADDR(addr, size);
}

static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
        return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
}

static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
                                u32 value)
{
        WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
}

static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
        return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
}

static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
                                u32 value)
{
        WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
}

static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
{
        return READ_CSR_E_STAT(csr_base_addr, bank);
}

static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
                                  u32 ring, u32 value)
{
        WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
}

static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
                                dma_addr_t addr)
{
        WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
}

static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
{
        WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
}

static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
{
        WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
}

static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
                                 u32 value)
{
        WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
}

static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
                                  u32 value)
{
        WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
}

static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
                                       u32 value)
{
        WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
}

static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
                                      u32 value)
{
        WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
}

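/* Populate the CSR ops table with the GEN2 accessors defined above. */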
void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
{
        csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
        csr_ops->read_csr_ring_head = read_csr_ring_head;
        csr_ops->write_csr_ring_head = write_csr_ring_head;
        csr_ops->read_csr_ring_tail = read_csr_ring_tail;
        csr_ops->write_csr_ring_tail = write_csr_ring_tail;
        csr_ops->read_csr_e_stat = read_csr_e_stat;
        csr_ops->write_csr_ring_config = write_csr_ring_config;
        csr_ops->write_csr_ring_base = write_csr_ring_base;
        csr_ops->write_csr_int_flag = write_csr_int_flag;
        csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
        csr_ops->write_csr_int_col_en = write_csr_int_col_en;
        csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
        csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
        csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
}
EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);

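/*
 * Build the capability mask for this device: start from the full set of
 * crypto and compression capabilities and clear those disabled by the
 * legacy fuses read from PCI config space, or power gated via the soft
 * straps and fuses.
 */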
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
        u32 straps = hw_data->straps;
        u32 fuses = hw_data->fuses;
        u32 legfuses;
        u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
                           ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
                           ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
                           ICP_ACCEL_CAPABILITIES_CIPHER |
                           ICP_ACCEL_CAPABILITIES_COMPRESSION;

        /* Read accelerator capabilities mask */
        pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);

        /* A set bit in legfuses means the feature is OFF in this SKU */
        if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
        }
        if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
        if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
                capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
        }
        if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
                capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;

        if ((straps | fuses) & ADF_POWERGATE_PKE)
                capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;

        if ((straps | fuses) & ADF_POWERGATE_DC)
                capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;

        return capabilities;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);

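/*
 * Program the SSM watchdog timers of every enabled accelerator: one timer
 * for the symmetric crypto and compression slices and one for the PKE
 * slice.
 */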
void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
        u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
        u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
        unsigned long accel_mask = hw_data->accel_mask;
        u32 i = 0;

        /* Configures WDT timers */
        for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
                /* Enable WDT for sym and dc */
                ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
                /* Enable WDT for pke */
                ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
        }
}
EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);