1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2020 Intel Corporation */
3 #include <linux/iopoll.h>
5 #include "adf_accel_devices.h"
6 #include "adf_cfg_services.h"
7 #include "adf_common_drv.h"
8 #include "adf_fw_config.h"
9 #include "adf_gen4_hw_data.h"
10 #include "adf_gen4_pm.h"
12 u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
14 return ADF_GEN4_ACCELERATORS_MASK;
16 EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask);
18 u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self)
20 return ADF_GEN4_MAX_ACCELERATORS;
22 EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels);
24 u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self)
26 if (!self || !self->ae_mask)
29 return hweight32(self->ae_mask);
31 EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes);
33 u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self)
35 return ADF_GEN4_PMISC_BAR;
37 EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id);
39 u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self)
41 return ADF_GEN4_ETR_BAR;
43 EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id);
45 u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self)
47 return ADF_GEN4_SRAM_BAR;
49 EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id);
51 enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self)
55 EXPORT_SYMBOL_GPL(adf_gen4_get_sku);
57 void adf_gen4_get_arb_info(struct arb_info *arb_info)
59 arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG;
60 arb_info->arb_offset = ADF_GEN4_ARB_OFFSET;
61 arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET;
63 EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info);
65 void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info)
67 admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET;
68 admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET;
69 admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET;
71 EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info);
73 u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self)
76 * GEN4 uses KPT counter for HB
78 return ADF_GEN4_KPT_COUNTER_FREQ;
80 EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock);
82 void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev)
84 struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR];
85 void __iomem *csr = misc_bar->virt_addr;
87 /* Enable all in errsou3 except VFLR notification on host */
88 ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
90 EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction);
92 void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev)
96 addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
98 /* Enable bundle interrupts */
99 ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0);
100 ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0);
102 /* Enable misc interrupts */
103 ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0);
105 EXPORT_SYMBOL_GPL(adf_gen4_enable_ints);
107 int adf_gen4_init_device(struct adf_accel_dev *accel_dev)
114 addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
116 /* Temporarily mask PM interrupt */
117 csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
118 csr |= ADF_GEN4_PM_SOU;
119 ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
121 /* Set DRV_ACTIVE bit to power up the device */
122 ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
124 /* Poll status register to make sure the device is powered up */
125 ret = read_poll_timeout(ADF_CSR_RD, status,
126 status & ADF_GEN4_PM_INIT_STATE,
127 ADF_GEN4_PM_POLL_DELAY_US,
128 ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
131 dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
135 EXPORT_SYMBOL_GPL(adf_gen4_init_device);
137 static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
140 *lower = lower_32_bits(value);
141 *upper = upper_32_bits(value);
144 void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
146 void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
147 u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
148 u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
149 u32 ssm_wdt_pke_high = 0;
150 u32 ssm_wdt_pke_low = 0;
151 u32 ssm_wdt_high = 0;
154 /* Convert 64bit WDT timer value into 32bit values for
155 * mmio write to 32bit CSRs.
157 adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
158 adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
161 /* Enable WDT for sym and dc */
162 ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
163 ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
164 /* Enable WDT for pke */
165 ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
166 ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
168 EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
171 * The vector routing table is used to select the MSI-X entry to use for each
173 * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts.
174 * The final entry corresponds to VF2PF or error interrupts.
175 * This vector table could be used to configure one MSI-X entry to be shared
176 * between multiple interrupt sources.
178 * The default routing is set to have a one to one correspondence between the
179 * interrupt source and the MSI-X entry used.
181 void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev)
186 csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
187 for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++)
188 ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i);
190 EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable);
/* Stub used when PF/VF communication is not supported; always succeeds. */
int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
	return 0;
}
EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);
198 static int reset_ring_pair(void __iomem *csr, u32 bank_number)
203 /* Write rpresetctl register BIT(0) as 1
204 * Since rpresetctl registers have no RW fields, no need to preserve
205 * values for other bits. Just write directly.
207 ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
208 ADF_WQM_CSR_RPRESETCTL_RESET);
210 /* Read rpresetsts register and wait for rp reset to complete */
211 ret = read_poll_timeout(ADF_CSR_RD, status,
212 status & ADF_WQM_CSR_RPRESETSTS_STATUS,
213 ADF_RPRESET_POLL_DELAY_US,
214 ADF_RPRESET_POLL_TIMEOUT_US, true,
215 csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
217 /* When rp reset is done, clear rpresetsts */
218 ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
219 ADF_WQM_CSR_RPRESETSTS_STATUS);
225 int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
227 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
228 void __iomem *csr = adf_get_etr_base(accel_dev);
231 if (bank_number >= hw_data->num_banks)
234 dev_dbg(&GET_DEV(accel_dev),
235 "ring pair reset for bank:%d\n", bank_number);
237 ret = reset_ring_pair(csr, bank_number);
239 dev_err(&GET_DEV(accel_dev),
240 "ring pair reset failed (timeout)\n");
242 dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
246 EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
248 static const u32 thrd_to_arb_map_dcc[] = {
249 0x00000000, 0x00000000, 0x00000000, 0x00000000,
250 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
251 0x00000000, 0x00000000, 0x00000000, 0x00000000,
252 0x00000000, 0x00000000, 0x00000000, 0x00000000,
256 static const u16 rp_group_to_arb_mask[] = {
261 static bool is_single_service(int service_id)
263 switch (service_id) {
280 int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
282 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
283 u32 *thd2arb_map = hw_data->thd_to_arb_map;
284 unsigned int ae_cnt, worker_obj_cnt, i, j;
285 unsigned long ae_mask, thds_mask;
286 int srv_id, rp_group;
287 u32 thd2arb_map_base;
290 if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask ||
291 !hw_data->get_num_aes || !hw_data->uof_get_num_objs ||
292 !hw_data->uof_get_ae_mask)
295 srv_id = adf_get_service_enabled(accel_dev);
299 ae_cnt = hw_data->get_num_aes(hw_data);
300 worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
301 ADF_GEN4_ADMIN_ACCELENGINES;
303 if (srv_id == SVC_DCC) {
304 if (ae_cnt > ICP_QAT_HW_AE_DELIMITER)
307 memcpy(thd2arb_map, thrd_to_arb_map_dcc,
308 array_size(sizeof(*thd2arb_map), ae_cnt));
312 for (i = 0; i < worker_obj_cnt; i++) {
313 ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
314 rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
315 thds_mask = hw_data->get_ena_thd_mask(accel_dev, i);
316 thd2arb_map_base = 0;
318 if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
321 if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR)
324 if (is_single_service(srv_id))
325 arb_mask = rp_group_to_arb_mask[RP_GROUP_0] |
326 rp_group_to_arb_mask[RP_GROUP_1];
328 arb_mask = rp_group_to_arb_mask[rp_group];
330 for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE)
331 thd2arb_map_base |= arb_mask << (j * 4);
333 for_each_set_bit(j, &ae_mask, ae_cnt)
334 thd2arb_map[j] = thd2arb_map_base;
338 EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);
340 u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
342 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
343 enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
344 unsigned int ae_mask, start_id, worker_obj_cnt, i;
348 if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask ||
349 !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs)
352 /* If dcc, all rings handle compression requests */
353 if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
354 for (i = 0; i < RP_GROUP_COUNT; i++)
359 worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
360 ADF_GEN4_ADMIN_ACCELENGINES;
361 start_id = worker_obj_cnt - RP_GROUP_COUNT;
363 for (i = start_id; i < worker_obj_cnt; i++) {
364 ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
365 rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
366 if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
369 switch (hw_data->uof_get_obj_type(accel_dev, i)) {
373 case ADF_FW_ASYM_OBJ:
374 rps[rp_group] = ASYM;
377 rps[rp_group] = COMP;
386 ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
387 rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
388 rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
389 rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
391 return ring_to_svc_map;
393 EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map);
396 * adf_gen4_bank_quiesce_coal_timer() - quiesce bank coalesced interrupt timer
397 * @accel_dev: Pointer to the device structure
398 * @bank_idx: Offset to the bank within this device
399 * @timeout_ms: Timeout in milliseconds for the operation
401 * This function tries to quiesce the coalesced interrupt timer of a bank if
402 * it has been enabled and triggered.
404 * Returns 0 on success, error code otherwise
407 int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
408 u32 bank_idx, int timeout_ms)
410 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
411 struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
412 void __iomem *csr_misc = adf_get_pmisc_base(accel_dev);
413 void __iomem *csr_etr = adf_get_etr_base(accel_dev);
414 u32 int_col_ctl, int_col_mask, int_col_en;
422 int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx);
423 int_col_mask = csr_ops->get_int_col_ctl_enable_mask();
424 if (!(int_col_ctl & int_col_mask))
427 int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx);
428 int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX);
430 e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx);
431 if (!(~e_stat & int_col_en))
434 wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC;
435 do_div(wait_us, hw_data->clock_frequency);
436 wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC);
437 dev_dbg(&GET_DEV(accel_dev),
438 "wait for bank %d - coalesced timer expires in %llu us (max=%u ms estat=0x%x intcolen=0x%x)\n",
439 bank_idx, wait_us, timeout_ms, e_stat, int_col_en);
441 ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc,
442 ADF_COALESCED_POLL_DELAY_US, wait_us, true,
443 csr_misc, ADF_WQM_CSR_RPINTSOU(bank_idx));
445 dev_warn(&GET_DEV(accel_dev),
446 "coalesced timer for bank %d expired (%llu us)\n",
451 EXPORT_SYMBOL_GPL(adf_gen4_bank_quiesce_coal_timer);
453 static int drain_bank(void __iomem *csr, u32 bank_number, int timeout_us)
457 ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
458 ADF_WQM_CSR_RPRESETCTL_DRAIN);
460 return read_poll_timeout(ADF_CSR_RD, status,
461 status & ADF_WQM_CSR_RPRESETSTS_STATUS,
462 ADF_RPRESET_POLL_DELAY_US, timeout_us, true,
463 csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
466 void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
469 void __iomem *csr = adf_get_etr_base(accel_dev);
471 ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
472 ADF_WQM_CSR_RPRESETSTS_STATUS);
475 int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
476 u32 bank_number, int timeout_us)
478 void __iomem *csr = adf_get_etr_base(accel_dev);
481 dev_dbg(&GET_DEV(accel_dev), "Drain bank %d\n", bank_number);
483 ret = drain_bank(csr, bank_number, timeout_us);
485 dev_err(&GET_DEV(accel_dev), "Bank drain failed (timeout)\n");
487 dev_dbg(&GET_DEV(accel_dev), "Bank drain successful\n");
492 static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
493 u32 bank, struct bank_state *state, u32 num_rings)
497 state->ringstat0 = ops->read_csr_stat(base, bank);
498 state->ringuostat = ops->read_csr_uo_stat(base, bank);
499 state->ringestat = ops->read_csr_e_stat(base, bank);
500 state->ringnestat = ops->read_csr_ne_stat(base, bank);
501 state->ringnfstat = ops->read_csr_nf_stat(base, bank);
502 state->ringfstat = ops->read_csr_f_stat(base, bank);
503 state->ringcstat0 = ops->read_csr_c_stat(base, bank);
504 state->iaintflagen = ops->read_csr_int_en(base, bank);
505 state->iaintflagreg = ops->read_csr_int_flag(base, bank);
506 state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
507 state->iaintcolen = ops->read_csr_int_col_en(base, bank);
508 state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
509 state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
510 state->ringexpstat = ops->read_csr_exp_stat(base, bank);
511 state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
512 state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
514 for (i = 0; i < num_rings; i++) {
515 state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
516 state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
517 state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
518 state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
/* Compare a live status CSR against its saved value; evaluates to 0 on
 * match or -EINVAL (after logging) on mismatch. Statement-expression so it
 * can be used as an rvalue.
 */
#define CHECK_STAT(op, expect_val, name, args...) \
({ \
	u32 __expect_val = (expect_val); \
	u32 actual_val = op(args); \
	(__expect_val == actual_val) ? 0 : \
		(pr_err("QAT: Fail to restore %s register. Expected 0x%x, actual 0x%x\n", \
			name, __expect_val, actual_val), -EINVAL); \
})
531 static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
532 u32 bank, struct bank_state *state, u32 num_rings,
538 for (i = 0; i < num_rings; i++)
539 ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
541 for (i = 0; i < num_rings; i++)
542 ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
544 for (i = 0; i < num_rings / 2; i++) {
545 int tx = i * (tx_rx_gap + 1);
546 int rx = tx + tx_rx_gap;
548 ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
549 ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
552 * The TX ring head needs to be updated again to make sure that
553 * the HW will not consider the ring as full when it is empty
554 * and the correct state flags are set to match the recovered state.
556 if (state->ringestat & BIT(tx)) {
557 val = ops->read_csr_int_srcsel(base, bank);
558 val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
559 ops->write_csr_int_srcsel_w_val(base, bank, val);
560 ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
563 ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
564 val = ops->read_csr_int_srcsel(base, bank);
565 val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
566 ops->write_csr_int_srcsel_w_val(base, bank, val);
568 ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
569 val = ops->read_csr_int_srcsel(base, bank);
570 val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
571 ops->write_csr_int_srcsel_w_val(base, bank, val);
574 * The RX ring tail needs to be updated again to make sure that
575 * the HW will not consider the ring as empty when it is full
576 * and the correct state flags are set to match the recovered state.
578 if (state->ringfstat & BIT(rx))
579 ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
582 ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
583 ops->write_csr_int_en(base, bank, state->iaintflagen);
584 ops->write_csr_int_col_en(base, bank, state->iaintcolen);
585 ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
586 ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
587 ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
588 ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
590 /* Check that all ring statuses match the saved state. */
591 ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat",
596 ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat",
601 ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
606 ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
611 ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
616 ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
621 tmp_val = ops->read_csr_exp_stat(base, bank);
622 val = state->ringexpstat;
623 if (tmp_val && !val) {
624 pr_err("QAT: Bank was restored with exception: 0x%x\n", val);
631 int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
632 struct bank_state *state)
634 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
635 struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
636 void __iomem *csr_base = adf_get_etr_base(accel_dev);
638 if (bank_number >= hw_data->num_banks || !state)
641 dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
643 bank_state_save(csr_ops, csr_base, bank_number, state,
644 hw_data->num_rings_per_bank);
648 EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);
650 int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
651 struct bank_state *state)
653 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
654 struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
655 void __iomem *csr_base = adf_get_etr_base(accel_dev);
658 if (bank_number >= hw_data->num_banks || !state)
661 dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
663 ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
664 hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
666 dev_err(&GET_DEV(accel_dev),
667 "Unable to restore state of bank %d\n", bank_number);
671 EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);