]> Git Repo - linux.git/blob - drivers/accel/ivpu/ivpu_hw_ip.c
dma-mapping: don't return errors from dma_set_max_seg_size
[linux.git] / drivers / accel / ivpu / ivpu_hw_ip.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020-2024 Intel Corporation
4  */
5
6 #include "ivpu_drv.h"
7 #include "ivpu_fw.h"
8 #include "ivpu_hw.h"
9 #include "ivpu_hw_37xx_reg.h"
10 #include "ivpu_hw_40xx_reg.h"
11 #include "ivpu_hw_ip.h"
12 #include "ivpu_hw_reg_io.h"
13 #include "ivpu_mmu.h"
14 #include "ivpu_pm.h"
15
16 #define PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT 0
17 #define PWR_ISLAND_EN_POST_DLY_FREQ_HIGH    18
18 #define PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT  3
19 #define PWR_ISLAND_STATUS_DLY_FREQ_HIGH     46
20 #define PWR_ISLAND_STATUS_TIMEOUT_US        (5 * USEC_PER_MSEC)
21
22 #define TIM_SAFE_ENABLE                     0xf1d0dead
23 #define TIM_WATCHDOG_RESET_VALUE            0xffffffff
24
25 #define ICB_0_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
26                              (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
27                              (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
28                              (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
29                              (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
30                              (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
31                              (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
32
33 #define ICB_1_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
34                              (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
35                              (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
36
37 #define ICB_0_1_IRQ_MASK_37XX ((((u64)ICB_1_IRQ_MASK_37XX) << 32) | ICB_0_IRQ_MASK_37XX)
38
39 #define ICB_0_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
40                              (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
41                              (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
42                              (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
43                              (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
44                              (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
45                              (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
46
47 #define ICB_1_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
48                              (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
49                              (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
50
51 #define ICB_0_1_IRQ_MASK_40XX ((((u64)ICB_1_IRQ_MASK_40XX) << 32) | ICB_0_IRQ_MASK_40XX)
52
53 #define ITF_FIREWALL_VIOLATION_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
54                                           (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
55                                           (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
56                                           (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
57                                           (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
58                                           (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
59                                           (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
60
61 #define ITF_FIREWALL_VIOLATION_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
62                                           (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
63                                           (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
64                                           (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
65                                           (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
66                                           (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
67                                           (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
68
/* Poll (up to 100 us) until the AON reset-clear bit reads 0, i.e. the 37xx IP bar is up. */
static int wait_for_ip_bar(struct ivpu_device *vdev)
{
        return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}
73
/* Release TOP_NOC, DSS and MSS masters from reset on 37xx (write-1-to-clear register). */
static void host_ss_rst_clr(struct ivpu_device *vdev)
{
        u32 val = 0;

        val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
        val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
        val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);

        REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}
84
85 static int host_ss_noc_qreqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
86 {
87         u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
88
89         if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
90                 return -EIO;
91
92         return 0;
93 }
94
95 static int host_ss_noc_qreqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
96 {
97         u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
98
99         if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
100                 return -EIO;
101
102         return 0;
103 }
104
105 static int host_ss_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
106 {
107         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
108                 return host_ss_noc_qreqn_check_37xx(vdev, exp_val);
109         else
110                 return host_ss_noc_qreqn_check_40xx(vdev, exp_val);
111 }
112
113 static int host_ss_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
114 {
115         u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);
116
117         if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
118                 return -EIO;
119
120         return 0;
121 }
122
123 static int host_ss_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
124 {
125         u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);
126
127         if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
128                 return -EIO;
129
130         return 0;
131 }
132
133 static int host_ss_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
134 {
135         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
136                 return host_ss_noc_qacceptn_check_37xx(vdev, exp_val);
137         else
138                 return host_ss_noc_qacceptn_check_40xx(vdev, exp_val);
139 }
140
141 static int host_ss_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
142 {
143         u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);
144
145         if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
146                 return -EIO;
147
148         return 0;
149 }
150
151 static int host_ss_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
152 {
153         u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);
154
155         if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
156                 return -EIO;
157
158         return 0;
159 }
160
161 static int host_ss_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
162 {
163         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
164                 return host_ss_noc_qdeny_check_37xx(vdev, exp_val);
165         else
166                 return host_ss_noc_qdeny_check_40xx(vdev, exp_val);
167 }
168
169 static int top_noc_qrenqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
170 {
171         u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
172
173         if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
174             !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
175                 return -EIO;
176
177         return 0;
178 }
179
180 static int top_noc_qrenqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
181 {
182         u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
183
184         if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
185             !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
186                 return -EIO;
187
188         return 0;
189 }
190
191 static int top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
192 {
193         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
194                 return top_noc_qrenqn_check_37xx(vdev, exp_val);
195         else
196                 return top_noc_qrenqn_check_40xx(vdev, exp_val);
197 }
198
/*
 * Prepare the host subsystem for boot: on 37xx first wait for the NPU IP bar
 * and release the host-side resets, then verify the host NOC Q-channel is in
 * the idle state (qreqn/qacceptn/qdeny all read back 0).
 * Returns 0 on success or a negative errno on timeout/handshake mismatch.
 */
int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev)
{
        int ret;

        if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
                ret = wait_for_ip_bar(vdev);
                if (ret) {
                        ivpu_err(vdev, "Timed out waiting for NPU IP bar\n");
                        return ret;
                }
                host_ss_rst_clr(vdev);
        }

        ret = host_ss_noc_qreqn_check(vdev, 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
                return ret;
        }

        ret = host_ss_noc_qacceptn_check(vdev, 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
                return ret;
        }

        /* Last check only logs; its status is still propagated to the caller. */
        ret = host_ss_noc_qdeny_check(vdev, 0x0);
        if (ret)
                ivpu_err(vdev, "Failed qdeny check %d\n", ret);

        return ret;
}
230
231 static void idle_gen_drive_37xx(struct ivpu_device *vdev, bool enable)
232 {
233         u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN);
234
235         if (enable)
236                 val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);
237         else
238                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);
239
240         REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, val);
241 }
242
243 static void idle_gen_drive_40xx(struct ivpu_device *vdev, bool enable)
244 {
245         u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);
246
247         if (enable)
248                 val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
249         else
250                 val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
251
252         REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
253 }
254
255 void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev)
256 {
257         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
258                 idle_gen_drive_37xx(vdev, true);
259         else
260                 idle_gen_drive_40xx(vdev, true);
261 }
262
263 void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev)
264 {
265         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
266                 idle_gen_drive_37xx(vdev, false);
267         else
268                 idle_gen_drive_40xx(vdev, false);
269 }
270
/*
 * Program the 50xx power-island enable-post and status-poll delays; longer
 * delays are required when the PLL runs at the high profiling frequency.
 */
static void pwr_island_delay_set_50xx(struct ivpu_device *vdev)
{
        u32 val, post, status;

        if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) {
                post = PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT;
                status = PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT;
        } else {
                post = PWR_ISLAND_EN_POST_DLY_FREQ_HIGH;
                status = PWR_ISLAND_STATUS_DLY_FREQ_HIGH;
        }

        val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY);
        val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val);
        REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val);

        val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY);
        val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, STATUS_DLY, status, val);
        REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, val);
}
291
292 static void pwr_island_trickle_drive_37xx(struct ivpu_device *vdev, bool enable)
293 {
294         u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
295
296         if (enable)
297                 val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
298         else
299                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
300
301         REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
302 }
303
/* Set or clear the 40xx CSS_CPU power-island trickle enable. */
static void pwr_island_trickle_drive_40xx(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

        if (enable)
                val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
        else
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);

        REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);

        /* Settling delay after enabling trickle power — presumably a hw requirement; confirm. */
        if (enable)
                ndelay(500);
}
318
319 static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
320 {
321         u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);
322
323         if (enable)
324                 val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
325         else
326                 val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
327
328         REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
329
330         if (!enable)
331                 ndelay(500);
332 }
333
334 static void pwr_island_drive_40xx(struct ivpu_device *vdev, bool enable)
335 {
336         u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);
337
338         if (enable)
339                 val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
340         else
341                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
342
343         REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
344 }
345
/* Power up the CPU island: trickle charge first, then the main enable. */
static void pwr_island_enable(struct ivpu_device *vdev)
{
        if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
                pwr_island_trickle_drive_37xx(vdev, true);
                pwr_island_drive_37xx(vdev, true);
        } else {
                pwr_island_trickle_drive_40xx(vdev, true);
                pwr_island_drive_40xx(vdev, true);
        }
}
356
/*
 * Poll the CPU power-island status bit until it equals exp_val or the
 * timeout expires. Skipped (returns success) when the punit-disabled
 * workaround is active, since the status would never update then.
 */
static int wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
        if (IVPU_WA(punit_disabled))
                return 0;

        if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
                return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU, exp_val,
                                     PWR_ISLAND_STATUS_TIMEOUT_US);
        else
                return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, exp_val,
                                     PWR_ISLAND_STATUS_TIMEOUT_US);
}
369
370 static void pwr_island_isolation_drive_37xx(struct ivpu_device *vdev, bool enable)
371 {
372         u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);
373
374         if (enable)
375                 val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
376         else
377                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
378
379         REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
380 }
381
382 static void pwr_island_isolation_drive_40xx(struct ivpu_device *vdev, bool enable)
383 {
384         u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);
385
386         if (enable)
387                 val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
388         else
389                 val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
390
391         REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
392 }
393
394 static void pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
395 {
396         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
397                 pwr_island_isolation_drive_37xx(vdev, enable);
398         else
399                 pwr_island_isolation_drive_40xx(vdev, enable);
400 }
401
/* Convenience wrapper: drop the power-island isolation. */
static void pwr_island_isolation_disable(struct ivpu_device *vdev)
{
        pwr_island_isolation_drive(vdev, false);
}
406
407 static void host_ss_clk_drive_37xx(struct ivpu_device *vdev, bool enable)
408 {
409         u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);
410
411         if (enable) {
412                 val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
413                 val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
414                 val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
415         } else {
416                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
417                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
418                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
419         }
420
421         REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
422 }
423
424 static void host_ss_clk_drive_40xx(struct ivpu_device *vdev, bool enable)
425 {
426         u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);
427
428         if (enable) {
429                 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
430                 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
431                 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
432         } else {
433                 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
434                 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
435                 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
436         }
437
438         REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
439 }
440
441 static void host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
442 {
443         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
444                 host_ss_clk_drive_37xx(vdev, enable);
445         else
446                 host_ss_clk_drive_40xx(vdev, enable);
447 }
448
/* Convenience wrapper: enable the host subsystem clocks. */
static void host_ss_clk_enable(struct ivpu_device *vdev)
{
        host_ss_clk_drive(vdev, true);
}
453
454 static void host_ss_rst_drive_37xx(struct ivpu_device *vdev, bool enable)
455 {
456         u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);
457
458         if (enable) {
459                 val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
460                 val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
461                 val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
462         } else {
463                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
464                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
465                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
466         }
467
468         REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
469 }
470
471 static void host_ss_rst_drive_40xx(struct ivpu_device *vdev, bool enable)
472 {
473         u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);
474
475         if (enable) {
476                 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
477                 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
478                 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
479         } else {
480                 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
481                 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
482                 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
483         }
484
485         REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
486 }
487
488 static void host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
489 {
490         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
491                 host_ss_rst_drive_37xx(vdev, enable);
492         else
493                 host_ss_rst_drive_40xx(vdev, enable);
494 }
495
/* Convenience wrapper: drive the host subsystem reset lines to the enabled state. */
static void host_ss_rst_enable(struct ivpu_device *vdev)
{
        host_ss_rst_drive(vdev, true);
}
500
501 static void host_ss_noc_qreqn_top_socmmio_drive_37xx(struct ivpu_device *vdev, bool enable)
502 {
503         u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
504
505         if (enable)
506                 val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
507         else
508                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
509         REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
510 }
511
512 static void host_ss_noc_qreqn_top_socmmio_drive_40xx(struct ivpu_device *vdev, bool enable)
513 {
514         u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
515
516         if (enable)
517                 val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
518         else
519                 val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
520         REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
521 }
522
523 static void host_ss_noc_qreqn_top_socmmio_drive(struct ivpu_device *vdev, bool enable)
524 {
525         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
526                 host_ss_noc_qreqn_top_socmmio_drive_37xx(vdev, enable);
527         else
528                 host_ss_noc_qreqn_top_socmmio_drive_40xx(vdev, enable);
529 }
530
/*
 * Perform the host AXI Q-channel handshake: raise/lower the request line,
 * then verify the accept line followed it and the transition was not denied.
 * Returns 0 on success, -EIO when either check fails.
 */
static int host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;

        host_ss_noc_qreqn_top_socmmio_drive(vdev, enable);

        ret = host_ss_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed HOST SS NOC QACCEPTN check: %d\n", ret);
                return ret;
        }

        ret = host_ss_noc_qdeny_check(vdev, 0x0);
        if (ret)
                ivpu_err(vdev, "Failed HOST SS NOC QDENY check: %d\n", ret);

        return ret;
}
549
550 static void top_noc_qreqn_drive_40xx(struct ivpu_device *vdev, bool enable)
551 {
552         u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
553
554         if (enable) {
555                 val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
556                 val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
557         } else {
558                 val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
559                 val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
560         }
561
562         REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
563 }
564
565 static void top_noc_qreqn_drive_37xx(struct ivpu_device *vdev, bool enable)
566 {
567         u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
568
569         if (enable) {
570                 val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
571                 val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
572         } else {
573                 val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
574                 val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
575         }
576
577         REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val);
578 }
579
580 static void top_noc_qreqn_drive(struct ivpu_device *vdev, bool enable)
581 {
582         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
583                 top_noc_qreqn_drive_37xx(vdev, enable);
584         else
585                 top_noc_qreqn_drive_40xx(vdev, enable);
586 }
587
/* Enable the host AXI interface via the Q-channel handshake; 0 on success. */
int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev)
{
        return host_ss_axi_drive(vdev, true);
}
592
593 static int top_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
594 {
595         u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN);
596
597         if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
598             !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
599                 return -EIO;
600
601         return 0;
602 }
603
604 static int top_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
605 {
606         u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);
607
608         if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
609             !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
610                 return -EIO;
611
612         return 0;
613 }
614
615 static int top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
616 {
617         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
618                 return top_noc_qacceptn_check_37xx(vdev, exp_val);
619         else
620                 return top_noc_qacceptn_check_40xx(vdev, exp_val);
621 }
622
623 static int top_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
624 {
625         u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY);
626
627         if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
628             !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
629                 return -EIO;
630
631         return 0;
632 }
633
634 static int top_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
635 {
636         u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);
637
638         if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
639             !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
640                 return -EIO;
641
642         return 0;
643 }
644
645 static int top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
646 {
647         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
648                 return top_noc_qdeny_check_37xx(vdev, exp_val);
649         else
650                 return top_noc_qdeny_check_40xx(vdev, exp_val);
651 }
652
/*
 * Perform the TOP NOC Q-channel handshake: raise/lower the request lines,
 * then verify the accept lines followed and the transition was not denied.
 * Returns 0 on success, -EIO when either check fails.
 */
static int top_noc_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;

        top_noc_qreqn_drive(vdev, enable);

        ret = top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed TOP NOC QACCEPTN check: %d\n", ret);
                return ret;
        }

        ret = top_noc_qdeny_check(vdev, 0x0);
        if (ret)
                ivpu_err(vdev, "Failed TOP NOC QDENY check: %d\n", ret);

        return ret;
}
671
/* Enable the TOP NOC via the Q-channel handshake; 0 on success. */
int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev)
{
        return top_noc_drive(vdev, true);
}
676
677 static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable)
678 {
679         u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);
680
681         if (enable)
682                 val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
683         else
684                 val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
685
686         REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
687 }
688
/*
 * Power up the NPU CPU power domain in the required order: program 50xx
 * delays, enable the island, wait for its status, confirm the TOP NOC
 * request lines are idle, then ungate clocks, drop isolation and release
 * resets. On 37xx also raise the DPU_ACTIVE indication.
 * Returns 0 on success or a negative errno.
 */
int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev)
{
        int ret;

        if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_50XX)
                pwr_island_delay_set_50xx(vdev);

        pwr_island_enable(vdev);

        ret = wait_for_pwr_island_status(vdev, 0x1);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for power island status\n");
                return ret;
        }

        ret = top_noc_qreqn_check(vdev, 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed TOP NOC QREQN check %d\n", ret);
                return ret;
        }

        /* Order matters: clocks before isolation removal, resets last. */
        host_ss_clk_enable(vdev);
        pwr_island_isolation_disable(vdev);
        host_ss_rst_enable(vdev);

        if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
                dpu_active_drive_37xx(vdev, true);

        return ret;
}
719
720 u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev)
721 {
722         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
723                 return REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
724         else
725                 return REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT);
726 }
727
/*
 * Configure the 37xx TCU page-table-walk snoop overrides: overrides are
 * enabled, writes (AW) keep snooping, and reads (AR) snoop only when
 * force-snoop is enabled (37xx uses inverted NOSNOOP semantics).
 */
static void ivpu_hw_ip_snoop_disable_37xx(struct ivpu_device *vdev)
{
        u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);

        val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
        val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);

        if (ivpu_is_force_snoop_enabled(vdev))
                val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
        else
                val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);

        REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
742
/*
 * Configure the 40xx TCU page-table-walk snoop overrides: overrides are
 * enabled, writes (AW) always snoop, and reads (AR) snoop only when
 * force-snoop is enabled (40xx uses direct SNOOP semantics, unlike 37xx).
 */
static void ivpu_hw_ip_snoop_disable_40xx(struct ivpu_device *vdev)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);

        val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
        val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);

        if (ivpu_is_force_snoop_enabled(vdev))
                val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
        else
                val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);

        REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
757
758 void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev)
759 {
760         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
761                 return ivpu_hw_ip_snoop_disable_37xx(vdev);
762         else
763                 return ivpu_hw_ip_snoop_disable_40xx(vdev);
764 }
765
766 static void ivpu_hw_ip_tbu_mmu_enable_37xx(struct ivpu_device *vdev)
767 {
768         u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);
769
770         val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
771         val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
772         val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
773         val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
774
775         REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
776 }
777
778 static void ivpu_hw_ip_tbu_mmu_enable_40xx(struct ivpu_device *vdev)
779 {
780         u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);
781
782         val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
783         val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
784         val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
785         val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
786         val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
787         val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
788
789         REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
790 }
791
792 void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev)
793 {
794         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
795                 return ivpu_hw_ip_tbu_mmu_enable_37xx(vdev);
796         else
797                 return ivpu_hw_ip_tbu_mmu_enable_40xx(vdev);
798 }
799
/*
 * Boot the 37xx SoC CPU (LEON RT): configure the reset/resume vector bits,
 * pulse IRQI_RESUME0, then hand the firmware entry point to the bootloader
 * through the LOADING_ADDRESS register and flag it as ready (DONE).
 * The exact write ordering below is the boot protocol — do not reorder.
 * Always returns 0.
 */
static int soc_cpu_boot_37xx(struct ivpu_device *vdev)
{
        u32 val;

        val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
        val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);

        val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
        REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

        /* Pulse IRQI_RESUME0: set then clear, with a register write for each edge. */
        val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
        REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

        val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
        REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

        /* Entry point is passed shifted right by 9 — presumably 512-byte aligned; confirm. */
        val = vdev->fw->entry_point >> 9;
        REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

        val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
        REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

        ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
                 vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");

        return 0;
}
827
828 static int cpu_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
829 {
830         u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);
831
832         if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val))
833                 return -EIO;
834
835         return 0;
836 }
837
838 static int cpu_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
839 {
840         u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);
841
842         if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val))
843                 return -EIO;
844
845         return 0;
846 }
847
848 static void cpu_noc_top_mmio_drive_40xx(struct ivpu_device *vdev, bool enable)
849 {
850         u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);
851
852         if (enable)
853                 val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
854         else
855                 val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
856         REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);
857 }
858
859 static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable)
860 {
861         int ret;
862
863         cpu_noc_top_mmio_drive_40xx(vdev, enable);
864
865         ret = cpu_noc_qacceptn_check_40xx(vdev, enable ? 0x1 : 0x0);
866         if (ret) {
867                 ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
868                 return ret;
869         }
870
871         ret = cpu_noc_qdeny_check_40xx(vdev, 0x0);
872         if (ret)
873                 ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
874
875         return ret;
876 }
877
/* Enable the 40xx SoC CPU NOC interface (drive qreqn, verify handshake). */
static int soc_cpu_enable(struct ivpu_device *vdev)
{
	return soc_cpu_drive_40xx(vdev, true);
}
882
/*
 * Enable the 40xx SoC CPU and hand it the firmware entry point.
 *
 * The entry point is shifted into the IMAGE_LOCATION field of the
 * VERIFICATION_ADDRESS registers (ffs(mask) - 1 gives the field's bit
 * offset), then DONE is set to mark the address valid.
 *
 * Return: 0 on success, negative errno if the CPU NOC handshake fails.
 */
static int soc_cpu_boot_40xx(struct ivpu_device *vdev)
{
	int ret;
	u32 val;
	u64 val64;

	ret = soc_cpu_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
		return ret;
	}

	/* Position the entry point at the IMAGE_LOCATION field offset */
	val64 = vdev->fw->entry_point;
	val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
	REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);

	/* Read back the low word and set DONE to commit the address */
	val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
	val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
	REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);

	ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
		 ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume");

	return 0;
}
908
909 int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev)
910 {
911         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
912                 return soc_cpu_boot_37xx(vdev);
913         else
914                 return soc_cpu_boot_40xx(vdev);
915 }
916
/*
 * Disable the 37xx CPU watchdog timer.
 *
 * Each timer register write must be preceded by writing TIM_SAFE_ENABLE
 * to TIM_SAFE to unlock the register bank.
 */
static void wdt_disable_37xx(struct ivpu_device *vdev)
{
	u32 val;

	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);

	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
}
934
/*
 * Disable the 40xx CPU watchdog timer.
 *
 * Same sequence as wdt_disable_37xx(): each timer register write is
 * preceded by a TIM_SAFE_ENABLE unlock write.
 */
static void wdt_disable_40xx(struct ivpu_device *vdev)
{
	u32 val;

	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);

	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
}
949
950 void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev)
951 {
952         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
953                 return wdt_disable_37xx(vdev);
954         else
955                 return wdt_disable_40xx(vdev);
956 }
957
958 static u32 ipc_rx_count_get_37xx(struct ivpu_device *vdev)
959 {
960         u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
961
962         return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
963 }
964
965 static u32 ipc_rx_count_get_40xx(struct ivpu_device *vdev)
966 {
967         u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
968
969         return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
970 }
971
972 u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev)
973 {
974         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
975                 return ipc_rx_count_get_37xx(vdev);
976         else
977                 return ipc_rx_count_get_40xx(vdev);
978 }
979
/*
 * Unmask NPU core IRQ sources: the firewall-violation interface IRQs
 * first, then the ICB (interrupt control block) sources for the
 * detected HW generation.
 */
void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_37XX);
		REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_37XX);
	} else {
		REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_40XX);
		REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_40XX);
	}
}
990
991 void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev)
992 {
993         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
994                 REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
995                 REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
996         } else {
997                 REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
998                 REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
999         }
1000 }
1001
1002 static void diagnose_failure_37xx(struct ivpu_device *vdev)
1003 {
1004         u32 reg = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;
1005
1006         if (ipc_rx_count_get_37xx(vdev))
1007                 ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
1008
1009         if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
1010                 ivpu_err(vdev, "WDT MSS timeout detected\n");
1011
1012         if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
1013                 ivpu_err(vdev, "WDT NCE timeout detected\n");
1014
1015         if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
1016                 ivpu_err(vdev, "NOC Firewall irq detected\n");
1017 }
1018
1019 static void diagnose_failure_40xx(struct ivpu_device *vdev)
1020 {
1021         u32 reg = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;
1022
1023         if (ipc_rx_count_get_40xx(vdev))
1024                 ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
1025
1026         if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
1027                 ivpu_err(vdev, "WDT MSS timeout detected\n");
1028
1029         if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
1030                 ivpu_err(vdev, "WDT NCE timeout detected\n");
1031
1032         if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
1033                 ivpu_err(vdev, "NOC Firewall irq detected\n");
1034 }
1035
1036 void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev)
1037 {
1038         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1039                 diagnose_failure_37xx(vdev);
1040         else
1041                 diagnose_failure_40xx(vdev);
1042 }
1043
1044 void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev)
1045 {
1046         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1047                 REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_37XX);
1048         else
1049                 REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_40XX);
1050 }
1051
/* NCE watchdog fired: request device recovery. */
static void irq_wdt_nce_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
1056
/*
 * MSS watchdog fired: disable the watchdog first so it cannot re-fire
 * while recovery is in progress, then request device recovery.
 */
static void irq_wdt_mss_handler(struct ivpu_device *vdev)
{
	ivpu_hw_ip_wdt_disable(vdev);
	ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
1062
/* NOC firewall violation: request device recovery. */
static void irq_noc_firewall_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
}
1067
/*
 * Handler for IRQs from the NPU core (37xx).
 *
 * Reads ICB_STATUS_0 masked to the sources we enabled, acks all pending
 * bits in one write before dispatching, then calls each source's
 * handler.  The @irq argument is unused here (kept for the common
 * handler signature).
 *
 * Return: true if any masked IRQ was pending, false otherwise.
 */
bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq)
{
	u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;

	if (!status)
		return false;

	/* Ack all pending sources up front so new edges are not lost */
	REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		irq_noc_firewall_handler(vdev);

	return true;
}
1101
/*
 * Handler for IRQs from the NPU core (40xx).
 *
 * Mirrors ivpu_hw_ip_irq_handler_37xx(): read masked status, ack all
 * pending bits in one write, then dispatch per source.  @irq is unused.
 *
 * Return: true if any masked IRQ was pending, false otherwise.
 */
bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq)
{
	u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;

	if (!status)
		return false;

	/* Ack all pending sources up front so new edges are not lost */
	REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		irq_noc_firewall_handler(vdev);

	return true;
}
1135
/*
 * Ring doorbell @db_id on 37xx.  The doorbell registers form an array
 * with a fixed stride (derived from consecutive register offsets); the
 * SET bit is written to the db_id-th instance via the indexed write.
 */
static void db_set_37xx(struct ivpu_device *vdev, u32 db_id)
{
	u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
	u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
1143
/*
 * Ring doorbell @db_id on 40xx.  Same stride-indexed write scheme as
 * db_set_37xx(), using the 40xx doorbell register offsets.
 */
static void db_set_40xx(struct ivpu_device *vdev, u32 db_id)
{
	u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
	u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
1151
1152 void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id)
1153 {
1154         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1155                 db_set_37xx(vdev, db_id);
1156         else
1157                 db_set_40xx(vdev, db_id);
1158 }
1159
1160 u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev)
1161 {
1162         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1163                 return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
1164         else
1165                 return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
1166 }
1167
1168 void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
1169 {
1170         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1171                 REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
1172         else
1173                 REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
1174 }
This page took 0.097301 seconds and 4 git commands to generate.