1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020 - 2024 Intel Corporation
4  */
5
6 #include "ivpu_drv.h"
7 #include "ivpu_hw.h"
8 #include "ivpu_hw_btrs.h"
9 #include "ivpu_hw_ip.h"
10
11 #include <linux/dmi.h>
12
13 static char *platform_to_str(u32 platform)
14 {
15         switch (platform) {
16         case IVPU_PLATFORM_SILICON:
17                 return "SILICON";
18         case IVPU_PLATFORM_SIMICS:
19                 return "SIMICS";
20         case IVPU_PLATFORM_FPGA:
21                 return "FPGA";
22         default:
23                 return "Invalid platform";
24         }
25 }
26
/*
 * DMI board identifiers that mark the device as running under the Simics
 * simulator rather than on real silicon; matched in platform_init().
 */
static const struct dmi_system_id dmi_platform_simulation[] = {
        {
                .ident = "Intel Simics",
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
                        DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
                        DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
                },
        },
        {
                .ident = "Intel Simics",
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "Simics"),
                },
        },
        { } /* terminator */
};
44
45 static void platform_init(struct ivpu_device *vdev)
46 {
47         if (dmi_check_system(dmi_platform_simulation))
48                 vdev->platform = IVPU_PLATFORM_SIMICS;
49         else
50                 vdev->platform = IVPU_PLATFORM_SILICON;
51
52         ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
53                  platform_to_str(vdev->platform), vdev->platform);
54 }
55
/*
 * Select hardware workarounds based on the IP/buttress generation and
 * silicon revision, then log which ones are active.
 */
static void wa_init(struct ivpu_device *vdev)
{
        /* No PUNIT interaction on FPGA platforms. */
        vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
        vdev->wa.clear_runtime_mem = false;

        /* MTL buttress reports whether IRQs must be cleared by writing 0. */
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev);

        /* Pre-B0 LNL steppings must not relinquish the clock. */
        if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL &&
            ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0)
                vdev->wa.disable_clock_relinquish = true;

        /* 37XX needs an explicit WP0 request early in power up. */
        if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
                vdev->wa.wp0_during_power_up = true;

        IVPU_PRINT_WA(punit_disabled);
        IVPU_PRINT_WA(clear_runtime_mem);
        IVPU_PRINT_WA(interrupt_clear_with_0);
        IVPU_PRINT_WA(disable_clock_relinquish);
        IVPU_PRINT_WA(wp0_during_power_up);
}
77
/*
 * Choose firmware interaction timeouts for the detected platform
 * (-1 disables the given timeout). Simulated platforms are far slower
 * than silicon and therefore get much larger values.
 */
static void timeouts_init(struct ivpu_device *vdev)
{
        if (ivpu_test_mode & IVPU_TEST_MODE_DISABLE_TIMEOUTS) {
                /* Test mode: disable every timeout. */
                vdev->timeout.boot = -1;
                vdev->timeout.jsm = -1;
                vdev->timeout.tdr = -1;
                vdev->timeout.autosuspend = -1;
                vdev->timeout.d0i3_entry_msg = -1;
        } else if (ivpu_is_fpga(vdev)) {
                /* FPGA emulation: slowest platform, no autosuspend. */
                vdev->timeout.boot = 100000;
                vdev->timeout.jsm = 50000;
                vdev->timeout.tdr = 2000000;
                vdev->timeout.autosuspend = -1;
                vdev->timeout.d0i3_entry_msg = 500;
        } else if (ivpu_is_simics(vdev)) {
                /* Simics simulation, no autosuspend. */
                vdev->timeout.boot = 50;
                vdev->timeout.jsm = 500;
                vdev->timeout.tdr = 10000;
                vdev->timeout.autosuspend = -1;
                vdev->timeout.d0i3_entry_msg = 100;
        } else {
                /* Real silicon. */
                vdev->timeout.boot = 1000;
                vdev->timeout.jsm = 500;
                vdev->timeout.tdr = 2000;
                if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
                        vdev->timeout.autosuspend = 10;
                else
                        vdev->timeout.autosuspend = 100;
                vdev->timeout.d0i3_entry_msg = 5;
        }
}
109
/*
 * Set up the NPU address ranges (global, user, shave, dma). The 37XX IP
 * generation uses a different layout than later generations.
 */
static void memory_ranges_init(struct ivpu_device *vdev)
{
        if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
                ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
                ivpu_hw_range_init(&vdev->hw->ranges.user,   0xc0000000, 255 * SZ_1M);
                ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
                ivpu_hw_range_init(&vdev->hw->ranges.dma,   0x200000000, SZ_8G);
        } else {
                /* 40XX+: user and shave share the 2 GB region above 0x80000000. */
                ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
                ivpu_hw_range_init(&vdev->hw->ranges.user,   0x80000000, SZ_256M);
                ivpu_hw_range_init(&vdev->hw->ranges.shave,  0x80000000 + SZ_256M, SZ_2G - SZ_256M);
                ivpu_hw_range_init(&vdev->hw->ranges.dma,   0x200000000, SZ_8G);
        }
}
124
125 static int wp_enable(struct ivpu_device *vdev)
126 {
127         return ivpu_hw_btrs_wp_drive(vdev, true);
128 }
129
130 static int wp_disable(struct ivpu_device *vdev)
131 {
132         return ivpu_hw_btrs_wp_drive(vdev, false);
133 }
134
/*
 * Power up the NPU: drive the workpoint, leave D0i3, then bring up the
 * host subsystem, power domain, AXI and TOP NOC, in that order.
 *
 * The WP0 workaround write and the D0i3 disable only warn on failure;
 * every later step aborts the sequence.
 *
 * Return: 0 on success, negative errno on failure.
 */
int ivpu_hw_power_up(struct ivpu_device *vdev)
{
        int ret;

        if (IVPU_WA(wp0_during_power_up)) {
                /* WP requests may fail when powering down, so issue WP 0 here */
                ret = wp_disable(vdev);
                if (ret)
                        ivpu_warn(vdev, "Failed to disable workpoint: %d\n", ret);
        }

        ret = ivpu_hw_btrs_d0i3_disable(vdev);
        if (ret)
                ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

        ret = wp_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable workpoint: %d\n", ret);
                return ret;
        }

        /* LNL-and-newer buttress: clock-relinquish WA, profiling freq, ATS info. */
        if (ivpu_hw_btrs_gen(vdev) >= IVPU_HW_BTRS_LNL) {
                if (IVPU_WA(disable_clock_relinquish))
                        ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
                ivpu_hw_btrs_profiling_freq_reg_set_lnl(vdev);
                ivpu_hw_btrs_ats_print_lnl(vdev);
        }

        ret = ivpu_hw_ip_host_ss_configure(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
                return ret;
        }

        ivpu_hw_ip_idle_gen_disable(vdev);

        ret = ivpu_hw_btrs_wait_for_clock_res_own_ack(vdev);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for clock resource own ACK\n");
                return ret;
        }

        ret = ivpu_hw_ip_pwr_domain_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
                return ret;
        }

        ret = ivpu_hw_ip_host_ss_axi_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
                return ret;
        }

        /* LNL only: set port arbitration weights before enabling TOP NOC. */
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_LNL)
                ivpu_hw_btrs_set_port_arbitration_weights_lnl(vdev);

        ret = ivpu_hw_ip_top_noc_enable(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

        return ret;
}
198
/* Capture matching host (boottime) and NPU (perf counter) timestamps at D0i3 entry. */
static void save_d0i3_entry_timestamp(struct ivpu_device *vdev)
{
        vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
        vdev->hw->d0i3_entry_vpu_ts = ivpu_hw_ip_read_perf_timer_counter(vdev);
}
204
205 int ivpu_hw_reset(struct ivpu_device *vdev)
206 {
207         int ret = 0;
208
209         if (ivpu_hw_btrs_ip_reset(vdev)) {
210                 ivpu_err(vdev, "Failed to reset NPU IP\n");
211                 ret = -EIO;
212         }
213
214         if (wp_disable(vdev)) {
215                 ivpu_err(vdev, "Failed to disable workpoint\n");
216                 ret = -EIO;
217         }
218
219         return ret;
220 }
221
222 int ivpu_hw_power_down(struct ivpu_device *vdev)
223 {
224         int ret = 0;
225
226         save_d0i3_entry_timestamp(vdev);
227
228         if (!ivpu_hw_is_idle(vdev))
229                 ivpu_warn(vdev, "NPU not idle during power down\n");
230
231         if (ivpu_hw_reset(vdev)) {
232                 ivpu_err(vdev, "Failed to reset NPU\n");
233                 ret = -EIO;
234         }
235
236         if (ivpu_hw_btrs_d0i3_enable(vdev)) {
237                 ivpu_err(vdev, "Failed to enter D0I3\n");
238                 ret = -EIO;
239         }
240
241         return ret;
242 }
243
/*
 * One-time HW state initialization: buttress info and frequency ratios,
 * address ranges, platform detection, workarounds and timeouts.
 *
 * Return: always 0.
 */
int ivpu_hw_init(struct ivpu_device *vdev)
{
        ivpu_hw_btrs_info_init(vdev);
        ivpu_hw_btrs_freq_ratios_init(vdev);
        memory_ranges_init(vdev);
        platform_init(vdev);
        /* wa_init() and timeouts_init() depend on the detected platform. */
        wa_init(vdev);
        timeouts_init(vdev);

        return 0;
}
255
/*
 * Start firmware execution: disable snooping, enable the TBU/MMU path,
 * then boot the SOC CPU.
 *
 * Return: 0 on success, negative errno from ivpu_hw_ip_soc_cpu_boot().
 */
int ivpu_hw_boot_fw(struct ivpu_device *vdev)
{
        int ret;

        ivpu_hw_ip_snoop_disable(vdev);
        ivpu_hw_ip_tbu_mmu_enable(vdev);
        ret = ivpu_hw_ip_soc_cpu_boot(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);

        return ret;
}
268
269 void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
270 {
271         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
272                 vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
273                 return;
274         }
275
276         if (enable)
277                 vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH;
278         else
279                 vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
280 }
281
282 void ivpu_irq_handlers_init(struct ivpu_device *vdev)
283 {
284         INIT_KFIFO(vdev->hw->irq.fifo);
285
286         if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
287                 vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
288         else
289                 vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_40xx;
290
291         if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
292                 vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_mtl;
293         else
294                 vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_lnl;
295 }
296
/* Drop any stale IRQ events from the fifo, then unmask IP and buttress IRQs. */
void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
        kfifo_reset(&vdev->hw->irq.fifo);
        ivpu_hw_ip_irq_enable(vdev);
        ivpu_hw_btrs_irq_enable(vdev);
}
303
/* Mask IRQs in reverse order of ivpu_hw_irq_enable(): buttress first, then IP. */
void ivpu_hw_irq_disable(struct ivpu_device *vdev)
{
        ivpu_hw_btrs_irq_disable(vdev);
        ivpu_hw_ip_irq_disable(vdev);
}
309
310 irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
311 {
312         struct ivpu_device *vdev = ptr;
313         bool ip_handled, btrs_handled;
314
315         ivpu_hw_btrs_global_int_disable(vdev);
316
317         btrs_handled = ivpu_hw_btrs_irq_handler(vdev, irq);
318         if (!ivpu_hw_is_idle((vdev)) || !btrs_handled)
319                 ip_handled = ivpu_hw_ip_irq_handler(vdev, irq);
320         else
321                 ip_handled = false;
322
323         /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
324         ivpu_hw_btrs_global_int_enable(vdev);
325
326         if (!kfifo_is_empty(&vdev->hw->irq.fifo))
327                 return IRQ_WAKE_THREAD;
328         if (ip_handled || btrs_handled)
329                 return IRQ_HANDLED;
330         return IRQ_NONE;
331 }