drm/amdgpu: stop data_exchange work thread before reset
[linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_device.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
43 #include "atom.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
46 #include "amd_pcie.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
48 #include "si.h"
49 #endif
50 #ifdef CONFIG_DRM_AMDGPU_CIK
51 #include "cik.h"
52 #endif
53 #include "vi.h"
54 #include "soc15.h"
55 #include "nv.h"
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
60
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
63
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68
69 #include <linux/suspend.h>
70 #include <drm/task_barrier.h>
71 #include <linux/pm_runtime.h>
72
73 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
74 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
75 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
76 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
83
84 #define AMDGPU_RESUME_MS                2000
85
86 const char *amdgpu_asic_name[] = {
87         "TAHITI",
88         "PITCAIRN",
89         "VERDE",
90         "OLAND",
91         "HAINAN",
92         "BONAIRE",
93         "KAVERI",
94         "KABINI",
95         "HAWAII",
96         "MULLINS",
97         "TOPAZ",
98         "TONGA",
99         "FIJI",
100         "CARRIZO",
101         "STONEY",
102         "POLARIS10",
103         "POLARIS11",
104         "POLARIS12",
105         "VEGAM",
106         "VEGA10",
107         "VEGA12",
108         "VEGA20",
109         "RAVEN",
110         "ARCTURUS",
111         "RENOIR",
112         "NAVI10",
113         "NAVI14",
114         "NAVI12",
115         "SIENNA_CICHLID",
116         "NAVY_FLOUNDER",
117         "LAST",
118 };
119
120 /**
121  * DOC: pcie_replay_count
122  *
123  * The amdgpu driver provides a sysfs API for reporting the total number
124  * of PCIe replays (NAKs)
125  * The file pcie_replay_count is used for this and returns the total
126  * number of replays as a sum of the NAKs generated and NAKs received
127  */
128
129 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
130                 struct device_attribute *attr, char *buf)
131 {
132         struct drm_device *ddev = dev_get_drvdata(dev);
133         struct amdgpu_device *adev = drm_to_adev(ddev);
134         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
135
136         return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
137 }
138
139 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
140                 amdgpu_device_get_pcie_replay_count, NULL);
141
142 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
143
144 /**
145  * DOC: product_name
146  *
147  * The amdgpu driver provides a sysfs API for reporting the product name
148  * for the device
149  * The file product_name is used for this and returns the product name
150  * as returned from the FRU.
151  * NOTE: This is only available for certain server cards
152  */
153
154 static ssize_t amdgpu_device_get_product_name(struct device *dev,
155                 struct device_attribute *attr, char *buf)
156 {
157         struct drm_device *ddev = dev_get_drvdata(dev);
158         struct amdgpu_device *adev = drm_to_adev(ddev);
159
160         return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
161 }
162
163 static DEVICE_ATTR(product_name, S_IRUGO,
164                 amdgpu_device_get_product_name, NULL);
165
166 /**
167  * DOC: product_number
168  *
169  * The amdgpu driver provides a sysfs API for reporting the part number
170  * for the device
171  * The file product_number is used for this and returns the part number
172  * as returned from the FRU.
173  * NOTE: This is only available for certain server cards
174  */
175
176 static ssize_t amdgpu_device_get_product_number(struct device *dev,
177                 struct device_attribute *attr, char *buf)
178 {
179         struct drm_device *ddev = dev_get_drvdata(dev);
180         struct amdgpu_device *adev = drm_to_adev(ddev);
181
182         return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
183 }
184
185 static DEVICE_ATTR(product_number, S_IRUGO,
186                 amdgpu_device_get_product_number, NULL);
187
188 /**
189  * DOC: serial_number
190  *
191  * The amdgpu driver provides a sysfs API for reporting the serial number
192  * for the device
193  * The file serial_number is used for this and returns the serial number
194  * as returned from the FRU.
195  * NOTE: This is only available for certain server cards
196  */
197
198 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
199                 struct device_attribute *attr, char *buf)
200 {
201         struct drm_device *ddev = dev_get_drvdata(dev);
202         struct amdgpu_device *adev = drm_to_adev(ddev);
203
204         return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
205 }
206
207 static DEVICE_ATTR(serial_number, S_IRUGO,
208                 amdgpu_device_get_serial_number, NULL);
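
/*
 * Usage note (illustrative, not part of the driver itself): the four
 * attributes above (product_name, product_number, serial_number and
 * pcie_replay_count) appear as read-only files in the device's sysfs
 * directory, so on a typical system they can be read with something like
 * (path is an example only):
 *
 *     cat /sys/class/drm/card0/device/serial_number
 */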
209
210 /**
211  * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
212  *
213  * @dev: drm_device pointer
214  *
215  * Returns true if the device is a dGPU with HG/PX power control,
216  * otherwise return false.
217  */
218 bool amdgpu_device_supports_boco(struct drm_device *dev)
219 {
220         struct amdgpu_device *adev = drm_to_adev(dev);
221
222         if (adev->flags & AMD_IS_PX)
223                 return true;
224         return false;
225 }
226
227 /**
228  * amdgpu_device_supports_baco - Does the device support BACO
229  *
230  * @dev: drm_device pointer
231  *
232  * Returns true if the device supports BACO,
233  * otherwise return false.
234  */
235 bool amdgpu_device_supports_baco(struct drm_device *dev)
236 {
237         struct amdgpu_device *adev = drm_to_adev(dev);
238
239         return amdgpu_asic_supports_baco(adev);
240 }
241
242 /**
243  * VRAM access helper functions.
244  *
245  * amdgpu_device_vram_access - read/write a buffer in vram
246  *
247  * @adev: amdgpu_device pointer
248  * @pos: offset of the buffer in vram
249  * @buf: virtual address of the buffer in system memory
250  * @size: read/write size in bytes, sizeof(@buf) must be > @size
251  * @write: true - write to vram, otherwise - read from vram
252  */
253 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
254                                uint32_t *buf, size_t size, bool write)
255 {
256         unsigned long flags;
257         uint32_t hi = ~0;
258         uint64_t last;
259
260
261 #ifdef CONFIG_64BIT
262         last = min(pos + size, adev->gmc.visible_vram_size);
263         if (last > pos) {
264                 void __iomem *addr = adev->mman.aper_base_kaddr + pos;
265                 size_t count = last - pos;
266
267                 if (write) {
268                         memcpy_toio(addr, buf, count);
269                         mb();
270                         amdgpu_asic_flush_hdp(adev, NULL);
271                 } else {
272                         amdgpu_asic_invalidate_hdp(adev, NULL);
273                         mb();
274                         memcpy_fromio(buf, addr, count);
275                 }
276
277                 if (count == size)
278                         return;
279
280                 pos += count;
281                 buf += count / 4;
282                 size -= count;
283         }
284 #endif
285
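        /*
         * Fallback path: anything not covered by the CPU-visible aperture
         * above goes through the indirect MM_INDEX/MM_DATA window. MM_INDEX
         * takes the low VRAM address bits, with the 0x80000000 flag marking
         * this as a VRAM (rather than register) access, MM_INDEX_HI holds the
         * upper address bits, and MM_DATA is the data port.
         */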
286         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
287         for (last = pos + size; pos < last; pos += 4) {
288                 uint32_t tmp = pos >> 31;
289
290                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
291                 if (tmp != hi) {
292                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
293                         hi = tmp;
294                 }
295                 if (write)
296                         WREG32_NO_KIQ(mmMM_DATA, *buf++);
297                 else
298                         *buf++ = RREG32_NO_KIQ(mmMM_DATA);
299         }
300         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
301 }
302
303 /*
304  * MMIO register access helper functions.
305  */
306 /**
307  * amdgpu_mm_rreg - read a memory mapped IO register
308  *
309  * @adev: amdgpu_device pointer
310  * @reg: dword aligned register offset
311  * @acc_flags: access flags which require special behavior
312  *
313  * Returns the 32 bit value from the offset specified.
314  */
315 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
316                         uint32_t acc_flags)
317 {
318         uint32_t ret;
319
320         if (adev->in_pci_err_recovery)
321                 return 0;
322
323         if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
324             down_read_trylock(&adev->reset_sem)) {
325                 ret = amdgpu_kiq_rreg(adev, reg);
326                 up_read(&adev->reset_sem);
327                 return ret;
328         }
329
330         if ((reg * 4) < adev->rmmio_size)
331                 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
332         else {
333                 unsigned long flags;
334
335                 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
336                 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
337                 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
338                 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
339         }
340
341         trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
342         return ret;
343 }
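
/*
 * Note: callers normally use the RREG32()/WREG32() macro family (as defined
 * in amdgpu.h) rather than calling amdgpu_mm_rreg()/amdgpu_mm_wreg() directly;
 * the *_NO_KIQ variants pass AMDGPU_REGS_NO_KIQ so that the SR-IOV KIQ path
 * above is bypassed.
 */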
344
345 /*
346  * MMIO register read with bytes helper function
347  * @offset: byte offset from MMIO start
348  *
349  */
350
351 /**
352  * amdgpu_mm_rreg8 - read a memory mapped IO register
353  *
354  * @adev: amdgpu_device pointer
355  * @offset: byte aligned register offset
356  *
357  * Returns the 8 bit value from the offset specified.
358  */
359 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
360 {
361         if (adev->in_pci_err_recovery)
362                 return 0;
363
364         if (offset < adev->rmmio_size)
365                 return (readb(adev->rmmio + offset));
366         BUG();
367 }
368
369 /*
370  * MMIO register write with bytes helper function
371  * @offset: byte offset from MMIO start
372  * @value: the value to be written to the register
373  *
374  */
375 /**
376  * amdgpu_mm_wreg8 - write to a memory mapped IO register
377  *
378  * @adev: amdgpu_device pointer
379  * @offset: byte aligned register offset
380  * @value: 8 bit value to write
381  *
382  * Writes the value specified to the offset specified.
383  */
384 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
385 {
386         if (adev->in_pci_err_recovery)
387                 return;
388
389         if (offset < adev->rmmio_size)
390                 writeb(value, adev->rmmio + offset);
391         else
392                 BUG();
393 }
394
395 static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev,
396                                        uint32_t reg, uint32_t v,
397                                        uint32_t acc_flags)
398 {
399         if (adev->in_pci_err_recovery)
400                 return;
401
402         trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
403
404         if ((reg * 4) < adev->rmmio_size)
405                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
406         else {
407                 unsigned long flags;
408
409                 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
410                 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
411                 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
412                 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
413         }
414 }
415
416 /**
417  * amdgpu_mm_wreg - write to a memory mapped IO register
418  *
419  * @adev: amdgpu_device pointer
420  * @reg: dword aligned register offset
421  * @v: 32 bit value to write to the register
422  * @acc_flags: access flags which require special behavior
423  *
424  * Writes the value specified to the offset specified.
425  */
426 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
427                     uint32_t acc_flags)
428 {
429         if (adev->in_pci_err_recovery)
430                 return;
431
432         if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
433             down_read_trylock(&adev->reset_sem)) {
434                 amdgpu_kiq_wreg(adev, reg, v);
435                 up_read(&adev->reset_sem);
436                 return;
437         }
438
439         amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
440 }
441
442 /*
443  * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
444  *
445  * this function is invoked only for debugfs register access
446  */
447 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
448                     uint32_t acc_flags)
449 {
450         if (adev->in_pci_err_recovery)
451                 return;
452
453         if (amdgpu_sriov_fullaccess(adev) &&
454                 adev->gfx.rlc.funcs &&
455                 adev->gfx.rlc.funcs->is_rlcg_access_range) {
456
457                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
458                         return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
459         }
460
461         amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
462 }
463
464 /**
465  * amdgpu_io_rreg - read an IO register
466  *
467  * @adev: amdgpu_device pointer
468  * @reg: dword aligned register offset
469  *
470  * Returns the 32 bit value from the offset specified.
471  */
472 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
473 {
474         if (adev->in_pci_err_recovery)
475                 return 0;
476
477         if ((reg * 4) < adev->rio_mem_size)
478                 return ioread32(adev->rio_mem + (reg * 4));
479         else {
480                 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
481                 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
482         }
483 }
484
485 /**
486  * amdgpu_io_wreg - write to an IO register
487  *
488  * @adev: amdgpu_device pointer
489  * @reg: dword aligned register offset
490  * @v: 32 bit value to write to the register
491  *
492  * Writes the value specified to the offset specified.
493  */
494 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
495 {
496         if (adev->in_pci_err_recovery)
497                 return;
498
499         if ((reg * 4) < adev->rio_mem_size)
500                 iowrite32(v, adev->rio_mem + (reg * 4));
501         else {
502                 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
503                 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
504         }
505 }
506
507 /**
508  * amdgpu_mm_rdoorbell - read a doorbell dword
509  *
510  * @adev: amdgpu_device pointer
511  * @index: doorbell index
512  *
513  * Returns the value in the doorbell aperture at the
514  * requested doorbell index (CIK).
515  */
516 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
517 {
518         if (adev->in_pci_err_recovery)
519                 return 0;
520
521         if (index < adev->doorbell.num_doorbells) {
522                 return readl(adev->doorbell.ptr + index);
523         } else {
524                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
525                 return 0;
526         }
527 }
528
529 /**
530  * amdgpu_mm_wdoorbell - write a doorbell dword
531  *
532  * @adev: amdgpu_device pointer
533  * @index: doorbell index
534  * @v: value to write
535  *
536  * Writes @v to the doorbell aperture at the
537  * requested doorbell index (CIK).
538  */
539 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
540 {
541         if (adev->in_pci_err_recovery)
542                 return;
543
544         if (index < adev->doorbell.num_doorbells) {
545                 writel(v, adev->doorbell.ptr + index);
546         } else {
547                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
548         }
549 }
550
551 /**
552  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
553  *
554  * @adev: amdgpu_device pointer
555  * @index: doorbell index
556  *
557  * Returns the value in the doorbell aperture at the
558  * requested doorbell index (VEGA10+).
559  */
560 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
561 {
562         if (adev->in_pci_err_recovery)
563                 return 0;
564
565         if (index < adev->doorbell.num_doorbells) {
566                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
567         } else {
568                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
569                 return 0;
570         }
571 }
572
573 /**
574  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
575  *
576  * @adev: amdgpu_device pointer
577  * @index: doorbell index
578  * @v: value to write
579  *
580  * Writes @v to the doorbell aperture at the
581  * requested doorbell index (VEGA10+).
582  */
583 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
584 {
585         if (adev->in_pci_err_recovery)
586                 return;
587
588         if (index < adev->doorbell.num_doorbells) {
589                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
590         } else {
591                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
592         }
593 }
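
/*
 * Note: a doorbell write is how the driver tells the GPU that new work (e.g.
 * an updated ring write pointer) is available; per the comments above, the
 * 32-bit accessors are used on CIK-era parts and the 64-bit ones on VEGA10
 * and later.
 */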
594
595 /**
596  * amdgpu_invalid_rreg - dummy reg read function
597  *
598  * @adev: amdgpu device pointer
599  * @reg: offset of register
600  *
601  * Dummy register read function.  Used for register blocks
602  * that certain asics don't have (all asics).
603  * Returns the value in the register.
604  */
605 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
606 {
607         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
608         BUG();
609         return 0;
610 }
611
612 /**
613  * amdgpu_invalid_wreg - dummy reg write function
614  *
615  * @adev: amdgpu device pointer
616  * @reg: offset of register
617  * @v: value to write to the register
618  *
619  * Dummy register write function.  Used for register blocks
620  * that certain asics don't have (all asics).
621  */
622 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
623 {
624         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
625                   reg, v);
626         BUG();
627 }
628
629 /**
630  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
631  *
632  * @adev: amdgpu device pointer
633  * @reg: offset of register
634  *
635  * Dummy register read function.  Used for register blocks
636  * that certain asics don't have (all asics).
637  * Returns the value in the register.
638  */
639 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
640 {
641         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
642         BUG();
643         return 0;
644 }
645
646 /**
647  * amdgpu_invalid_wreg64 - dummy reg write function
648  *
649  * @adev: amdgpu device pointer
650  * @reg: offset of register
651  * @v: value to write to the register
652  *
653  * Dummy register write function.  Used for register blocks
654  * that certain asics don't have (all asics).
655  */
656 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
657 {
658         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
659                   reg, v);
660         BUG();
661 }
662
663 /**
664  * amdgpu_block_invalid_rreg - dummy reg read function
665  *
666  * @adev: amdgpu device pointer
667  * @block: offset of instance
668  * @reg: offset of register
669  *
670  * Dummy register read function.  Used for register blocks
671  * that certain asics don't have (all asics).
672  * Returns the value in the register.
673  */
674 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
675                                           uint32_t block, uint32_t reg)
676 {
677         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
678                   reg, block);
679         BUG();
680         return 0;
681 }
682
683 /**
684  * amdgpu_block_invalid_wreg - dummy reg write function
685  *
686  * @adev: amdgpu device pointer
687  * @block: offset of instance
688  * @reg: offset of register
689  * @v: value to write to the register
690  *
691  * Dummy register write function.  Used for register blocks
692  * that certain asics don't have (all asics).
693  */
694 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
695                                       uint32_t block,
696                                       uint32_t reg, uint32_t v)
697 {
698         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
699                   reg, block, v);
700         BUG();
701 }
702
703 /**
704  * amdgpu_device_asic_init - Wrapper for atom asic_init
705  *
706  * @adev: amdgpu_device pointer
707  *
708  * Does any asic specific work and then calls atom asic init.
709  */
710 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
711 {
712         amdgpu_asic_pre_asic_init(adev);
713
714         return amdgpu_atom_asic_init(adev->mode_info.atom_context);
715 }
716
717 /**
718  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
719  *
720  * @adev: amdgpu device pointer
721  *
722  * Allocates a scratch page of VRAM for use by various things in the
723  * driver.
724  */
725 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
726 {
727         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
728                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
729                                        &adev->vram_scratch.robj,
730                                        &adev->vram_scratch.gpu_addr,
731                                        (void **)&adev->vram_scratch.ptr);
732 }
733
734 /**
735  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
736  *
737  * @adev: amdgpu device pointer
738  *
739  * Frees the VRAM scratch page.
740  */
741 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
742 {
743         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
744 }
745
746 /**
747  * amdgpu_device_program_register_sequence - program an array of registers.
748  *
749  * @adev: amdgpu_device pointer
750  * @registers: pointer to the register array
751  * @array_size: size of the register array
752  *
753  * Programs an array of registers with AND and OR masks.
754  * This is a helper for setting golden registers.
755  */
756 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
757                                              const u32 *registers,
758                                              const u32 array_size)
759 {
760         u32 tmp, reg, and_mask, or_mask;
761         int i;
762
763         if (array_size % 3)
764                 return;
765
766         for (i = 0; i < array_size; i += 3) {
767                 reg = registers[i + 0];
768                 and_mask = registers[i + 1];
769                 or_mask = registers[i + 2];
770
771                 if (and_mask == 0xffffffff) {
772                         tmp = or_mask;
773                 } else {
774                         tmp = RREG32(reg);
775                         tmp &= ~and_mask;
776                         if (adev->family >= AMDGPU_FAMILY_AI)
777                                 tmp |= (or_mask & and_mask);
778                         else
779                                 tmp |= or_mask;
780                 }
781                 WREG32(reg, tmp);
782         }
783 }
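
/*
 * A minimal sketch of how this helper is fed (the offset and mask values
 * below are placeholders, not taken from a real golden settings table): the
 * array is a flat list of {offset, and_mask, or_mask} triplets.
 *
 *     static const u32 example_golden_settings[] = {
 *             0x1234, 0x0000ffff, 0x00000001,
 *     };
 *
 *     amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *                                             ARRAY_SIZE(example_golden_settings));
 */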
784
785 /**
786  * amdgpu_device_pci_config_reset - reset the GPU
787  *
788  * @adev: amdgpu_device pointer
789  *
790  * Resets the GPU using the pci config reset sequence.
791  * Only applicable to asics prior to vega10.
792  */
793 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
794 {
795         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
796 }
797
798 /*
799  * GPU doorbell aperture helpers function.
800  */
801 /**
802  * amdgpu_device_doorbell_init - Init doorbell driver information.
803  *
804  * @adev: amdgpu_device pointer
805  *
806  * Init doorbell driver information (CIK)
807  * Returns 0 on success, error on failure.
808  */
809 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
810 {
811
812         /* No doorbell on SI hardware generation */
813         if (adev->asic_type < CHIP_BONAIRE) {
814                 adev->doorbell.base = 0;
815                 adev->doorbell.size = 0;
816                 adev->doorbell.num_doorbells = 0;
817                 adev->doorbell.ptr = NULL;
818                 return 0;
819         }
820
821         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
822                 return -EINVAL;
823
824         amdgpu_asic_init_doorbell_index(adev);
825
826         /* doorbell bar mapping */
827         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
828         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
829
830         adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
831                                              adev->doorbell_index.max_assignment+1);
832         if (adev->doorbell.num_doorbells == 0)
833                 return -EINVAL;
834
835         /* For Vega, reserve and map two pages on the doorbell BAR since the SDMA
836          * paging queue doorbell uses the second page. The
837          * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
838          * doorbells are in the first page. So with the paging queue enabled,
839          * the max num_doorbells must be extended by one page (0x400 in dwords)
840          */
841         if (adev->asic_type >= CHIP_VEGA10)
842                 adev->doorbell.num_doorbells += 0x400;
843
844         adev->doorbell.ptr = ioremap(adev->doorbell.base,
845                                      adev->doorbell.num_doorbells *
846                                      sizeof(u32));
847         if (adev->doorbell.ptr == NULL)
848                 return -ENOMEM;
849
850         return 0;
851 }
852
853 /**
854  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
855  *
856  * @adev: amdgpu_device pointer
857  *
858  * Tear down doorbell driver information (CIK)
859  */
860 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
861 {
862         iounmap(adev->doorbell.ptr);
863         adev->doorbell.ptr = NULL;
864 }
865
866
867
868 /*
869  * amdgpu_device_wb_*()
870  * Writeback is the method by which the GPU updates special pages in memory
871  * with the status of certain GPU events (fences, ring pointers, etc.).
872  */
873
874 /**
875  * amdgpu_device_wb_fini - Disable Writeback and free memory
876  *
877  * @adev: amdgpu_device pointer
878  *
879  * Disables Writeback and frees the Writeback memory (all asics).
880  * Used at driver shutdown.
881  */
882 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
883 {
884         if (adev->wb.wb_obj) {
885                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
886                                       &adev->wb.gpu_addr,
887                                       (void **)&adev->wb.wb);
888                 adev->wb.wb_obj = NULL;
889         }
890 }
891
892 /**
893  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
894  *
895  * @adev: amdgpu_device pointer
896  *
897  * Initializes writeback and allocates writeback memory (all asics).
898  * Used at driver startup.
899  * Returns 0 on success or a negative error code on failure.
900  */
901 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
902 {
903         int r;
904
905         if (adev->wb.wb_obj == NULL) {
906                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
907                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
908                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
909                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
910                                             (void **)&adev->wb.wb);
911                 if (r) {
912                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
913                         return r;
914                 }
915
916                 adev->wb.num_wb = AMDGPU_MAX_WB;
917                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
918
919                 /* clear wb memory */
920                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
921         }
922
923         return 0;
924 }
925
926 /**
927  * amdgpu_device_wb_get - Allocate a wb entry
928  *
929  * @adev: amdgpu_device pointer
930  * @wb: wb index
931  *
932  * Allocate a wb slot for use by the driver (all asics).
933  * Returns 0 on success or -EINVAL on failure.
934  */
935 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
936 {
937         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
938
939         if (offset < adev->wb.num_wb) {
940                 __set_bit(offset, adev->wb.used);
941                 *wb = offset << 3; /* convert to dw offset */
942                 return 0;
943         } else {
944                 return -EINVAL;
945         }
946 }
947
948 /**
949  * amdgpu_device_wb_free - Free a wb entry
950  *
951  * @adev: amdgpu_device pointer
952  * @wb: wb index
953  *
954  * Free a wb slot allocated for use by the driver (all asics)
955  */
956 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
957 {
958         wb >>= 3;
959         if (wb < adev->wb.num_wb)
960                 __clear_bit(wb, adev->wb.used);
961 }
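
/*
 * A minimal usage sketch (variable names are illustrative): a caller grabs a
 * slot, derives the CPU and GPU views of it from adev->wb, and frees it when
 * done. The value returned in *wb is a dword offset into the writeback page.
 *
 *     u32 wb_offs;
 *
 *     if (!amdgpu_device_wb_get(adev, &wb_offs)) {
 *             volatile u32 *cpu_ptr = &adev->wb.wb[wb_offs];
 *             u64 gpu_addr = adev->wb.gpu_addr + (wb_offs * 4);
 *
 *             // hand gpu_addr to the engine, poll *cpu_ptr for updates ...
 *             amdgpu_device_wb_free(adev, wb_offs);
 *     }
 */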
962
963 /**
964  * amdgpu_device_resize_fb_bar - try to resize FB BAR
965  *
966  * @adev: amdgpu_device pointer
967  *
968  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
969  * to fail, but if any of the BARs is not accessible after the resize we abort
970  * driver loading by returning -ENODEV.
971  */
972 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
973 {
974         u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
975         u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
976         struct pci_bus *root;
977         struct resource *res;
978         unsigned i;
979         u16 cmd;
980         int r;
981
982         /* Bypass for VF */
983         if (amdgpu_sriov_vf(adev))
984                 return 0;
985
986         /* skip if the bios has already enabled large BAR */
987         if (adev->gmc.real_vram_size &&
988             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
989                 return 0;
990
991         /* Check if the root BUS has 64bit memory resources */
992         root = adev->pdev->bus;
993         while (root->parent)
994                 root = root->parent;
995
996         pci_bus_for_each_resource(root, res, i) {
997                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
998                     res->start > 0x100000000ull)
999                         break;
1000         }
1001
1002         /* Trying to resize is pointless without a root hub window above 4GB */
1003         if (!res)
1004                 return 0;
1005
1006         /* Disable memory decoding while we change the BAR addresses and size */
1007         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1008         pci_write_config_word(adev->pdev, PCI_COMMAND,
1009                               cmd & ~PCI_COMMAND_MEMORY);
1010
1011         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1012         amdgpu_device_doorbell_fini(adev);
1013         if (adev->asic_type >= CHIP_BONAIRE)
1014                 pci_release_resource(adev->pdev, 2);
1015
1016         pci_release_resource(adev->pdev, 0);
1017
1018         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1019         if (r == -ENOSPC)
1020                 DRM_INFO("Not enough PCI address space for a large BAR.");
1021         else if (r && r != -ENOTSUPP)
1022                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1023
1024         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1025
1026         /* When the doorbell or fb BAR isn't available we have no chance of
1027          * using the device.
1028          */
1029         r = amdgpu_device_doorbell_init(adev);
1030         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1031                 return -ENODEV;
1032
1033         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1034
1035         return 0;
1036 }
1037
1038 /*
1039  * GPU helpers function.
1040  */
1041 /**
1042  * amdgpu_device_need_post - check if the hw need post or not
1043  *
1044  * @adev: amdgpu_device pointer
1045  *
1046  * Check if the asic has been initialized (all asics) at driver startup
1047  * or if post is needed after a hw reset is performed.
1048  * Returns true if post is needed or false if not.
1049  */
1050 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1051 {
1052         uint32_t reg;
1053
1054         if (amdgpu_sriov_vf(adev))
1055                 return false;
1056
1057         if (amdgpu_passthrough(adev)) {
1058                 /* for FIJI: In the whole-GPU pass-through virtualization case, after a VM reboot
1059                  * some old SMC firmware still needs the driver to do a vPost, otherwise the GPU
1060                  * hangs. SMC firmware versions above 22.15 don't have this flaw, so we force
1061                  * vPost to be executed for SMC versions below 22.15
1062                  */
1063                 if (adev->asic_type == CHIP_FIJI) {
1064                         int err;
1065                         uint32_t fw_ver;
1066                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1067                         /* force vPost if an error occurred */
1068                         if (err)
1069                                 return true;
1070
1071                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1072                         if (fw_ver < 0x00160e00)
1073                                 return true;
1074                 }
1075         }
1076
1077         if (adev->has_hw_reset) {
1078                 adev->has_hw_reset = false;
1079                 return true;
1080         }
1081
1082         /* bios scratch used on CIK+ */
1083         if (adev->asic_type >= CHIP_BONAIRE)
1084                 return amdgpu_atombios_scratch_need_asic_init(adev);
1085
1086         /* check MEM_SIZE for older asics */
1087         reg = amdgpu_asic_get_config_memsize(adev);
1088
1089         if ((reg != 0) && (reg != 0xffffffff))
1090                 return false;
1091
1092         return true;
1093 }
1094
1095 /* if we get transitioned to only one device, take VGA back */
1096 /**
1097  * amdgpu_device_vga_set_decode - enable/disable vga decode
1098  *
1099  * @cookie: amdgpu_device pointer
1100  * @state: enable/disable vga decode
1101  *
1102  * Enable/disable vga decode (all asics).
1103  * Returns VGA resource flags.
1104  */
1105 static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1106 {
1107         struct amdgpu_device *adev = cookie;
1108         amdgpu_asic_set_vga_state(adev, state);
1109         if (state)
1110                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1111                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1112         else
1113                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1114 }
1115
1116 /**
1117  * amdgpu_device_check_block_size - validate the vm block size
1118  *
1119  * @adev: amdgpu_device pointer
1120  *
1121  * Validates the vm block size specified via module parameter.
1122  * The vm block size defines number of bits in page table versus page directory,
1123  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1124  * page table and the remaining bits are in the page directory.
1125  */
1126 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1127 {
1128         /* defines number of bits in page table versus page directory,
1129          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1130          * page table and the remaining bits are in the page directory */
1131         if (amdgpu_vm_block_size == -1)
1132                 return;
1133
1134         if (amdgpu_vm_block_size < 9) {
1135                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1136                          amdgpu_vm_block_size);
1137                 amdgpu_vm_block_size = -1;
1138         }
1139 }
1140
1141 /**
1142  * amdgpu_device_check_vm_size - validate the vm size
1143  *
1144  * @adev: amdgpu_device pointer
1145  *
1146  * Validates the vm size in GB specified via module parameter.
1147  * The VM size is the size of the GPU virtual memory space in GB.
1148  */
1149 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1150 {
1151         /* no need to check the default value */
1152         if (amdgpu_vm_size == -1)
1153                 return;
1154
1155         if (amdgpu_vm_size < 1) {
1156                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1157                          amdgpu_vm_size);
1158                 amdgpu_vm_size = -1;
1159         }
1160 }
1161
1162 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1163 {
1164         struct sysinfo si;
1165         bool is_os_64 = (sizeof(void *) == 8);
1166         uint64_t total_memory;
1167         uint64_t dram_size_seven_GB = 0x1B8000000;
1168         uint64_t dram_size_three_GB = 0xB8000000;
1169
1170         if (amdgpu_smu_memory_pool_size == 0)
1171                 return;
1172
1173         if (!is_os_64) {
1174                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1175                 goto def_value;
1176         }
1177         si_meminfo(&si);
1178         total_memory = (uint64_t)si.totalram * si.mem_unit;
1179
1180         if ((amdgpu_smu_memory_pool_size == 1) ||
1181                 (amdgpu_smu_memory_pool_size == 2)) {
1182                 if (total_memory < dram_size_three_GB)
1183                         goto def_value1;
1184         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1185                 (amdgpu_smu_memory_pool_size == 8)) {
1186                 if (total_memory < dram_size_seven_GB)
1187                         goto def_value1;
1188         } else {
1189                 DRM_WARN("Smu memory pool size not supported\n");
1190                 goto def_value;
1191         }
1192         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1193
1194         return;
1195
1196 def_value1:
1197         DRM_WARN("Not enough system memory\n");
1198 def_value:
1199         adev->pm.smu_prv_buffer_size = 0;
1200 }
1201
1202 /**
1203  * amdgpu_device_check_arguments - validate module params
1204  *
1205  * @adev: amdgpu_device pointer
1206  *
1207  * Validates certain module parameters and updates
1208  * the associated values used by the driver (all asics).
1209  */
1210 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1211 {
1212         if (amdgpu_sched_jobs < 4) {
1213                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1214                          amdgpu_sched_jobs);
1215                 amdgpu_sched_jobs = 4;
1216         } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1217                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1218                          amdgpu_sched_jobs);
1219                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1220         }
1221
1222         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1223                 /* gart size must be greater or equal to 32M */
1224                 dev_warn(adev->dev, "gart size (%d) too small\n",
1225                          amdgpu_gart_size);
1226                 amdgpu_gart_size = -1;
1227         }
1228
1229         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1230                 /* gtt size must be greater or equal to 32M */
1231                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1232                                  amdgpu_gtt_size);
1233                 amdgpu_gtt_size = -1;
1234         }
1235
1236         /* valid range is between 4 and 9 inclusive */
1237         if (amdgpu_vm_fragment_size != -1 &&
1238             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1239                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1240                 amdgpu_vm_fragment_size = -1;
1241         }
1242
1243         if (amdgpu_sched_hw_submission < 2) {
1244                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1245                          amdgpu_sched_hw_submission);
1246                 amdgpu_sched_hw_submission = 2;
1247         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1248                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1249                          amdgpu_sched_hw_submission);
1250                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1251         }
1252
1253         amdgpu_device_check_smu_prv_buffer_size(adev);
1254
1255         amdgpu_device_check_vm_size(adev);
1256
1257         amdgpu_device_check_block_size(adev);
1258
1259         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1260
1261         amdgpu_gmc_tmz_set(adev);
1262
1263         if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
1264                 amdgpu_num_kcq = 8;
1265                 dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
1266         }
1267
1268         amdgpu_gmc_noretry_set(adev);
1269
1270         return 0;
1271 }
1272
1273 /**
1274  * amdgpu_switcheroo_set_state - set switcheroo state
1275  *
1276  * @pdev: pci dev pointer
1277  * @state: vga_switcheroo state
1278  *
1279  * Callback for the switcheroo driver.  Suspends or resumes the
1280  * the asics before or after it is powered up using ACPI methods.
1281  */
1282 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1283                                         enum vga_switcheroo_state state)
1284 {
1285         struct drm_device *dev = pci_get_drvdata(pdev);
1286         int r;
1287
1288         if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
1289                 return;
1290
1291         if (state == VGA_SWITCHEROO_ON) {
1292                 pr_info("switched on\n");
1293                 /* don't suspend or resume card normally */
1294                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1295
1296                 pci_set_power_state(dev->pdev, PCI_D0);
1297                 amdgpu_device_load_pci_state(dev->pdev);
1298                 r = pci_enable_device(dev->pdev);
1299                 if (r)
1300                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1301                 amdgpu_device_resume(dev, true);
1302
1303                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1304                 drm_kms_helper_poll_enable(dev);
1305         } else {
1306                 pr_info("switched off\n");
1307                 drm_kms_helper_poll_disable(dev);
1308                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1309                 amdgpu_device_suspend(dev, true);
1310                 amdgpu_device_cache_pci_state(dev->pdev);
1311                 /* Shut down the device */
1312                 pci_disable_device(dev->pdev);
1313                 pci_set_power_state(dev->pdev, PCI_D3cold);
1314                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1315         }
1316 }
1317
1318 /**
1319  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1320  *
1321  * @pdev: pci dev pointer
1322  *
1323  * Callback for the switcheroo driver.  Check if the switcheroo
1324  * state can be changed.
1325  * Returns true if the state can be changed, false if not.
1326  */
1327 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1328 {
1329         struct drm_device *dev = pci_get_drvdata(pdev);
1330
1331         /*
1332         * FIXME: open_count is protected by drm_global_mutex but that would lead to
1333         * locking inversion with the driver load path. And the access here is
1334         * completely racy anyway. So don't bother with locking for now.
1335         */
1336         return atomic_read(&dev->open_count) == 0;
1337 }
1338
1339 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1340         .set_gpu_state = amdgpu_switcheroo_set_state,
1341         .reprobe = NULL,
1342         .can_switch = amdgpu_switcheroo_can_switch,
1343 };
1344
1345 /**
1346  * amdgpu_device_ip_set_clockgating_state - set the CG state
1347  *
1348  * @dev: amdgpu_device pointer
1349  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1350  * @state: clockgating state (gate or ungate)
1351  *
1352  * Sets the requested clockgating state for all instances of
1353  * the hardware IP specified.
1354  * Returns the error code from the last instance.
1355  */
1356 int amdgpu_device_ip_set_clockgating_state(void *dev,
1357                                            enum amd_ip_block_type block_type,
1358                                            enum amd_clockgating_state state)
1359 {
1360         struct amdgpu_device *adev = dev;
1361         int i, r = 0;
1362
1363         for (i = 0; i < adev->num_ip_blocks; i++) {
1364                 if (!adev->ip_blocks[i].status.valid)
1365                         continue;
1366                 if (adev->ip_blocks[i].version->type != block_type)
1367                         continue;
1368                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1369                         continue;
1370                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1371                         (void *)adev, state);
1372                 if (r)
1373                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1374                                   adev->ip_blocks[i].version->funcs->name, r);
1375         }
1376         return r;
1377 }
1378
1379 /**
1380  * amdgpu_device_ip_set_powergating_state - set the PG state
1381  *
1382  * @dev: amdgpu_device pointer
1383  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1384  * @state: powergating state (gate or ungate)
1385  *
1386  * Sets the requested powergating state for all instances of
1387  * the hardware IP specified.
1388  * Returns the error code from the last instance.
1389  */
1390 int amdgpu_device_ip_set_powergating_state(void *dev,
1391                                            enum amd_ip_block_type block_type,
1392                                            enum amd_powergating_state state)
1393 {
1394         struct amdgpu_device *adev = dev;
1395         int i, r = 0;
1396
1397         for (i = 0; i < adev->num_ip_blocks; i++) {
1398                 if (!adev->ip_blocks[i].status.valid)
1399                         continue;
1400                 if (adev->ip_blocks[i].version->type != block_type)
1401                         continue;
1402                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1403                         continue;
1404                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1405                         (void *)adev, state);
1406                 if (r)
1407                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1408                                   adev->ip_blocks[i].version->funcs->name, r);
1409         }
1410         return r;
1411 }
1412
1413 /**
1414  * amdgpu_device_ip_get_clockgating_state - get the CG state
1415  *
1416  * @adev: amdgpu_device pointer
1417  * @flags: clockgating feature flags
1418  *
1419  * Walks the list of IPs on the device and updates the clockgating
1420  * flags for each IP.
1421  * Updates @flags with the feature flags for each hardware IP where
1422  * clockgating is enabled.
1423  */
1424 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1425                                             u32 *flags)
1426 {
1427         int i;
1428
1429         for (i = 0; i < adev->num_ip_blocks; i++) {
1430                 if (!adev->ip_blocks[i].status.valid)
1431                         continue;
1432                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1433                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1434         }
1435 }
1436
1437 /**
1438  * amdgpu_device_ip_wait_for_idle - wait for idle
1439  *
1440  * @adev: amdgpu_device pointer
1441  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1442  *
1443  * Waits for the requested hardware IP to be idle.
1444  * Returns 0 for success or a negative error code on failure.
1445  */
1446 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1447                                    enum amd_ip_block_type block_type)
1448 {
1449         int i, r;
1450
1451         for (i = 0; i < adev->num_ip_blocks; i++) {
1452                 if (!adev->ip_blocks[i].status.valid)
1453                         continue;
1454                 if (adev->ip_blocks[i].version->type == block_type) {
1455                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1456                         if (r)
1457                                 return r;
1458                         break;
1459                 }
1460         }
1461         return 0;
1462
1463 }
1464
1465 /**
1466  * amdgpu_device_ip_is_idle - is the hardware IP idle
1467  *
1468  * @adev: amdgpu_device pointer
1469  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1470  *
1471  * Check if the hardware IP is idle or not.
1472  * Returns true if the IP is idle, false if not.
1473  */
1474 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1475                               enum amd_ip_block_type block_type)
1476 {
1477         int i;
1478
1479         for (i = 0; i < adev->num_ip_blocks; i++) {
1480                 if (!adev->ip_blocks[i].status.valid)
1481                         continue;
1482                 if (adev->ip_blocks[i].version->type == block_type)
1483                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1484         }
1485         return true;
1486
1487 }
1488
1489 /**
1490  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1491  *
1492  * @adev: amdgpu_device pointer
1493  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1494  *
1495  * Returns a pointer to the hardware IP block structure
1496  * if it exists for the asic, otherwise NULL.
1497  */
1498 struct amdgpu_ip_block *
1499 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1500                               enum amd_ip_block_type type)
1501 {
1502         int i;
1503
1504         for (i = 0; i < adev->num_ip_blocks; i++)
1505                 if (adev->ip_blocks[i].version->type == type)
1506                         return &adev->ip_blocks[i];
1507
1508         return NULL;
1509 }
1510
1511 /**
1512  * amdgpu_device_ip_block_version_cmp
1513  *
1514  * @adev: amdgpu_device pointer
1515  * @type: enum amd_ip_block_type
1516  * @major: major version
1517  * @minor: minor version
1518  *
1519  * Returns 0 if the IP block version is equal to or greater than the one specified,
1520  * 1 if it is smaller or the ip_block doesn't exist
1521  */
1522 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1523                                        enum amd_ip_block_type type,
1524                                        u32 major, u32 minor)
1525 {
1526         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1527
1528         if (ip_block && ((ip_block->version->major > major) ||
1529                         ((ip_block->version->major == major) &&
1530                         (ip_block->version->minor >= minor))))
1531                 return 0;
1532
1533         return 1;
1534 }
1535
1536 /**
1537  * amdgpu_device_ip_block_add
1538  *
1539  * @adev: amdgpu_device pointer
1540  * @ip_block_version: pointer to the IP to add
1541  *
1542  * Adds the IP block driver information to the collection of IPs
1543  * on the asic.
1544  */
1545 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1546                                const struct amdgpu_ip_block_version *ip_block_version)
1547 {
1548         if (!ip_block_version)
1549                 return -EINVAL;
1550
1551         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1552                   ip_block_version->funcs->name);
1553
1554         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1555
1556         return 0;
1557 }
1558
1559 /**
1560  * amdgpu_device_enable_virtual_display - enable virtual display feature
1561  *
1562  * @adev: amdgpu_device pointer
1563  *
1564  * Enables the virtual display feature if the user has enabled it via
1565  * the module parameter virtual_display.  This feature provides a virtual
1566  * display hardware on headless boards or in virtualized environments.
1567  * This function parses and validates the configuration string specified by
1568  * the user and configures the virtual display configuration (number of
1569  * virtual connectors, crtcs, etc.) specified.
1570  */
1571 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1572 {
1573         adev->enable_virtual_display = false;
1574
1575         if (amdgpu_virtual_display) {
1576                 struct drm_device *ddev = adev_to_drm(adev);
1577                 const char *pci_address_name = pci_name(ddev->pdev);
1578                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1579
1580                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1581                 pciaddstr_tmp = pciaddstr;
1582                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1583                         pciaddname = strsep(&pciaddname_tmp, ",");
1584                         if (!strcmp("all", pciaddname)
1585                             || !strcmp(pci_address_name, pciaddname)) {
1586                                 long num_crtc;
1587                                 int res = -1;
1588
1589                                 adev->enable_virtual_display = true;
1590
1591                                 if (pciaddname_tmp)
1592                                         res = kstrtol(pciaddname_tmp, 10,
1593                                                       &num_crtc);
1594
1595                                 if (!res) {
1596                                         if (num_crtc < 1)
1597                                                 num_crtc = 1;
1598                                         if (num_crtc > 6)
1599                                                 num_crtc = 6;
1600                                         adev->mode_info.num_crtc = num_crtc;
1601                                 } else {
1602                                         adev->mode_info.num_crtc = 1;
1603                                 }
1604                                 break;
1605                         }
1606                 }
1607
1608                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1609                          amdgpu_virtual_display, pci_address_name,
1610                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1611
1612                 kfree(pciaddstr);
1613         }
1614 }
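/*
 * Illustrative parameter format (a sketch based on the parsing above, not an
 * exhaustive description): entries are separated by ';', each entry is a PCI
 * address optionally followed by ",<num_crtc>", and "all" matches any device:
 *
 *	amdgpu.virtual_display=0000:26:00.0,2
 *	amdgpu.virtual_display=all,4
 *
 * num_crtc is clamped to the range [1, 6] and defaults to 1 when omitted or
 * unparsable.
 */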
1615
1616 /**
1617  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1618  *
1619  * @adev: amdgpu_device pointer
1620  *
1621  * Parses the asic configuration parameters specified in the gpu info
1622  * firmware and makes them available to the driver for use in configuring
1623  * the asic.
1624  * Returns 0 on success, negative error code on failure.
1625  */
1626 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1627 {
1628         const char *chip_name;
1629         char fw_name[40];
1630         int err;
1631         const struct gpu_info_firmware_header_v1_0 *hdr;
1632
1633         adev->firmware.gpu_info_fw = NULL;
1634
1635         if (adev->mman.discovery_bin) {
1636                 amdgpu_discovery_get_gfx_info(adev);
1637
1638                 /*
1639                  * FIXME: The bounding box is still needed by Navi12, so
1640  * temporarily read it from gpu_info firmware. Should be dropped
1641                  * when DAL no longer needs it.
1642                  */
1643                 if (adev->asic_type != CHIP_NAVI12)
1644                         return 0;
1645         }
1646
1647         switch (adev->asic_type) {
1648 #ifdef CONFIG_DRM_AMDGPU_SI
1649         case CHIP_VERDE:
1650         case CHIP_TAHITI:
1651         case CHIP_PITCAIRN:
1652         case CHIP_OLAND:
1653         case CHIP_HAINAN:
1654 #endif
1655 #ifdef CONFIG_DRM_AMDGPU_CIK
1656         case CHIP_BONAIRE:
1657         case CHIP_HAWAII:
1658         case CHIP_KAVERI:
1659         case CHIP_KABINI:
1660         case CHIP_MULLINS:
1661 #endif
1662         case CHIP_TOPAZ:
1663         case CHIP_TONGA:
1664         case CHIP_FIJI:
1665         case CHIP_POLARIS10:
1666         case CHIP_POLARIS11:
1667         case CHIP_POLARIS12:
1668         case CHIP_VEGAM:
1669         case CHIP_CARRIZO:
1670         case CHIP_STONEY:
1671         case CHIP_VEGA20:
1672         case CHIP_SIENNA_CICHLID:
1673         case CHIP_NAVY_FLOUNDER:
1674         default:
1675                 return 0;
1676         case CHIP_VEGA10:
1677                 chip_name = "vega10";
1678                 break;
1679         case CHIP_VEGA12:
1680                 chip_name = "vega12";
1681                 break;
1682         case CHIP_RAVEN:
1683                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1684                         chip_name = "raven2";
1685                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1686                         chip_name = "picasso";
1687                 else
1688                         chip_name = "raven";
1689                 break;
1690         case CHIP_ARCTURUS:
1691                 chip_name = "arcturus";
1692                 break;
1693         case CHIP_RENOIR:
1694                 chip_name = "renoir";
1695                 break;
1696         case CHIP_NAVI10:
1697                 chip_name = "navi10";
1698                 break;
1699         case CHIP_NAVI14:
1700                 chip_name = "navi14";
1701                 break;
1702         case CHIP_NAVI12:
1703                 chip_name = "navi12";
1704                 break;
1705         }
1706
1707         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1708         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1709         if (err) {
1710                 dev_err(adev->dev,
1711                         "Failed to load gpu_info firmware \"%s\"\n",
1712                         fw_name);
1713                 goto out;
1714         }
1715         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1716         if (err) {
1717                 dev_err(adev->dev,
1718                         "Failed to validate gpu_info firmware \"%s\"\n",
1719                         fw_name);
1720                 goto out;
1721         }
1722
1723         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1724         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1725
1726         switch (hdr->version_major) {
1727         case 1:
1728         {
1729                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1730                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1731                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1732
1733                 /*
1734  * Should be dropped when DAL no longer needs it.
1735                  */
1736                 if (adev->asic_type == CHIP_NAVI12)
1737                         goto parse_soc_bounding_box;
1738
1739                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1740                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1741                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1742                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1743                 adev->gfx.config.max_texture_channel_caches =
1744                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
1745                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1746                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1747                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1748                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1749                 adev->gfx.config.double_offchip_lds_buf =
1750                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1751                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1752                 adev->gfx.cu_info.max_waves_per_simd =
1753                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1754                 adev->gfx.cu_info.max_scratch_slots_per_cu =
1755                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1756                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1757                 if (hdr->version_minor >= 1) {
1758                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1759                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1760                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1761                         adev->gfx.config.num_sc_per_sh =
1762                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1763                         adev->gfx.config.num_packer_per_sc =
1764                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1765                 }
1766
1767 parse_soc_bounding_box:
1768                 /*
1769                  * SOC bounding box info is not integrated into the discovery table,
1770                  * so we always need to parse it from the gpu info firmware when needed.
1771                  */
1772                 if (hdr->version_minor == 2) {
1773                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1774                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1775                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1776                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1777                 }
1778                 break;
1779         }
1780         default:
1781                 dev_err(adev->dev,
1782                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1783                 err = -EINVAL;
1784                 goto out;
1785         }
1786 out:
1787         return err;
1788 }
1789
1790 /**
1791  * amdgpu_device_ip_early_init - run early init for hardware IPs
1792  *
1793  * @adev: amdgpu_device pointer
1794  *
1795  * Early initialization pass for hardware IPs.  The hardware IPs that make
1796  * up each asic are discovered and each IP's early_init callback is run.  This
1797  * is the first stage in initializing the asic.
1798  * Returns 0 on success, negative error code on failure.
1799  */
1800 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1801 {
1802         int i, r;
1803
1804         amdgpu_device_enable_virtual_display(adev);
1805
1806         if (amdgpu_sriov_vf(adev)) {
1807                 r = amdgpu_virt_request_full_gpu(adev, true);
1808                 if (r)
1809                         return r;
1810         }
1811
1812         switch (adev->asic_type) {
1813 #ifdef CONFIG_DRM_AMDGPU_SI
1814         case CHIP_VERDE:
1815         case CHIP_TAHITI:
1816         case CHIP_PITCAIRN:
1817         case CHIP_OLAND:
1818         case CHIP_HAINAN:
1819                 adev->family = AMDGPU_FAMILY_SI;
1820                 r = si_set_ip_blocks(adev);
1821                 if (r)
1822                         return r;
1823                 break;
1824 #endif
1825 #ifdef CONFIG_DRM_AMDGPU_CIK
1826         case CHIP_BONAIRE:
1827         case CHIP_HAWAII:
1828         case CHIP_KAVERI:
1829         case CHIP_KABINI:
1830         case CHIP_MULLINS:
1831                 if (adev->flags & AMD_IS_APU)
1832                         adev->family = AMDGPU_FAMILY_KV;
1833                 else
1834                         adev->family = AMDGPU_FAMILY_CI;
1835
1836                 r = cik_set_ip_blocks(adev);
1837                 if (r)
1838                         return r;
1839                 break;
1840 #endif
1841         case CHIP_TOPAZ:
1842         case CHIP_TONGA:
1843         case CHIP_FIJI:
1844         case CHIP_POLARIS10:
1845         case CHIP_POLARIS11:
1846         case CHIP_POLARIS12:
1847         case CHIP_VEGAM:
1848         case CHIP_CARRIZO:
1849         case CHIP_STONEY:
1850                 if (adev->flags & AMD_IS_APU)
1851                         adev->family = AMDGPU_FAMILY_CZ;
1852                 else
1853                         adev->family = AMDGPU_FAMILY_VI;
1854
1855                 r = vi_set_ip_blocks(adev);
1856                 if (r)
1857                         return r;
1858                 break;
1859         case CHIP_VEGA10:
1860         case CHIP_VEGA12:
1861         case CHIP_VEGA20:
1862         case CHIP_RAVEN:
1863         case CHIP_ARCTURUS:
1864         case CHIP_RENOIR:
1865                 if (adev->flags & AMD_IS_APU)
1866                         adev->family = AMDGPU_FAMILY_RV;
1867                 else
1868                         adev->family = AMDGPU_FAMILY_AI;
1869
1870                 r = soc15_set_ip_blocks(adev);
1871                 if (r)
1872                         return r;
1873                 break;
1874         case  CHIP_NAVI10:
1875         case  CHIP_NAVI14:
1876         case  CHIP_NAVI12:
1877         case  CHIP_SIENNA_CICHLID:
1878         case  CHIP_NAVY_FLOUNDER:
1879                 adev->family = AMDGPU_FAMILY_NV;
1880
1881                 r = nv_set_ip_blocks(adev);
1882                 if (r)
1883                         return r;
1884                 break;
1885         default:
1886                 /* FIXME: not supported yet */
1887                 return -EINVAL;
1888         }
1889
1890         amdgpu_amdkfd_device_probe(adev);
1891
1892         adev->pm.pp_feature = amdgpu_pp_feature_mask;
1893         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
1894                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1895
1896         for (i = 0; i < adev->num_ip_blocks; i++) {
1897                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1898                         DRM_ERROR("disabled ip block: %d <%s>\n",
1899                                   i, adev->ip_blocks[i].version->funcs->name);
1900                         adev->ip_blocks[i].status.valid = false;
1901                 } else {
1902                         if (adev->ip_blocks[i].version->funcs->early_init) {
1903                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1904                                 if (r == -ENOENT) {
1905                                         adev->ip_blocks[i].status.valid = false;
1906                                 } else if (r) {
1907                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
1908                                                   adev->ip_blocks[i].version->funcs->name, r);
1909                                         return r;
1910                                 } else {
1911                                         adev->ip_blocks[i].status.valid = true;
1912                                 }
1913                         } else {
1914                                 adev->ip_blocks[i].status.valid = true;
1915                         }
1916                 }
1917                 /* get the vbios after the asic_funcs are set up */
1918                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
1919                         r = amdgpu_device_parse_gpu_info_fw(adev);
1920                         if (r)
1921                                 return r;
1922
1923                         /* Read BIOS */
1924                         if (!amdgpu_get_bios(adev))
1925                                 return -EINVAL;
1926
1927                         r = amdgpu_atombios_init(adev);
1928                         if (r) {
1929                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1930                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
1931                                 return r;
1932                         }
1933                 }
1934         }
1935
1936         adev->cg_flags &= amdgpu_cg_mask;
1937         adev->pg_flags &= amdgpu_pg_mask;
1938
1939         return 0;
1940 }
1941
1942 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
1943 {
1944         int i, r;
1945
1946         for (i = 0; i < adev->num_ip_blocks; i++) {
1947                 if (!adev->ip_blocks[i].status.sw)
1948                         continue;
1949                 if (adev->ip_blocks[i].status.hw)
1950                         continue;
1951                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1952                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
1953                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1954                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1955                         if (r) {
1956                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1957                                           adev->ip_blocks[i].version->funcs->name, r);
1958                                 return r;
1959                         }
1960                         adev->ip_blocks[i].status.hw = true;
1961                 }
1962         }
1963
1964         return 0;
1965 }
1966
1967 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
1968 {
1969         int i, r;
1970
1971         for (i = 0; i < adev->num_ip_blocks; i++) {
1972                 if (!adev->ip_blocks[i].status.sw)
1973                         continue;
1974                 if (adev->ip_blocks[i].status.hw)
1975                         continue;
1976                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1977                 if (r) {
1978                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1979                                   adev->ip_blocks[i].version->funcs->name, r);
1980                         return r;
1981                 }
1982                 adev->ip_blocks[i].status.hw = true;
1983         }
1984
1985         return 0;
1986 }
1987
1988 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1989 {
1990         int r = 0;
1991         int i;
1992         uint32_t smu_version;
1993
1994         if (adev->asic_type >= CHIP_VEGA10) {
1995                 for (i = 0; i < adev->num_ip_blocks; i++) {
1996                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
1997                                 continue;
1998
1999                         /* no need to do the fw loading again if already done */
2000                         if (adev->ip_blocks[i].status.hw)
2001                                 break;
2002
2003                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2004                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2005                                 if (r) {
2006                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2007                                                           adev->ip_blocks[i].version->funcs->name, r);
2008                                         return r;
2009                                 }
2010                         } else {
2011                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2012                                 if (r) {
2013                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2014                                                           adev->ip_blocks[i].version->funcs->name, r);
2015                                         return r;
2016                                 }
2017                         }
2018
2019                         adev->ip_blocks[i].status.hw = true;
2020                         break;
2021                 }
2022         }
2023
2024         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2025                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2026
2027         return r;
2028 }
2029
2030 /**
2031  * amdgpu_device_ip_init - run init for hardware IPs
2032  *
2033  * @adev: amdgpu_device pointer
2034  *
2035  * Main initialization pass for hardware IPs.  The list of all the hardware
2036  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2037  * are run.  sw_init initializes the software state associated with each IP
2038  * and hw_init initializes the hardware associated with each IP.
2039  * Returns 0 on success, negative error code on failure.
2040  */
2041 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2042 {
2043         int i, r;
2044
2045         r = amdgpu_ras_init(adev);
2046         if (r)
2047                 return r;
2048
2049         for (i = 0; i < adev->num_ip_blocks; i++) {
2050                 if (!adev->ip_blocks[i].status.valid)
2051                         continue;
2052                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2053                 if (r) {
2054                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2055                                   adev->ip_blocks[i].version->funcs->name, r);
2056                         goto init_failed;
2057                 }
2058                 adev->ip_blocks[i].status.sw = true;
2059
2060                 /* need to do gmc hw init early so we can allocate gpu mem */
2061                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2062                         r = amdgpu_device_vram_scratch_init(adev);
2063                         if (r) {
2064                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2065                                 goto init_failed;
2066                         }
2067                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2068                         if (r) {
2069                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2070                                 goto init_failed;
2071                         }
2072                         r = amdgpu_device_wb_init(adev);
2073                         if (r) {
2074                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2075                                 goto init_failed;
2076                         }
2077                         adev->ip_blocks[i].status.hw = true;
2078
2079                         /* right after GMC hw init, we create CSA */
2080                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2081                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2082                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2083                                                                 AMDGPU_CSA_SIZE);
2084                                 if (r) {
2085                                         DRM_ERROR("allocate CSA failed %d\n", r);
2086                                         goto init_failed;
2087                                 }
2088                         }
2089                 }
2090         }
2091
2092         if (amdgpu_sriov_vf(adev))
2093                 amdgpu_virt_init_data_exchange(adev);
2094
2095         r = amdgpu_ib_pool_init(adev);
2096         if (r) {
2097                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2098                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2099                 goto init_failed;
2100         }
2101
2102         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2103         if (r)
2104                 goto init_failed;
2105
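	/*
	 * Hardware init now runs in three steps: phase1 brings up COMMON and IH
	 * (plus PSP on SR-IOV), fw_loading initializes PSP/SMU so the remaining
	 * microcode can be loaded, and phase2 initializes the rest of the blocks.
	 */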
2106         r = amdgpu_device_ip_hw_init_phase1(adev);
2107         if (r)
2108                 goto init_failed;
2109
2110         r = amdgpu_device_fw_loading(adev);
2111         if (r)
2112                 goto init_failed;
2113
2114         r = amdgpu_device_ip_hw_init_phase2(adev);
2115         if (r)
2116                 goto init_failed;
2117
2118         /*
2119          * Retired pages will be loaded from eeprom and reserved here.
2120          * This should be called after amdgpu_device_ip_hw_init_phase2, since
2121          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2122          * functional for I2C communication, which is only true at this point.
2123          *
2124          * amdgpu_ras_recovery_init may fail, but the caller only cares about
2125          * failures caused by a bad GPU situation and stops the amdgpu init
2126          * process accordingly. For other failure cases, it still releases all
2127          * the resources and prints an error message, rather than returning a
2128          * negative value to the upper level.
2129          *
2130          * Note: theoretically, this should be called before all VRAM allocations
2131          * to protect retired pages from being abused.
2132          */
2133         r = amdgpu_ras_recovery_init(adev);
2134         if (r)
2135                 goto init_failed;
2136
2137         if (adev->gmc.xgmi.num_physical_nodes > 1)
2138                 amdgpu_xgmi_add_device(adev);
2139         amdgpu_amdkfd_device_init(adev);
2140
2141         amdgpu_fru_get_product_info(adev);
2142
2143 init_failed:
2144         if (amdgpu_sriov_vf(adev))
2145                 amdgpu_virt_release_full_gpu(adev, true);
2146
2147         return r;
2148 }
2149
2150 /**
2151  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2152  *
2153  * @adev: amdgpu_device pointer
2154  *
2155  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2156  * this function before a GPU reset.  If the value is retained after a
2157  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2158  */
2159 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2160 {
2161         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2162 }
2163
2164 /**
2165  * amdgpu_device_check_vram_lost - check if vram is valid
2166  *
2167  * @adev: amdgpu_device pointer
2168  *
2169  * Checks the reset magic value written to the gart pointer in VRAM.
2170  * The driver calls this after a GPU reset to see if the contents of
2171  * VRAM have been lost or not.
2172  * Returns true if VRAM is lost, false if not.
2173  */
2174 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2175 {
2176         if (memcmp(adev->gart.ptr, adev->reset_magic,
2177                         AMDGPU_RESET_MAGIC_NUM))
2178                 return true;
2179
2180         if (!amdgpu_in_reset(adev))
2181                 return false;
2182
2183         /*
2184          * For all ASICs with baco/mode1 reset, the VRAM is
2185          * always assumed to be lost.
2186          */
2187         switch (amdgpu_asic_reset_method(adev)) {
2188         case AMD_RESET_METHOD_BACO:
2189         case AMD_RESET_METHOD_MODE1:
2190                 return true;
2191         default:
2192                 return false;
2193         }
2194 }
2195
2196 /**
2197  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2198  *
2199  * @adev: amdgpu_device pointer
2200  * @state: clockgating state (gate or ungate)
2201  *
2202  * The list of all the hardware IPs that make up the asic is walked and the
2203  * set_clockgating_state callbacks are run.
2204  * During the late init pass this enables clockgating for the hardware IPs;
2205  * during fini or suspend it disables clockgating for them.
2206  * Returns 0 on success, negative error code on failure.
2207  */
2208
2209 static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2210                                                 enum amd_clockgating_state state)
2211 {
2212         int i, j, r;
2213
2214         if (amdgpu_emu_mode == 1)
2215                 return 0;
2216
2217         for (j = 0; j < adev->num_ip_blocks; j++) {
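		/*
		 * Walk the IP blocks front-to-back when gating and back-to-front
		 * when ungating, so clockgating is removed in the reverse of the
		 * order in which it was applied.
		 */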
2218                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2219                 if (!adev->ip_blocks[i].status.late_initialized)
2220                         continue;
2221                 /* skip CG for VCE/UVD, it's handled specially */
2222                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2223                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2224                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2225                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2226                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2227                         /* enable clockgating to save power */
2228                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2229                                                                                      state);
2230                         if (r) {
2231                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2232                                           adev->ip_blocks[i].version->funcs->name, r);
2233                                 return r;
2234                         }
2235                 }
2236         }
2237
2238         return 0;
2239 }
2240
2241 static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
2242 {
2243         int i, j, r;
2244
2245         if (amdgpu_emu_mode == 1)
2246                 return 0;
2247
2248         for (j = 0; j < adev->num_ip_blocks; j++) {
2249                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2250                 if (!adev->ip_blocks[i].status.late_initialized)
2251                         continue;
2252                 /* skip PG for VCE/UVD, it's handled specially */
2253                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2254                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2255                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2256                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2257                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2258                         /* enable powergating to save power */
2259                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2260                                                                                         state);
2261                         if (r) {
2262                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2263                                           adev->ip_blocks[i].version->funcs->name, r);
2264                                 return r;
2265                         }
2266                 }
2267         }
2268         return 0;
2269 }
2270
2271 static int amdgpu_device_enable_mgpu_fan_boost(void)
2272 {
2273         struct amdgpu_gpu_instance *gpu_ins;
2274         struct amdgpu_device *adev;
2275         int i, ret = 0;
2276
2277         mutex_lock(&mgpu_info.mutex);
2278
2279         /*
2280          * MGPU fan boost feature should be enabled
2281          * only when there are two or more dGPUs in
2282          * the system
2283          */
2284         if (mgpu_info.num_dgpu < 2)
2285                 goto out;
2286
2287         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2288                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2289                 adev = gpu_ins->adev;
2290                 if (!(adev->flags & AMD_IS_APU) &&
2291                     !gpu_ins->mgpu_fan_enabled) {
2292                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2293                         if (ret)
2294                                 break;
2295
2296                         gpu_ins->mgpu_fan_enabled = 1;
2297                 }
2298         }
2299
2300 out:
2301         mutex_unlock(&mgpu_info.mutex);
2302
2303         return ret;
2304 }
2305
2306 /**
2307  * amdgpu_device_ip_late_init - run late init for hardware IPs
2308  *
2309  * @adev: amdgpu_device pointer
2310  *
2311  * Late initialization pass for hardware IPs.  The list of all the hardware
2312  * IPs that make up the asic is walked and the late_init callbacks are run.
2313  * late_init covers any special initialization that an IP requires
2314  * after all of them have been initialized or something that needs to happen
2315  * late in the init process.
2316  * Returns 0 on success, negative error code on failure.
2317  */
2318 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2319 {
2320         struct amdgpu_gpu_instance *gpu_instance;
2321         int i = 0, r;
2322
2323         for (i = 0; i < adev->num_ip_blocks; i++) {
2324                 if (!adev->ip_blocks[i].status.hw)
2325                         continue;
2326                 if (adev->ip_blocks[i].version->funcs->late_init) {
2327                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2328                         if (r) {
2329                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2330                                           adev->ip_blocks[i].version->funcs->name, r);
2331                                 return r;
2332                         }
2333                 }
2334                 adev->ip_blocks[i].status.late_initialized = true;
2335         }
2336
2337         amdgpu_ras_set_error_query_ready(adev, true);
2338
2339         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2340         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2341
2342         amdgpu_device_fill_reset_magic(adev);
2343
2344         r = amdgpu_device_enable_mgpu_fan_boost();
2345         if (r)
2346                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2347
2348
2349         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2350                 mutex_lock(&mgpu_info.mutex);
2351
2352                 /*
2353                  * Reset the device p-state to low, as it was booted at high.
2354                  *
2355                  * This should be performed only after all devices from the same
2356                  * hive have been initialized.
2357                  *
2358                  * However, the number of devices in the hive is not known in
2359                  * advance; they are counted one by one as each device initializes.
2360                  *
2361                  * So we wait until all XGMI-interlinked devices have initialized.
2362                  * This may introduce some delay, as those devices may come from
2363                  * different hives, but that should be OK.
2364                  */
2365                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2366                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2367                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2368                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2369                                         continue;
2370
2371                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2372                                                 AMDGPU_XGMI_PSTATE_MIN);
2373                                 if (r) {
2374                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2375                                         break;
2376                                 }
2377                         }
2378                 }
2379
2380                 mutex_unlock(&mgpu_info.mutex);
2381         }
2382
2383         return 0;
2384 }
2385
2386 /**
2387  * amdgpu_device_ip_fini - run fini for hardware IPs
2388  *
2389  * @adev: amdgpu_device pointer
2390  *
2391  * Main teardown pass for hardware IPs.  The list of all the hardware
2392  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2393  * are run.  hw_fini tears down the hardware associated with each IP
2394  * and sw_fini tears down any software state associated with each IP.
2395  * Returns 0 on success, negative error code on failure.
2396  */
2397 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2398 {
2399         int i, r;
2400
2401         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2402                 amdgpu_virt_release_ras_err_handler_data(adev);
2403
2404         amdgpu_ras_pre_fini(adev);
2405
2406         if (adev->gmc.xgmi.num_physical_nodes > 1)
2407                 amdgpu_xgmi_remove_device(adev);
2408
2409         amdgpu_amdkfd_device_fini(adev);
2410
2411         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2412         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2413
2414         /* need to disable SMC first */
2415         for (i = 0; i < adev->num_ip_blocks; i++) {
2416                 if (!adev->ip_blocks[i].status.hw)
2417                         continue;
2418                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2419                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2420                         /* XXX handle errors */
2421                         if (r) {
2422                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2423                                           adev->ip_blocks[i].version->funcs->name, r);
2424                         }
2425                         adev->ip_blocks[i].status.hw = false;
2426                         break;
2427                 }
2428         }
2429
2430         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2431                 if (!adev->ip_blocks[i].status.hw)
2432                         continue;
2433
2434                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2435                 /* XXX handle errors */
2436                 if (r) {
2437                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2438                                   adev->ip_blocks[i].version->funcs->name, r);
2439                 }
2440
2441                 adev->ip_blocks[i].status.hw = false;
2442         }
2443
2444
2445         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2446                 if (!adev->ip_blocks[i].status.sw)
2447                         continue;
2448
2449                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2450                         amdgpu_ucode_free_bo(adev);
2451                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2452                         amdgpu_device_wb_fini(adev);
2453                         amdgpu_device_vram_scratch_fini(adev);
2454                         amdgpu_ib_pool_fini(adev);
2455                 }
2456
2457                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2458                 /* XXX handle errors */
2459                 if (r) {
2460                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2461                                   adev->ip_blocks[i].version->funcs->name, r);
2462                 }
2463                 adev->ip_blocks[i].status.sw = false;
2464                 adev->ip_blocks[i].status.valid = false;
2465         }
2466
2467         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2468                 if (!adev->ip_blocks[i].status.late_initialized)
2469                         continue;
2470                 if (adev->ip_blocks[i].version->funcs->late_fini)
2471                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2472                 adev->ip_blocks[i].status.late_initialized = false;
2473         }
2474
2475         amdgpu_ras_fini(adev);
2476
2477         if (amdgpu_sriov_vf(adev))
2478                 if (amdgpu_virt_release_full_gpu(adev, false))
2479                         DRM_ERROR("failed to release exclusive mode on fini\n");
2480
2481         return 0;
2482 }
2483
2484 /**
2485  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2486  *
2487  * @work: work_struct.
2488  */
2489 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2490 {
2491         struct amdgpu_device *adev =
2492                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2493         int r;
2494
2495         r = amdgpu_ib_ring_tests(adev);
2496         if (r)
2497                 DRM_ERROR("ib ring test failed (%d).\n", r);
2498 }
2499
2500 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2501 {
2502         struct amdgpu_device *adev =
2503                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2504
2505         mutex_lock(&adev->gfx.gfx_off_mutex);
2506         if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2507                 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2508                         adev->gfx.gfx_off_state = true;
2509         }
2510         mutex_unlock(&adev->gfx.gfx_off_mutex);
2511 }
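/*
 * This delayed work is typically scheduled (with a short delay) from the
 * GFXOFF control path once the last "disable GFXOFF" request is dropped, so
 * the GFX block is only allowed to power off after it has been idle for a
 * while. The scheduling site lives outside this file; treat this note as a
 * sketch of the intended flow rather than a precise call chain.
 */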
2512
2513 /**
2514  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2515  *
2516  * @adev: amdgpu_device pointer
2517  *
2518  * Main suspend function for hardware IPs.  The list of all the hardware
2519  * IPs that make up the asic is walked, clockgating is disabled and the
2520  * suspend callbacks are run.  suspend puts the hardware and software state
2521  * in each IP into a state suitable for suspend.
2522  * Returns 0 on success, negative error code on failure.
2523  */
2524 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2525 {
2526         int i, r;
2527
2528         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2529         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2530
2531         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2532                 if (!adev->ip_blocks[i].status.valid)
2533                         continue;
2534
2535                 /* displays are handled separately */
2536                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2537                         continue;
2538
2539                 /* XXX handle errors */
2540                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2541                 /* XXX handle errors */
2542                 if (r) {
2543                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2544                                   adev->ip_blocks[i].version->funcs->name, r);
2545                         return r;
2546                 }
2547
2548                 adev->ip_blocks[i].status.hw = false;
2549         }
2550
2551         return 0;
2552 }
2553
2554 /**
2555  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2556  *
2557  * @adev: amdgpu_device pointer
2558  *
2559  * Main suspend function for hardware IPs.  The list of all the hardware
2560  * IPs that make up the asic is walked, clockgating is disabled and the
2561  * suspend callbacks are run.  suspend puts the hardware and software state
2562  * in each IP into a state suitable for suspend.
2563  * Returns 0 on success, negative error code on failure.
2564  */
2565 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2566 {
2567         int i, r;
2568
2569         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2570                 if (!adev->ip_blocks[i].status.valid)
2571                         continue;
2572                 /* displays are handled in phase1 */
2573                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2574                         continue;
2575                 /* PSP lost connection when err_event_athub occurs */
2576                 if (amdgpu_ras_intr_triggered() &&
2577                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2578                         adev->ip_blocks[i].status.hw = false;
2579                         continue;
2580                 }
2581                 /* XXX handle errors */
2582                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2583                 /* XXX handle errors */
2584                 if (r) {
2585                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2586                                   adev->ip_blocks[i].version->funcs->name, r);
2587                 }
2588                 adev->ip_blocks[i].status.hw = false;
2589                 /* handle putting the SMC in the appropriate state */
2590                 if (!amdgpu_sriov_vf(adev)) {
2591                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2592                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2593                                 if (r) {
2594                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2595                                                         adev->mp1_state, r);
2596                                         return r;
2597                                 }
2598                         }
2599                 }
2600                 adev->ip_blocks[i].status.hw = false;
2601         }
2602
2603         return 0;
2604 }
2605
2606 /**
2607  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2608  *
2609  * @adev: amdgpu_device pointer
2610  *
2611  * Main suspend function for hardware IPs.  The list of all the hardware
2612  * IPs that make up the asic is walked, clockgating is disabled and the
2613  * suspend callbacks are run.  suspend puts the hardware and software state
2614  * in each IP into a state suitable for suspend.
2615  * Returns 0 on success, negative error code on failure.
2616  */
2617 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2618 {
2619         int r;
2620
2621         if (amdgpu_sriov_vf(adev))
2622                 amdgpu_virt_request_full_gpu(adev, false);
2623
2624         r = amdgpu_device_ip_suspend_phase1(adev);
2625         if (r)
2626                 return r;
2627         r = amdgpu_device_ip_suspend_phase2(adev);
2628
2629         if (amdgpu_sriov_vf(adev))
2630                 amdgpu_virt_release_full_gpu(adev, false);
2631
2632         return r;
2633 }
2634
2635 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2636 {
2637         int i, r;
2638
2639         static enum amd_ip_block_type ip_order[] = {
2640                 AMD_IP_BLOCK_TYPE_GMC,
2641                 AMD_IP_BLOCK_TYPE_COMMON,
2642                 AMD_IP_BLOCK_TYPE_PSP,
2643                 AMD_IP_BLOCK_TYPE_IH,
2644         };
2645
2646         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2647                 int j;
2648                 struct amdgpu_ip_block *block;
2649
2650                 block = &adev->ip_blocks[i];
2651                 block->status.hw = false;
2652
2653                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2654
2655                         if (block->version->type != ip_order[j] ||
2656                                 !block->status.valid)
2657                                 continue;
2658
2659                         r = block->version->funcs->hw_init(adev);
2660                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2661                         if (r)
2662                                 return r;
2663                         block->status.hw = true;
2664                 }
2665         }
2666
2667         return 0;
2668 }
2669
2670 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2671 {
2672         int i, r;
2673
2674         static enum amd_ip_block_type ip_order[] = {
2675                 AMD_IP_BLOCK_TYPE_SMC,
2676                 AMD_IP_BLOCK_TYPE_DCE,
2677                 AMD_IP_BLOCK_TYPE_GFX,
2678                 AMD_IP_BLOCK_TYPE_SDMA,
2679                 AMD_IP_BLOCK_TYPE_UVD,
2680                 AMD_IP_BLOCK_TYPE_VCE,
2681                 AMD_IP_BLOCK_TYPE_VCN
2682         };
2683
2684         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2685                 int j;
2686                 struct amdgpu_ip_block *block;
2687
2688                 for (j = 0; j < adev->num_ip_blocks; j++) {
2689                         block = &adev->ip_blocks[j];
2690
2691                         if (block->version->type != ip_order[i] ||
2692                                 !block->status.valid ||
2693                                 block->status.hw)
2694                                 continue;
2695
2696                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2697                                 r = block->version->funcs->resume(adev);
2698                         else
2699                                 r = block->version->funcs->hw_init(adev);
2700
2701                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2702                         if (r)
2703                                 return r;
2704                         block->status.hw = true;
2705                 }
2706         }
2707
2708         return 0;
2709 }
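/*
 * Note: unlike the normal init/resume paths above, these two SR-IOV reinit
 * helpers walk a fixed, hand-picked ip_order[] rather than the per-device
 * discovery order stored in adev->ip_blocks.
 */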
2710
2711 /**
2712  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2713  *
2714  * @adev: amdgpu_device pointer
2715  *
2716  * First resume function for hardware IPs.  The list of all the hardware
2717  * IPs that make up the asic is walked and the resume callbacks are run for
2718  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2719  * after a suspend and updates the software state as necessary.  This
2720  * function is also used for restoring the GPU after a GPU reset.
2721  * Returns 0 on success, negative error code on failure.
2722  */
2723 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2724 {
2725         int i, r;
2726
2727         for (i = 0; i < adev->num_ip_blocks; i++) {
2728                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2729                         continue;
2730                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2731                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2732                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2733
2734                         r = adev->ip_blocks[i].version->funcs->resume(adev);
2735                         if (r) {
2736                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
2737                                           adev->ip_blocks[i].version->funcs->name, r);
2738                                 return r;
2739                         }
2740                         adev->ip_blocks[i].status.hw = true;
2741                 }
2742         }
2743
2744         return 0;
2745 }
2746
2747 /**
2748  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2749  *
2750  * @adev: amdgpu_device pointer
2751  *
2752  * Second resume function for hardware IPs.  The list of all the hardware
2753  * IPs that make up the asic is walked and the resume callbacks are run for
2754  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2755  * functional state after a suspend and updates the software state as
2756  * necessary.  This function is also used for restoring the GPU after a GPU
2757  * reset.
2758  * Returns 0 on success, negative error code on failure.
2759  */
2760 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2761 {
2762         int i, r;
2763
2764         for (i = 0; i < adev->num_ip_blocks; i++) {
2765                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2766                         continue;
2767                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2768                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2769                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2770                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2771                         continue;
2772                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2773                 if (r) {
2774                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2775                                   adev->ip_blocks[i].version->funcs->name, r);
2776                         return r;
2777                 }
2778                 adev->ip_blocks[i].status.hw = true;
2779         }
2780
2781         return 0;
2782 }
2783
2784 /**
2785  * amdgpu_device_ip_resume - run resume for hardware IPs
2786  *
2787  * @adev: amdgpu_device pointer
2788  *
2789  * Main resume function for hardware IPs.  The hardware IPs
2790  * are split into two resume functions because they are
2791  * also used in recovering from a GPU reset and some additional
2792  * steps need to be taken between them.  In this case (S3/S4) they are
2793  * run sequentially.
2794  * Returns 0 on success, negative error code on failure.
2795  */
2796 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2797 {
2798         int r;
2799
2800         r = amdgpu_device_ip_resume_phase1(adev);
2801         if (r)
2802                 return r;
2803
2804         r = amdgpu_device_fw_loading(adev);
2805         if (r)
2806                 return r;
2807
2808         r = amdgpu_device_ip_resume_phase2(adev);
2809
2810         return r;
2811 }
2812
2813 /**
2814  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2815  *
2816  * @adev: amdgpu_device pointer
2817  *
2818  * Query the VBIOS data tables to determine if the board supports SR-IOV.
2819  */
2820 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2821 {
2822         if (amdgpu_sriov_vf(adev)) {
2823                 if (adev->is_atom_fw) {
2824                         if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2825                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2826                 } else {
2827                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2828                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2829                 }
2830
2831                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2832                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2833         }
2834 }
2835
2836 /**
2837  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2838  *
2839  * @asic_type: AMD asic type
2840  *
2841  * Check if there is DC (new modesetting infrastructure) support for an asic.
2842  * Returns true if DC has support, false if not.
2843  */
2844 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2845 {
2846         switch (asic_type) {
2847 #if defined(CONFIG_DRM_AMD_DC)
2848 #if defined(CONFIG_DRM_AMD_DC_SI)
2849         case CHIP_TAHITI:
2850         case CHIP_PITCAIRN:
2851         case CHIP_VERDE:
2852         case CHIP_OLAND:
2853 #endif
2854         case CHIP_BONAIRE:
2855         case CHIP_KAVERI:
2856         case CHIP_KABINI:
2857         case CHIP_MULLINS:
2858                 /*
2859                  * We have systems in the wild with these ASICs that require
2860                  * LVDS and VGA support which is not supported with DC.
2861                  *
2862                  * Fallback to the non-DC driver here by default so as not to
2863                  * cause regressions.
2864                  */
2865                 return amdgpu_dc > 0;
2866         case CHIP_HAWAII:
2867         case CHIP_CARRIZO:
2868         case CHIP_STONEY:
2869         case CHIP_POLARIS10:
2870         case CHIP_POLARIS11:
2871         case CHIP_POLARIS12:
2872         case CHIP_VEGAM:
2873         case CHIP_TONGA:
2874         case CHIP_FIJI:
2875         case CHIP_VEGA10:
2876         case CHIP_VEGA12:
2877         case CHIP_VEGA20:
2878 #if defined(CONFIG_DRM_AMD_DC_DCN)
2879         case CHIP_RAVEN:
2880         case CHIP_NAVI10:
2881         case CHIP_NAVI14:
2882         case CHIP_NAVI12:
2883         case CHIP_RENOIR:
2884 #endif
2885 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
2886         case CHIP_SIENNA_CICHLID:
2887         case CHIP_NAVY_FLOUNDER:
2888 #endif
2889                 return amdgpu_dc != 0;
2890 #endif
2891         default:
2892                 if (amdgpu_dc > 0)
2893                         DRM_INFO("Display Core has been requested via kernel parameter "
2894                                          "but isn't supported by ASIC, ignoring\n");
2895                 return false;
2896         }
2897 }
2898
2899 /**
2900  * amdgpu_device_has_dc_support - check if dc is supported
2901  *
2902  * @adev: amdgpu_device pointer
2903  *
2904  * Returns true for supported, false for not supported
2905  */
2906 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2907 {
2908         if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
2909                 return false;
2910
2911         return amdgpu_device_asic_has_dc_support(adev->asic_type);
2912 }
2913
2914
2915 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2916 {
2917         struct amdgpu_device *adev =
2918                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
2919         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2920
2921         /* It's a bug to not have a hive within this function */
2922         if (WARN_ON(!hive))
2923                 return;
2924
2925         /*
2926          * Use task barrier to synchronize all xgmi reset works across the
2927          * hive. task_barrier_enter and task_barrier_exit will block
2928          * until all the threads running the xgmi reset works reach
2929          * those points. task_barrier_full will do both blocks.
2930          */
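             /*
              * In the BACO path below, this means every device in the hive has
              * entered BACO before any device begins to exit it.
              */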
2931         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
2932
2933                 task_barrier_enter(&hive->tb);
2934                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
2935
2936                 if (adev->asic_reset_res)
2937                         goto fail;
2938
2939                 task_barrier_exit(&hive->tb);
2940                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
2941
2942                 if (adev->asic_reset_res)
2943                         goto fail;
2944
2945                 if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
2946                         adev->mmhub.funcs->reset_ras_error_count(adev);
2947         } else {
2948
2949                 task_barrier_full(&hive->tb);
2950                 adev->asic_reset_res =  amdgpu_asic_reset(adev);
2951         }
2952
2953 fail:
2954         if (adev->asic_reset_res)
2955                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
2956                          adev->asic_reset_res, adev_to_drm(adev)->unique);
2957         amdgpu_put_xgmi_hive(hive);
2958 }
2959
2960 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
2961 {
2962         char *input = amdgpu_lockup_timeout;
2963         char *timeout_setting = NULL;
2964         int index = 0;
2965         long timeout;
2966         int ret = 0;
2967
2968         /*
2969          * By default the timeout for non-compute jobs is 10000 ms and
2970          * no timeout is enforced on compute jobs.
2971          * In SR-IOV or passthrough mode, the timeout for compute
2972          * jobs is 60000 ms by default.
2973          */
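             /*
              * As an illustration (values arbitrary), a boot option such as
              *   amdgpu.lockup_timeout=10000,60000,2000,2000
              * is parsed below as gfx=10000 ms, compute=60000 ms,
              * sdma=2000 ms and video=2000 ms.
              */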
2974         adev->gfx_timeout = msecs_to_jiffies(10000);
2975         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
2976         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2977                 adev->compute_timeout =  msecs_to_jiffies(60000);
2978         else
2979                 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
2980
2981         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
2982                 while ((timeout_setting = strsep(&input, ",")) &&
2983                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
2984                         ret = kstrtol(timeout_setting, 0, &timeout);
2985                         if (ret)
2986                                 return ret;
2987
2988                         if (timeout == 0) {
2989                                 index++;
2990                                 continue;
2991                         } else if (timeout < 0) {
2992                                 timeout = MAX_SCHEDULE_TIMEOUT;
2993                         } else {
2994                                 timeout = msecs_to_jiffies(timeout);
2995                         }
2996
2997                         switch (index++) {
2998                         case 0:
2999                                 adev->gfx_timeout = timeout;
3000                                 break;
3001                         case 1:
3002                                 adev->compute_timeout = timeout;
3003                                 break;
3004                         case 2:
3005                                 adev->sdma_timeout = timeout;
3006                                 break;
3007                         case 3:
3008                                 adev->video_timeout = timeout;
3009                                 break;
3010                         default:
3011                                 break;
3012                         }
3013                 }
3014                 /*
3015                  * There is only one value specified and
3016                  * it should apply to all non-compute jobs.
3017                  */
3018                 if (index == 1) {
3019                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3020                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3021                                 adev->compute_timeout = adev->gfx_timeout;
3022                 }
3023         }
3024
3025         return ret;
3026 }
3027
3028 static const struct attribute *amdgpu_dev_attributes[] = {
3029         &dev_attr_product_name.attr,
3030         &dev_attr_product_number.attr,
3031         &dev_attr_serial_number.attr,
3032         &dev_attr_pcie_replay_count.attr,
3033         NULL
3034 };
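     /*
      * These attributes are created on the device's kobject during
      * amdgpu_device_init(), so on a typical setup they appear as e.g.
      * /sys/class/drm/card0/device/serial_number (path illustrative).
      */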
3035
3036
3037 /**
3038  * amdgpu_device_init - initialize the driver
3039  *
3040  * @adev: amdgpu_device pointer
3041  * @flags: driver flags
3042  *
3043  * Initializes the driver info and hw (all asics).
3044  * Returns 0 for success or an error on failure.
3045  * Called at driver startup.
3046  */
3047 int amdgpu_device_init(struct amdgpu_device *adev,
3048                        uint32_t flags)
3049 {
3050         struct drm_device *ddev = adev_to_drm(adev);
3051         struct pci_dev *pdev = adev->pdev;
3052         int r, i;
3053         bool boco = false;
3054         u32 max_MBps;
3055
3056         adev->shutdown = false;
3057         adev->flags = flags;
3058
3059         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3060                 adev->asic_type = amdgpu_force_asic_type;
3061         else
3062                 adev->asic_type = flags & AMD_ASIC_MASK;
3063
3064         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3065         if (amdgpu_emu_mode == 1)
3066                 adev->usec_timeout *= 10;
3067         adev->gmc.gart_size = 512 * 1024 * 1024;
3068         adev->accel_working = false;
3069         adev->num_rings = 0;
3070         adev->mman.buffer_funcs = NULL;
3071         adev->mman.buffer_funcs_ring = NULL;
3072         adev->vm_manager.vm_pte_funcs = NULL;
3073         adev->vm_manager.vm_pte_num_scheds = 0;
3074         adev->gmc.gmc_funcs = NULL;
3075         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3076         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3077
3078         adev->smc_rreg = &amdgpu_invalid_rreg;
3079         adev->smc_wreg = &amdgpu_invalid_wreg;
3080         adev->pcie_rreg = &amdgpu_invalid_rreg;
3081         adev->pcie_wreg = &amdgpu_invalid_wreg;
3082         adev->pciep_rreg = &amdgpu_invalid_rreg;
3083         adev->pciep_wreg = &amdgpu_invalid_wreg;
3084         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3085         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3086         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3087         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3088         adev->didt_rreg = &amdgpu_invalid_rreg;
3089         adev->didt_wreg = &amdgpu_invalid_wreg;
3090         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3091         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3092         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3093         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3094
3095         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3096                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3097                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3098
3099         /* mutex initializations are all done here so we
3100          * can recall functions without having locking issues */
3101         atomic_set(&adev->irq.ih.lock, 0);
3102         mutex_init(&adev->firmware.mutex);
3103         mutex_init(&adev->pm.mutex);
3104         mutex_init(&adev->gfx.gpu_clock_mutex);
3105         mutex_init(&adev->srbm_mutex);
3106         mutex_init(&adev->gfx.pipe_reserve_mutex);
3107         mutex_init(&adev->gfx.gfx_off_mutex);
3108         mutex_init(&adev->grbm_idx_mutex);
3109         mutex_init(&adev->mn_lock);
3110         mutex_init(&adev->virt.vf_errors.lock);
3111         hash_init(adev->mn_hash);
3112         atomic_set(&adev->in_gpu_reset, 0);
3113         init_rwsem(&adev->reset_sem);
3114         mutex_init(&adev->psp.mutex);
3115         mutex_init(&adev->notifier_lock);
3116
3117         r = amdgpu_device_check_arguments(adev);
3118         if (r)
3119                 return r;
3120
3121         spin_lock_init(&adev->mmio_idx_lock);
3122         spin_lock_init(&adev->smc_idx_lock);
3123         spin_lock_init(&adev->pcie_idx_lock);
3124         spin_lock_init(&adev->uvd_ctx_idx_lock);
3125         spin_lock_init(&adev->didt_idx_lock);
3126         spin_lock_init(&adev->gc_cac_idx_lock);
3127         spin_lock_init(&adev->se_cac_idx_lock);
3128         spin_lock_init(&adev->audio_endpt_idx_lock);
3129         spin_lock_init(&adev->mm_stats.lock);
3130
3131         INIT_LIST_HEAD(&adev->shadow_list);
3132         mutex_init(&adev->shadow_list_lock);
3133
3134         INIT_DELAYED_WORK(&adev->delayed_init_work,
3135                           amdgpu_device_delayed_init_work_handler);
3136         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3137                           amdgpu_device_delay_enable_gfx_off);
3138
3139         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3140
3141         adev->gfx.gfx_off_req_count = 1;
3142         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3143
3144         atomic_set(&adev->throttling_logging_enabled, 1);
3145         /*
3146          * If throttling continues, logging will be performed every minute
3147          * to avoid log flooding. "-1" is subtracted since the thermal
3148          * throttling interrupt comes every second. Thus, the total logging
3149          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3150          * for throttling interrupt) = 60 seconds.
3151          */
3152         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3153         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3154
3155         /* Registers mapping */
3156         /* TODO: block userspace mapping of io register */
3157         if (adev->asic_type >= CHIP_BONAIRE) {
3158                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3159                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3160         } else {
3161                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3162                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3163         }
3164
3165         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3166         if (adev->rmmio == NULL) {
3167                 return -ENOMEM;
3168         }
3169         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3170         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3171
3172         /* io port mapping */
3173         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3174                 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
3175                         adev->rio_mem_size = pci_resource_len(adev->pdev, i);
3176                         adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
3177                         break;
3178                 }
3179         }
3180         if (adev->rio_mem == NULL)
3181                 DRM_INFO("PCI I/O BAR is not found.\n");
3182
3183         /* enable PCIE atomic ops */
3184         r = pci_enable_atomic_ops_to_root(adev->pdev,
3185                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3186                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3187         if (r) {
3188                 adev->have_atomics_support = false;
3189                 DRM_INFO("PCIE atomic ops are not supported\n");
3190         } else {
3191                 adev->have_atomics_support = true;
3192         }
3193
3194         amdgpu_device_get_pcie_info(adev);
3195
3196         if (amdgpu_mcbp)
3197                 DRM_INFO("MCBP is enabled\n");
3198
3199         if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3200                 adev->enable_mes = true;
3201
3202         /* detect hw virtualization here */
3203         amdgpu_detect_virtualization(adev);
3204
3205         r = amdgpu_device_get_job_timeout_settings(adev);
3206         if (r) {
3207                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3208                 goto failed_unmap;
3209         }
3210
3211         /* early init functions */
3212         r = amdgpu_device_ip_early_init(adev);
3213         if (r)
3214                 goto failed_unmap;
3215
3216         /* doorbell bar mapping and doorbell index init */
3217         amdgpu_device_doorbell_init(adev);
3218
3219         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3220         /* this will fail for cards that aren't VGA class devices, just
3221          * ignore it */
3222         vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3223
3224         if (amdgpu_device_supports_boco(ddev))
3225                 boco = true;
3226         if (amdgpu_has_atpx() &&
3227             (amdgpu_is_atpx_hybrid() ||
3228              amdgpu_has_atpx_dgpu_power_cntl()) &&
3229             !pci_is_thunderbolt_attached(adev->pdev))
3230                 vga_switcheroo_register_client(adev->pdev,
3231                                                &amdgpu_switcheroo_ops, boco);
3232         if (boco)
3233                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3234
3235         if (amdgpu_emu_mode == 1) {
3236                 /* post the asic on emulation mode */
3237                 emu_soc_asic_init(adev);
3238                 goto fence_driver_init;
3239         }
3240
3241         /* detect if we have an SR-IOV vBIOS */
3242         amdgpu_device_detect_sriov_bios(adev);
3243
3244         /* check if we need to reset the asic
3245          *  E.g., driver was not cleanly unloaded previously, etc.
3246          */
3247         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3248                 r = amdgpu_asic_reset(adev);
3249                 if (r) {
3250                         dev_err(adev->dev, "asic reset on init failed\n");
3251                         goto failed;
3252                 }
3253         }
3254
3255         pci_enable_pcie_error_reporting(adev->ddev.pdev);
3256
3257         /* Post card if necessary */
3258         if (amdgpu_device_need_post(adev)) {
3259                 if (!adev->bios) {
3260                         dev_err(adev->dev, "no vBIOS found\n");
3261                         r = -EINVAL;
3262                         goto failed;
3263                 }
3264                 DRM_INFO("GPU posting now...\n");
3265                 r = amdgpu_device_asic_init(adev);
3266                 if (r) {
3267                         dev_err(adev->dev, "gpu post error!\n");
3268                         goto failed;
3269                 }
3270         }
3271
3272         if (adev->is_atom_fw) {
3273                 /* Initialize clocks */
3274                 r = amdgpu_atomfirmware_get_clock_info(adev);
3275                 if (r) {
3276                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3277                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3278                         goto failed;
3279                 }
3280         } else {
3281                 /* Initialize clocks */
3282                 r = amdgpu_atombios_get_clock_info(adev);
3283                 if (r) {
3284                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3285                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3286                         goto failed;
3287                 }
3288                 /* init i2c buses */
3289                 if (!amdgpu_device_has_dc_support(adev))
3290                         amdgpu_atombios_i2c_init(adev);
3291         }
3292
3293 fence_driver_init:
3294         /* Fence driver */
3295         r = amdgpu_fence_driver_init(adev);
3296         if (r) {
3297                 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3298                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3299                 goto failed;
3300         }
3301
3302         /* init the mode config */
3303         drm_mode_config_init(adev_to_drm(adev));
3304
3305         r = amdgpu_device_ip_init(adev);
3306         if (r) {
3307                 /* failed in exclusive mode due to timeout */
3308                 if (amdgpu_sriov_vf(adev) &&
3309                     !amdgpu_sriov_runtime(adev) &&
3310                     amdgpu_virt_mmio_blocked(adev) &&
3311                     !amdgpu_virt_wait_reset(adev)) {
3312                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3313                         /* Don't send request since VF is inactive. */
3314                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3315                         adev->virt.ops = NULL;
3316                         r = -EAGAIN;
3317                         goto failed;
3318                 }
3319                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3320                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3321                 goto failed;
3322         }
3323
3324         dev_info(adev->dev,
3325                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3326                         adev->gfx.config.max_shader_engines,
3327                         adev->gfx.config.max_sh_per_se,
3328                         adev->gfx.config.max_cu_per_sh,
3329                         adev->gfx.cu_info.number);
3330
3331         adev->accel_working = true;
3332
3333         amdgpu_vm_check_compute_bug(adev);
3334
3335         /* Initialize the buffer migration limit. */
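             /*
              * For example, booting with amdgpu.moverate=32 (value illustrative)
              * caps migration at roughly 32 MB/s, while the default negative
              * value falls back to the 8 MB/s below.
              */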
3336         if (amdgpu_moverate >= 0)
3337                 max_MBps = amdgpu_moverate;
3338         else
3339                 max_MBps = 8; /* Allow 8 MB/s. */
3340         /* Get a log2 for easy divisions. */
3341         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3342
3343         amdgpu_fbdev_init(adev);
3344
3345         r = amdgpu_pm_sysfs_init(adev);
3346         if (r) {
3347                 adev->pm_sysfs_en = false;
3348                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3349         } else
3350                 adev->pm_sysfs_en = true;
3351
3352         r = amdgpu_ucode_sysfs_init(adev);
3353         if (r) {
3354                 adev->ucode_sysfs_en = false;
3355                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3356         } else
3357                 adev->ucode_sysfs_en = true;
3358
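             /*
              * Both knobs below are module options: e.g. amdgpu.test=1 runs the
              * BO move tests and amdgpu.benchmark=<n> selects which benchmark
              * amdgpu_benchmark() runs (values illustrative).
              */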
3359         if ((amdgpu_testing & 1)) {
3360                 if (adev->accel_working)
3361                         amdgpu_test_moves(adev);
3362                 else
3363                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3364         }
3365         if (amdgpu_benchmarking) {
3366                 if (adev->accel_working)
3367                         amdgpu_benchmark(adev, amdgpu_benchmarking);
3368                 else
3369                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3370         }
3371
3372         /*
3373          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3374          * Otherwise the mgpu fan boost feature will be skipped because the
3375          * gpu instance count would be too low.
3376          */
3377         amdgpu_register_gpu_instance(adev);
3378
3379         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3380          * explicit gating rather than handling it automatically.
3381          */
3382         r = amdgpu_device_ip_late_init(adev);
3383         if (r) {
3384                 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3385                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3386                 goto failed;
3387         }
3388
3389         /* must succeed. */
3390         amdgpu_ras_resume(adev);
3391
3392         queue_delayed_work(system_wq, &adev->delayed_init_work,
3393                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3394
3395         if (amdgpu_sriov_vf(adev))
3396                 flush_delayed_work(&adev->delayed_init_work);
3397
3398         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3399         if (r)
3400                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3401
3402         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3403                 r = amdgpu_pmu_init(adev);
3404                 if (r)
3405                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
             }
3406
3407         /* Have the stored PCI config space at hand for restore after a sudden PCI error */
3408         if (amdgpu_device_cache_pci_state(adev->pdev))
3409                 pci_restore_state(pdev);
3410
3411         return 0;
3412
3413 failed:
3414         amdgpu_vf_error_trans_all(adev);
3415         if (boco)
3416                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3417
3418 failed_unmap:
3419         iounmap(adev->rmmio);
3420         adev->rmmio = NULL;
3421
3422         return r;
3423 }
3424
3425 /**
3426  * amdgpu_device_fini - tear down the driver
3427  *
3428  * @adev: amdgpu_device pointer
3429  *
3430  * Tear down the driver info (all asics).
3431  * Called at driver shutdown.
3432  */
3433 void amdgpu_device_fini(struct amdgpu_device *adev)
3434 {
3435         dev_info(adev->dev, "amdgpu: finishing device.\n");
3436         flush_delayed_work(&adev->delayed_init_work);
3437         adev->shutdown = true;
3438
3439         kfree(adev->pci_state);
3440
3441         /* make sure IB tests have finished before entering exclusive mode
3442          * to avoid preemption on IB tests
3443          */
3444         if (amdgpu_sriov_vf(adev)) {
3445                 amdgpu_virt_request_full_gpu(adev, false);
3446                 amdgpu_virt_fini_data_exchange(adev);
3447         }
3448
3449         /* disable all interrupts */
3450         amdgpu_irq_disable_all(adev);
3451         if (adev->mode_info.mode_config_initialized) {
3452                 if (!amdgpu_device_has_dc_support(adev))
3453                         drm_helper_force_disable_all(adev_to_drm(adev));
3454                 else
3455                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3456         }
3457         amdgpu_fence_driver_fini(adev);
3458         if (adev->pm_sysfs_en)
3459                 amdgpu_pm_sysfs_fini(adev);
3460         amdgpu_fbdev_fini(adev);
3461         amdgpu_device_ip_fini(adev);
3462         release_firmware(adev->firmware.gpu_info_fw);
3463         adev->firmware.gpu_info_fw = NULL;
3464         adev->accel_working = false;
3465         /* free i2c buses */
3466         if (!amdgpu_device_has_dc_support(adev))
3467                 amdgpu_i2c_fini(adev);
3468
3469         if (amdgpu_emu_mode != 1)
3470                 amdgpu_atombios_fini(adev);
3471
3472         kfree(adev->bios);
3473         adev->bios = NULL;
3474         if (amdgpu_has_atpx() &&
3475             (amdgpu_is_atpx_hybrid() ||
3476              amdgpu_has_atpx_dgpu_power_cntl()) &&
3477             !pci_is_thunderbolt_attached(adev->pdev))
3478                 vga_switcheroo_unregister_client(adev->pdev);
3479         if (amdgpu_device_supports_boco(adev_to_drm(adev)))
3480                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3481         vga_client_register(adev->pdev, NULL, NULL, NULL);
3482         if (adev->rio_mem)
3483                 pci_iounmap(adev->pdev, adev->rio_mem);
3484         adev->rio_mem = NULL;
3485         iounmap(adev->rmmio);
3486         adev->rmmio = NULL;
3487         amdgpu_device_doorbell_fini(adev);
3488
3489         if (adev->ucode_sysfs_en)
3490                 amdgpu_ucode_sysfs_fini(adev);
3491
3492         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3493         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3494                 amdgpu_pmu_fini(adev);
3495         if (adev->mman.discovery_bin)
3496                 amdgpu_discovery_fini(adev);
3497 }
3498
3499
3500 /*
3501  * Suspend & resume.
3502  */
3503 /**
3504  * amdgpu_device_suspend - initiate device suspend
3505  *
3506  * @dev: drm dev pointer
3507  * @fbcon : notify the fbdev of suspend
3508  *
3509  * Puts the hw in the suspend state (all asics).
3510  * Returns 0 for success or an error on failure.
3511  * Called at driver suspend.
3512  */
3513 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3514 {
3515         struct amdgpu_device *adev;
3516         struct drm_crtc *crtc;
3517         struct drm_connector *connector;
3518         struct drm_connector_list_iter iter;
3519         int r;
3520
3521         adev = drm_to_adev(dev);
3522
3523         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3524                 return 0;
3525
3526         adev->in_suspend = true;
3527         drm_kms_helper_poll_disable(dev);
3528
3529         if (fbcon)
3530                 amdgpu_fbdev_set_suspend(adev, 1);
3531
3532         cancel_delayed_work_sync(&adev->delayed_init_work);
3533
3534         if (!amdgpu_device_has_dc_support(adev)) {
3535                 /* turn off display hw */
3536                 drm_modeset_lock_all(dev);
3537                 drm_connector_list_iter_begin(dev, &iter);
3538                 drm_for_each_connector_iter(connector, &iter)
3539                         drm_helper_connector_dpms(connector,
3540                                                   DRM_MODE_DPMS_OFF);
3541                 drm_connector_list_iter_end(&iter);
3542                 drm_modeset_unlock_all(dev);
3543                 /* unpin the front buffers and cursors */
3544                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3545                         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3546                         struct drm_framebuffer *fb = crtc->primary->fb;
3547                         struct amdgpu_bo *robj;
3548
3549                         if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3550                                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3551                                 r = amdgpu_bo_reserve(aobj, true);
3552                                 if (r == 0) {
3553                                         amdgpu_bo_unpin(aobj);
3554                                         amdgpu_bo_unreserve(aobj);
3555                                 }
3556                         }
3557
3558                         if (fb == NULL || fb->obj[0] == NULL) {
3559                                 continue;
3560                         }
3561                         robj = gem_to_amdgpu_bo(fb->obj[0]);
3562                         /* don't unpin kernel fb objects */
3563                         if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3564                                 r = amdgpu_bo_reserve(robj, true);
3565                                 if (r == 0) {
3566                                         amdgpu_bo_unpin(robj);
3567                                         amdgpu_bo_unreserve(robj);
3568                                 }
3569                         }
3570                 }
3571         }
3572
3573         amdgpu_ras_suspend(adev);
3574
3575         r = amdgpu_device_ip_suspend_phase1(adev);
3576
3577         amdgpu_amdkfd_suspend(adev, !fbcon);
3578
3579         /* evict vram memory */
3580         amdgpu_bo_evict_vram(adev);
3581
3582         amdgpu_fence_driver_suspend(adev);
3583
3584         r = amdgpu_device_ip_suspend_phase2(adev);
3585
3586         /* evict remaining vram memory
3587          * This second call to evict vram is to evict the gart page table
3588          * using the CPU.
3589          */
3590         amdgpu_bo_evict_vram(adev);
3591
3592         return 0;
3593 }
3594
3595 /**
3596  * amdgpu_device_resume - initiate device resume
3597  *
3598  * @dev: drm dev pointer
3599  * @fbcon : notify the fbdev of resume
3600  *
3601  * Bring the hw back to operating state (all asics).
3602  * Returns 0 for success or an error on failure.
3603  * Called at driver resume.
3604  */
3605 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3606 {
3607         struct drm_connector *connector;
3608         struct drm_connector_list_iter iter;
3609         struct amdgpu_device *adev = drm_to_adev(dev);
3610         struct drm_crtc *crtc;
3611         int r = 0;
3612
3613         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3614                 return 0;
3615
3616         /* post card */
3617         if (amdgpu_device_need_post(adev)) {
3618                 r = amdgpu_device_asic_init(adev);
3619                 if (r)
3620                         dev_err(adev->dev, "amdgpu asic init failed\n");
3621         }
3622
3623         r = amdgpu_device_ip_resume(adev);
3624         if (r) {
3625                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3626                 return r;
3627         }
3628         amdgpu_fence_driver_resume(adev);
3629
3630
3631         r = amdgpu_device_ip_late_init(adev);
3632         if (r)
3633                 return r;
3634
3635         queue_delayed_work(system_wq, &adev->delayed_init_work,
3636                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3637
3638         if (!amdgpu_device_has_dc_support(adev)) {
3639                 /* pin cursors */
3640                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3641                         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3642
3643                         if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3644                                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3645                                 r = amdgpu_bo_reserve(aobj, true);
3646                                 if (r == 0) {
3647                                         r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3648                                         if (r != 0)
3649                                                 dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
3650                                         amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3651                                         amdgpu_bo_unreserve(aobj);
3652                                 }
3653                         }
3654                 }
3655         }
3656         r = amdgpu_amdkfd_resume(adev, !fbcon);
3657         if (r)
3658                 return r;
3659
3660         /* Make sure IB tests flushed */
3661         flush_delayed_work(&adev->delayed_init_work);
3662
3663         /* blat the mode back in */
3664         if (fbcon) {
3665                 if (!amdgpu_device_has_dc_support(adev)) {
3666                         /* pre DCE11 */
3667                         drm_helper_resume_force_mode(dev);
3668
3669                         /* turn on display hw */
3670                         drm_modeset_lock_all(dev);
3671
3672                         drm_connector_list_iter_begin(dev, &iter);
3673                         drm_for_each_connector_iter(connector, &iter)
3674                                 drm_helper_connector_dpms(connector,
3675                                                           DRM_MODE_DPMS_ON);
3676                         drm_connector_list_iter_end(&iter);
3677
3678                         drm_modeset_unlock_all(dev);
3679                 }
3680                 amdgpu_fbdev_set_suspend(adev, 0);
3681         }
3682
3683         drm_kms_helper_poll_enable(dev);
3684
3685         amdgpu_ras_resume(adev);
3686
3687         /*
3688          * Most of the connector probing functions try to acquire runtime pm
3689          * refs to ensure that the GPU is powered on when connector polling is
3690          * performed. Since we're calling this from a runtime PM callback,
3691          * trying to acquire rpm refs will cause us to deadlock.
3692          *
3693          * Since we're guaranteed to be holding the rpm lock, it's safe to
3694          * temporarily disable the rpm helpers so this doesn't deadlock us.
3695          */
3696 #ifdef CONFIG_PM
3697         dev->dev->power.disable_depth++;
3698 #endif
3699         if (!amdgpu_device_has_dc_support(adev))
3700                 drm_helper_hpd_irq_event(dev);
3701         else
3702                 drm_kms_helper_hotplug_event(dev);
3703 #ifdef CONFIG_PM
3704         dev->dev->power.disable_depth--;
3705 #endif
3706         adev->in_suspend = false;
3707
3708         return 0;
3709 }
3710
3711 /**
3712  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3713  *
3714  * @adev: amdgpu_device pointer
3715  *
3716  * The list of all the hardware IPs that make up the asic is walked and
3717  * the check_soft_reset callbacks are run.  check_soft_reset determines
3718  * if the asic is still hung or not.
3719  * Returns true if any of the IPs are still in a hung state, false if not.
3720  */
3721 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3722 {
3723         int i;
3724         bool asic_hang = false;
3725
3726         if (amdgpu_sriov_vf(adev))
3727                 return true;
3728
3729         if (amdgpu_asic_need_full_reset(adev))
3730                 return true;
3731
3732         for (i = 0; i < adev->num_ip_blocks; i++) {
3733                 if (!adev->ip_blocks[i].status.valid)
3734                         continue;
3735                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3736                         adev->ip_blocks[i].status.hang =
3737                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3738                 if (adev->ip_blocks[i].status.hang) {
3739                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3740                         asic_hang = true;
3741                 }
3742         }
3743         return asic_hang;
3744 }
3745
3746 /**
3747  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3748  *
3749  * @adev: amdgpu_device pointer
3750  *
3751  * The list of all the hardware IPs that make up the asic is walked and the
3752  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
3753  * handles any IP specific hardware or software state changes that are
3754  * necessary for a soft reset to succeed.
3755  * Returns 0 on success, negative error code on failure.
3756  */
3757 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3758 {
3759         int i, r = 0;
3760
3761         for (i = 0; i < adev->num_ip_blocks; i++) {
3762                 if (!adev->ip_blocks[i].status.valid)
3763                         continue;
3764                 if (adev->ip_blocks[i].status.hang &&
3765                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3766                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3767                         if (r)
3768                                 return r;
3769                 }
3770         }
3771
3772         return 0;
3773 }
3774
3775 /**
3776  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3777  *
3778  * @adev: amdgpu_device pointer
3779  *
3780  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
3781  * reset is necessary to recover.
3782  * Returns true if a full asic reset is required, false if not.
3783  */
3784 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3785 {
3786         int i;
3787
3788         if (amdgpu_asic_need_full_reset(adev))
3789                 return true;
3790
3791         for (i = 0; i < adev->num_ip_blocks; i++) {
3792                 if (!adev->ip_blocks[i].status.valid)
3793                         continue;
3794                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3795                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3796                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3797                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3798                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3799                         if (adev->ip_blocks[i].status.hang) {
3800                                 dev_info(adev->dev, "Some block needs a full reset!\n");
3801                                 return true;
3802                         }
3803                 }
3804         }
3805         return false;
3806 }
3807
3808 /**
3809  * amdgpu_device_ip_soft_reset - do a soft reset
3810  *
3811  * @adev: amdgpu_device pointer
3812  *
3813  * The list of all the hardware IPs that make up the asic is walked and the
3814  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
3815  * IP specific hardware or software state changes that are necessary to soft
3816  * reset the IP.
3817  * Returns 0 on success, negative error code on failure.
3818  */
3819 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3820 {
3821         int i, r = 0;
3822
3823         for (i = 0; i < adev->num_ip_blocks; i++) {
3824                 if (!adev->ip_blocks[i].status.valid)
3825                         continue;
3826                 if (adev->ip_blocks[i].status.hang &&
3827                     adev->ip_blocks[i].version->funcs->soft_reset) {
3828                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3829                         if (r)
3830                                 return r;
3831                 }
3832         }
3833
3834         return 0;
3835 }
3836
3837 /**
3838  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3839  *
3840  * @adev: amdgpu_device pointer
3841  *
3842  * The list of all the hardware IPs that make up the asic is walked and the
3843  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
3844  * handles any IP specific hardware or software state changes that are
3845  * necessary after the IP has been soft reset.
3846  * Returns 0 on success, negative error code on failure.
3847  */
3848 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3849 {
3850         int i, r = 0;
3851
3852         for (i = 0; i < adev->num_ip_blocks; i++) {
3853                 if (!adev->ip_blocks[i].status.valid)
3854                         continue;
3855                 if (adev->ip_blocks[i].status.hang &&
3856                     adev->ip_blocks[i].version->funcs->post_soft_reset)
3857                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3858                 if (r)
3859                         return r;
3860         }
3861
3862         return 0;
3863 }
3864
3865 /**
3866  * amdgpu_device_recover_vram - Recover some VRAM contents
3867  *
3868  * @adev: amdgpu_device pointer
3869  *
3870  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
3871  * restore things like GPUVM page tables after a GPU reset where
3872  * the contents of VRAM might be lost.
3873  *
3874  * Returns:
3875  * 0 on success, negative error code on failure.
3876  */
3877 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
3878 {
3879         struct dma_fence *fence = NULL, *next = NULL;
3880         struct amdgpu_bo *shadow;
3881         long r = 1, tmo;
3882
3883         if (amdgpu_sriov_runtime(adev))
3884                 tmo = msecs_to_jiffies(8000);
3885         else
3886                 tmo = msecs_to_jiffies(100);
3887
3888         dev_info(adev->dev, "recover vram bo from shadow start\n");
3889         mutex_lock(&adev->shadow_list_lock);
3890         list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3891
3892                 /* No need to recover an evicted BO */
3893                 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3894                     shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
3895                     shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3896                         continue;
3897
3898                 r = amdgpu_bo_restore_shadow(shadow, &next);
3899                 if (r)
3900                         break;
3901
3902                 if (fence) {
3903                         tmo = dma_fence_wait_timeout(fence, false, tmo);
3904                         dma_fence_put(fence);
3905                         fence = next;
3906                         if (tmo == 0) {
3907                                 r = -ETIMEDOUT;
3908                                 break;
3909                         } else if (tmo < 0) {
3910                                 r = tmo;
3911                                 break;
3912                         }
3913                 } else {
3914                         fence = next;
3915                 }
3916         }
3917         mutex_unlock(&adev->shadow_list_lock);
3918
3919         if (fence)
3920                 tmo = dma_fence_wait_timeout(fence, false, tmo);
3921         dma_fence_put(fence);
3922
3923         if (r < 0 || tmo <= 0) {
3924                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
3925                 return -EIO;
3926         }
3927
3928         dev_info(adev->dev, "recover vram bo from shadow done\n");
3929         return 0;
3930 }
3931
3932
3933 /**
3934  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3935  *
3936  * @adev: amdgpu device pointer
3937  * @from_hypervisor: request from hypervisor
3938  *
3939  * Do a VF FLR and reinitialize the ASIC.
3940  * Returns 0 on success, negative error code on failure.
3941  */
3942 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3943                                      bool from_hypervisor)
3944 {
3945         int r;
3946
3947         if (from_hypervisor)
3948                 r = amdgpu_virt_request_full_gpu(adev, true);
3949         else
3950                 r = amdgpu_virt_reset_gpu(adev);
3951         if (r)
3952                 return r;
3953
3954         amdgpu_amdkfd_pre_reset(adev);
3955
3956         /* Resume IP prior to SMC */
3957         r = amdgpu_device_ip_reinit_early_sriov(adev);
3958         if (r)
3959                 goto error;
3960
3961         amdgpu_virt_init_data_exchange(adev);
3962         /* we need to recover the GART prior to running SMC/CP/SDMA resume */
3963         amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
3964
3965         r = amdgpu_device_fw_loading(adev);
3966         if (r)
3967                 return r;
3968
3969         /* now we are okay to resume SMC/CP/SDMA */
3970         r = amdgpu_device_ip_reinit_late_sriov(adev);
3971         if (r)
3972                 goto error;
3973
3974         amdgpu_irq_gpu_reset_resume_helper(adev);
3975         r = amdgpu_ib_ring_tests(adev);
3976         amdgpu_amdkfd_post_reset(adev);
3977
3978 error:
3979         amdgpu_virt_release_full_gpu(adev, true);
3980         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3981                 amdgpu_inc_vram_lost(adev);
3982                 r = amdgpu_device_recover_vram(adev);
3983         }
3984
3985         return r;
3986 }
3987
3988 /**
3989  * amdgpu_device_has_job_running - check if there is any job in mirror list
3990  *
3991  * @adev: amdgpu device pointer
3992  *
3993  * check if there is any job in mirror list
3994  */
3995 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
3996 {
3997         int i;
3998         struct drm_sched_job *job;
3999
4000         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4001                 struct amdgpu_ring *ring = adev->rings[i];
4002
4003                 if (!ring || !ring->sched.thread)
4004                         continue;
4005
4006                 spin_lock(&ring->sched.job_list_lock);
4007                 job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
4008                                 struct drm_sched_job, node);
4009                 spin_unlock(&ring->sched.job_list_lock);
4010                 if (job)
4011                         return true;
4012         }
4013         return false;
4014 }
4015
4016 /**
4017  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4018  *
4019  * @adev: amdgpu device pointer
4020  *
4021  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4022  * a hung GPU.
4023  */
4024 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4025 {
4026         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4027                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4028                 return false;
4029         }
4030
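             /*
              * To illustrate the checks below: amdgpu_gpu_recovery (the
              * amdgpu.gpu_recovery option) disables recovery when 0, restricts
              * it to the ASICs listed further down when -1 (auto), and enables
              * it unconditionally for any other value, e.g. 1; SR-IOV VFs skip
              * the per-ASIC check.
              */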
4031         if (amdgpu_gpu_recovery == 0)
4032                 goto disabled;
4033
4034         if (amdgpu_sriov_vf(adev))
4035                 return true;
4036
4037         if (amdgpu_gpu_recovery == -1) {
4038                 switch (adev->asic_type) {
4039                 case CHIP_BONAIRE:
4040                 case CHIP_HAWAII:
4041                 case CHIP_TOPAZ:
4042                 case CHIP_TONGA:
4043                 case CHIP_FIJI:
4044                 case CHIP_POLARIS10:
4045                 case CHIP_POLARIS11:
4046                 case CHIP_POLARIS12:
4047                 case CHIP_VEGAM:
4048                 case CHIP_VEGA20:
4049                 case CHIP_VEGA10:
4050                 case CHIP_VEGA12:
4051                 case CHIP_RAVEN:
4052                 case CHIP_ARCTURUS:
4053                 case CHIP_RENOIR:
4054                 case CHIP_NAVI10:
4055                 case CHIP_NAVI14:
4056                 case CHIP_NAVI12:
4057                 case CHIP_SIENNA_CICHLID:
4058                         break;
4059                 default:
4060                         goto disabled;
4061                 }
4062         }
4063
4064         return true;
4065
4066 disabled:
4067                 dev_info(adev->dev, "GPU recovery disabled.\n");
4068                 return false;
4069 }
4070
4071
4072 static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4073                                         struct amdgpu_job *job,
4074                                         bool *need_full_reset_arg)
4075 {
4076         int i, r = 0;
4077         bool need_full_reset  = *need_full_reset_arg;
4078
4079         amdgpu_debugfs_wait_dump(adev);
4080
4081         if (amdgpu_sriov_vf(adev)) {
4082                 /* stop the data exchange thread */
4083                 amdgpu_virt_fini_data_exchange(adev);
4084         }
4085
4086         /* block all schedulers and reset given job's ring */
4087         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4088                 struct amdgpu_ring *ring = adev->rings[i];
4089
4090                 if (!ring || !ring->sched.thread)
4091                         continue;
4092
4093                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4094                 amdgpu_fence_driver_force_completion(ring);
4095         }
4096
4097         if (job)
4098                 drm_sched_increase_karma(&job->base);
4099
4100         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4101         if (!amdgpu_sriov_vf(adev)) {
4102
4103                 if (!need_full_reset)
4104                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4105
4106                 if (!need_full_reset) {
4107                         amdgpu_device_ip_pre_soft_reset(adev);
4108                         r = amdgpu_device_ip_soft_reset(adev);
4109                         amdgpu_device_ip_post_soft_reset(adev);
4110                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4111                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4112                                 need_full_reset = true;
4113                         }
4114                 }
4115
4116                 if (need_full_reset)
4117                         r = amdgpu_device_ip_suspend(adev);
4118
4119                 *need_full_reset_arg = need_full_reset;
4120         }
4121
4122         return r;
4123 }
4124
4125 static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
4126                                struct list_head *device_list_handle,
4127                                bool *need_full_reset_arg,
4128                                bool skip_hw_reset)
4129 {
4130         struct amdgpu_device *tmp_adev = NULL;
4131         bool need_full_reset = *need_full_reset_arg, vram_lost = false;
4132         int r = 0;
4133
4134         /*
4135          * ASIC reset has to be done on all XGMI hive nodes ASAP
4136          * to allow proper link negotiation in FW (within 1 sec)
4137          */
4138         if (!skip_hw_reset && need_full_reset) {
4139                 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4140                         /* For XGMI run all resets in parallel to speed up the process */
4141                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4142                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4143                                         r = -EALREADY;
4144                         } else
4145                                 r = amdgpu_asic_reset(tmp_adev);
4146
4147                         if (r) {
4148                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4149                                          r, adev_to_drm(tmp_adev)->unique);
4150                                 break;
4151                         }
4152                 }
4153
4154                 /* For XGMI wait for all resets to complete before proceeding */
4155                 if (!r) {
4156                         list_for_each_entry(tmp_adev, device_list_handle,
4157                                             gmc.xgmi.head) {
4158                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4159                                         flush_work(&tmp_adev->xgmi_reset_work);
4160                                         r = tmp_adev->asic_reset_res;
4161                                         if (r)
4162                                                 break;
4163                                 }
4164                         }
4165                 }
4166         }
4167
4168         if (!r && amdgpu_ras_intr_triggered()) {
4169                 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4170                         if (tmp_adev->mmhub.funcs &&
4171                             tmp_adev->mmhub.funcs->reset_ras_error_count)
4172                                 tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
4173                 }
4174
4175                 amdgpu_ras_intr_cleared();
4176         }
4177
4178         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4179                 if (need_full_reset) {
4180                         /* post card */
4181                         if (amdgpu_device_asic_init(tmp_adev))
4182                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4183
4184                         if (!r) {
4185                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4186                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4187                                 if (r)
4188                                         goto out;
4189
4190                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4191                                 if (vram_lost) {
4192                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4193                                         amdgpu_inc_vram_lost(tmp_adev);
4194                                 }
4195
4196                                 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4197                                 if (r)
4198                                         goto out;
4199
4200                                 r = amdgpu_device_fw_loading(tmp_adev);
4201                                 if (r)
4202                                         return r;
4203
4204                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4205                                 if (r)
4206                                         goto out;
4207
4208                                 if (vram_lost)
4209                                         amdgpu_device_fill_reset_magic(tmp_adev);
4210
4211                                 /*
4212                                  * Add this ASIC as tracked since the reset already
4213                                  * completed successfully.
4214                                  */
4215                                 amdgpu_register_gpu_instance(tmp_adev);
4216
4217                                 r = amdgpu_device_ip_late_init(tmp_adev);
4218                                 if (r)
4219                                         goto out;
4220
4221                                 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4222
4223                                 /*
4224                                  * The GPU enters a bad state once the number of
4225                                  * faulty pages detected by ECC reaches the
4226                                  * threshold, and RAS recovery is scheduled next.
4227                                  * So add a check here to break out of recovery if
4228                                  * the bad page threshold has indeed been exceeded,
4229                                  * and remind the user to either retire this GPU or
4230                                  * set a bigger bad_page_threshold value the next
4231                                  * time the driver is probed.
4232                                  */
4233                                 if (!amdgpu_ras_check_err_threshold(tmp_adev)) {
4234                                         /* must succeed. */
4235                                         amdgpu_ras_resume(tmp_adev);
4236                                 } else {
4237                                         r = -EINVAL;
4238                                         goto out;
4239                                 }
4240
4241                                 /* Update PSP FW topology after reset */
4242                                 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4243                                         r = amdgpu_xgmi_update_topology(hive, tmp_adev);
4244                         }
4245                 }
4246
4247 out:
4248                 if (!r) {
4249                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4250                         r = amdgpu_ib_ring_tests(tmp_adev);
4251                         if (r) {
4252                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4253                                 r = amdgpu_device_ip_suspend(tmp_adev);
4254                                 need_full_reset = true;
4255                                 r = -EAGAIN;
4256                                 goto end;
4257                         }
4258                 }
4259
4260                 if (!r)
4261                         r = amdgpu_device_recover_vram(tmp_adev);
4262                 else
4263                         tmp_adev->asic_reset_res = r;
4264         }
4265
4266 end:
4267         *need_full_reset_arg = need_full_reset;
4268         return r;
4269 }
4270
4271 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4272                                 struct amdgpu_hive_info *hive)
4273 {
4274         if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4275                 return false;
4276
4277         if (hive) {
4278                 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4279         } else {
4280                 down_write(&adev->reset_sem);
4281         }
4282
4283         atomic_inc(&adev->gpu_reset_counter);
4284         switch (amdgpu_asic_reset_method(adev)) {
4285         case AMD_RESET_METHOD_MODE1:
4286                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4287                 break;
4288         case AMD_RESET_METHOD_MODE2:
4289                 adev->mp1_state = PP_MP1_STATE_RESET;
4290                 break;
4291         default:
4292                 adev->mp1_state = PP_MP1_STATE_NONE;
4293                 break;
4294         }
4295
4296         return true;
4297 }
4298
4299 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4300 {
4301         amdgpu_vf_error_trans_all(adev);
4302         adev->mp1_state = PP_MP1_STATE_NONE;
4303         atomic_set(&adev->in_gpu_reset, 0);
4304         up_write(&adev->reset_sem);
4305 }
4306
4307 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4308 {
4309         struct pci_dev *p = NULL;
4310
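        /*
         * Function 1 on the GPU's bus is the display audio function whose
         * runtime PM was disabled in amdgpu_device_suspend_display_audio();
         * re-enable and resume it here.
         */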
4311         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4312                         adev->pdev->bus->number, 1);
4313         if (p) {
4314                 pm_runtime_enable(&(p->dev));
4315                 pm_runtime_resume(&(p->dev));
4316         }
4317 }
4318
4319 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4320 {
4321         enum amd_reset_method reset_method;
4322         struct pci_dev *p = NULL;
4323         u64 expires;
4324
4325         /*
4326          * For now, only BACO and mode1 reset are confirmed to suffer
4327          * the audio issue when the audio device is not suspended first.
4328          */
4329         reset_method = amdgpu_asic_reset_method(adev);
4330         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4331              (reset_method != AMD_RESET_METHOD_MODE1))
4332                 return -EINVAL;
4333
4334         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4335                         adev->pdev->bus->number, 1);
4336         if (!p)
4337                 return -ENODEV;
4338
4339         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4340         if (!expires)
4341                 /*
4342                  * If we cannot get the audio device autosuspend delay,
4343                  * a fixed 4s interval is used. Since 3s is the audio
4344                  * controller's default autosuspend delay setting, the
4345                  * 4s used here is guaranteed to cover it.
4346                  */
4347                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4348
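        /*
         * Keep trying to runtime-suspend the audio function until it
         * actually reports suspended, or until the deadline above expires.
         */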
4349         while (!pm_runtime_status_suspended(&(p->dev))) {
4350                 if (!pm_runtime_suspend(&(p->dev)))
4351                         break;
4352
4353                 if (expires < ktime_get_mono_fast_ns()) {
4354                         dev_warn(adev->dev, "failed to suspend display audio\n");
4355                         /* TODO: abort the succeeding gpu reset? */
4356                         return -ETIMEDOUT;
4357                 }
4358         }
4359
4360         pm_runtime_disable(&(p->dev));
4361
4362         return 0;
4363 }
4364
4365 /**
4366  * amdgpu_device_gpu_recover - reset the ASIC and recover the scheduler
4367  *
4368  * @adev: amdgpu device pointer
4369  * @job: which job triggered the hang
4370  *
4371  * Attempt to reset the GPU if it has hung (all ASICs).
4372  * Attempt to do a soft reset or full reset and reinitialize the ASIC.
4373  * Returns 0 for success or an error on failure.
4374  */
4375
4376 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4377                               struct amdgpu_job *job)
4378 {
4379         struct list_head device_list, *device_list_handle =  NULL;
4380         bool need_full_reset = false;
4381         bool job_signaled = false;
4382         struct amdgpu_hive_info *hive = NULL;
4383         struct amdgpu_device *tmp_adev = NULL;
4384         int i, r = 0;
4385         bool need_emergency_restart = false;
4386         bool audio_suspended = false;
4387
4388         /*
4389          * Special case: RAS triggered and full reset isn't supported
4390          */
4391         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4392
4393         /*
4394          * Flush RAM to disk so that after the reboot
4395          * the user can read the log and see why the system rebooted.
4396          */
4397         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4398                 DRM_WARN("Emergency reboot.");
4399
4400                 ksys_sync_helper();
4401                 emergency_restart();
4402         }
4403
4404         dev_info(adev->dev, "GPU %s begin!\n",
4405                 need_emergency_restart ? "jobs stop":"reset");
4406
4407         /*
4408          * Here we trylock to avoid a chain of resets executing while this
4409          * timeout handler is running, triggered either by jobs on different
4410          * adevs in the XGMI hive or by jobs on different schedulers of the
4411          * same device. We always reset all schedulers for a device and all
4412          * devices in an XGMI hive, so that should take care of them too.
4413          */
4414         hive = amdgpu_get_xgmi_hive(adev);
4415         if (hive) {
4416                 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4417                         DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4418                                 job ? job->base.id : -1, hive->hive_id);
4419                         amdgpu_put_xgmi_hive(hive);
4420                         return 0;
4421                 }
4422                 mutex_lock(&hive->hive_lock);
4423         }
4424
4425         /*
4426          * Build the list of devices to reset.
4427          * In case we are in XGMI hive mode, reorder the device list
4428          * to put adev in the first position.
4429          */
4430         INIT_LIST_HEAD(&device_list);
4431         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4432                 if (!hive)
4433                         return -ENODEV;
4434                 if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
4435                         list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
4436                 device_list_handle = &hive->device_list;
4437         } else {
4438                 list_add_tail(&adev->gmc.xgmi.head, &device_list);
4439                 device_list_handle = &device_list;
4440         }
4441
4442         /* block all schedulers and reset given job's ring */
4443         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4444                 if (!amdgpu_device_lock_adev(tmp_adev, hive)) {
4445                         dev_info(tmp_adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4446                                   job ? job->base.id : -1);
4447                         r = 0;
4448                         goto skip_recovery;
4449                 }
4450
4451                 /*
4452                  * Try to put the audio codec into suspend state
4453                  * before the gpu reset starts.
4454                  *
4455                  * The power domain of the graphics device is
4456                  * shared with the AZ power domain. Without this,
4457                  * we may change the audio hardware behind the
4458                  * audio driver's back, which would trigger
4459                  * audio codec errors.
4460                  */
4461                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
4462                         audio_suspended = true;
4463
4464                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
4465
4466                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4467
4468                 if (!amdgpu_sriov_vf(tmp_adev))
4469                         amdgpu_amdkfd_pre_reset(tmp_adev);
4470
4471                 /*
4472                  * Mark these ASICs to be reset as untracked first,
4473                  * and add them back after the reset completes.
4474                  */
4475                 amdgpu_unregister_gpu_instance(tmp_adev);
4476
4477                 amdgpu_fbdev_set_suspend(tmp_adev, 1);
4478
4479                 /* disable ras on ALL IPs */
4480                 if (!need_emergency_restart &&
4481                       amdgpu_device_ip_need_full_reset(tmp_adev))
4482                         amdgpu_ras_suspend(tmp_adev);
4483
4484                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4485                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4486
4487                         if (!ring || !ring->sched.thread)
4488                                 continue;
4489
4490                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4491
4492                         if (need_emergency_restart)
4493                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4494                 }
4495         }
4496
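        /*
         * In the emergency-restart case the jobs were already stopped above,
         * so skip job resubmission and scheduler resume entirely.
         */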
4497         if (need_emergency_restart)
4498                 goto skip_sched_resume;
4499
4500         /*
4501          * Must check whether the guilty job has already signaled here,
4502          * since after this point all old HW fences are force signaled.
4503          *
4504          * job->base holds a reference to the parent fence.
4505          */
4506         if (job && job->base.s_fence->parent &&
4507             dma_fence_is_signaled(job->base.s_fence->parent)) {
4508                 job_signaled = true;
4509                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4510                 goto skip_hw_reset;
4511         }
4512
4513 retry:  /* Pre ASIC reset for the rest of the adevs in the XGMI hive. */
4514         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4515                 r = amdgpu_device_pre_asic_reset(tmp_adev,
4516                                                  NULL,
4517                                                  &need_full_reset);
4518                 /* TODO: Should we stop? */
4519                 if (r) {
4520                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
4521                                   r, adev_to_drm(tmp_adev)->unique);
4522                         tmp_adev->asic_reset_res = r;
4523                 }
4524         }
4525
4526         /* Actual ASIC resets, if needed. */
4527         /* TODO: Implement XGMI hive reset logic for SRIOV */
4528         if (amdgpu_sriov_vf(adev)) {
4529                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
4530                 if (r)
4531                         adev->asic_reset_res = r;
4532         } else {
4533                 r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
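                /*
                 * amdgpu_do_asic_reset() returns -EAGAIN when, e.g., an IB
                 * ring test fails and a full reset is requested; in that
                 * case go around again with need_full_reset now set.
                 */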
4534                 if (r && r == -EAGAIN)
4535                         goto retry;
4536         }
4537
4538 skip_hw_reset:
4539
4540         /* Post ASIC reset for all devs. */
4541         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4542
4543                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4544                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4545
4546                         if (!ring || !ring->sched.thread)
4547                                 continue;
4548
4549                         /* No point in resubmitting jobs if we didn't do a HW reset */
4550                         if (!tmp_adev->asic_reset_res && !job_signaled)
4551                                 drm_sched_resubmit_jobs(&ring->sched);
4552
4553                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4554                 }
4555
4556                 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4557                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
4558                 }
4559
4560                 tmp_adev->asic_reset_res = 0;
4561
4562                 if (r) {
4563                         /* bad news, how do we tell userspace? */
4564                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
4565                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4566                 } else {
4567                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
4568                 }
4569         }
4570
4571 skip_sched_resume:
4572         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4573                 /* unlock kfd: SRIOV would do it separately */
4574                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
4575                         amdgpu_amdkfd_post_reset(tmp_adev);
4576                 if (audio_suspended)
4577                         amdgpu_device_resume_display_audio(tmp_adev);
4578                 amdgpu_device_unlock_adev(tmp_adev);
4579         }
4580
4581 skip_recovery:
4582         if (hive) {
4583                 atomic_set(&hive->in_reset, 0);
4584                 mutex_unlock(&hive->hive_lock);
4585                 amdgpu_put_xgmi_hive(hive);
4586         }
4587
4588         if (r)
4589                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
4590         return r;
4591 }
4592
4593 /**
4594  * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
4595  *
4596  * @adev: amdgpu_device pointer
4597  *
4598  * Fetches and stores in the driver the PCIe capabilities (gen speed
4599  * and lanes) of the slot the device is in. Handles APUs and
4600  * virtualized environments where PCIe config space may not be available.
4601  */
4602 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
4603 {
4604         struct pci_dev *pdev;
4605         enum pci_bus_speed speed_cap, platform_speed_cap;
4606         enum pcie_link_width platform_link_width;
4607
4608         if (amdgpu_pcie_gen_cap)
4609                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
4610
4611         if (amdgpu_pcie_lane_cap)
4612                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
4613
4614         /* covers APUs as well */
4615         if (pci_is_root_bus(adev->pdev->bus)) {
4616                 if (adev->pm.pcie_gen_mask == 0)
4617                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4618                 if (adev->pm.pcie_mlw_mask == 0)
4619                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4620                 return;
4621         }
4622
4623         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4624                 return;
4625
4626         pcie_bandwidth_available(adev->pdev, NULL,
4627                                  &platform_speed_cap, &platform_link_width);
4628
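        /*
         * The gen mask carries two sets of bits: the speeds the ASIC itself
         * supports (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_*) and the speeds the
         * platform/slot supports (CAIL_PCIE_LINK_SPEED_SUPPORT_*). Each set
         * is cumulative: a Gen4-capable link also gets the Gen1-3 bits.
         */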
4629         if (adev->pm.pcie_gen_mask == 0) {
4630                 /* asic caps */
4631                 pdev = adev->pdev;
4632                 speed_cap = pcie_get_speed_cap(pdev);
4633                 if (speed_cap == PCI_SPEED_UNKNOWN) {
4634                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4635                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4636                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4637                 } else {
4638                         if (speed_cap == PCIE_SPEED_16_0GT)
4639                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4640                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4641                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4642                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4643                         else if (speed_cap == PCIE_SPEED_8_0GT)
4644                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4645                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4646                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4647                         else if (speed_cap == PCIE_SPEED_5_0GT)
4648                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4649                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4650                         else
4651                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4652                 }
4653                 /* platform caps */
4654                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4655                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4656                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4657                 } else {
4658                         if (platform_speed_cap == PCIE_SPEED_16_0GT)
4659                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4660                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4661                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4662                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4663                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4664                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4665                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4666                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4667                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4668                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4669                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4670                         else
4671                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4672
4673                 }
4674         }
4675         if (adev->pm.pcie_mlw_mask == 0) {
4676                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4677                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4678                 } else {
4679                         switch (platform_link_width) {
4680                         case PCIE_LNK_X32:
4681                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4682                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4683                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4684                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4685                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4686                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4687                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4688                                 break;
4689                         case PCIE_LNK_X16:
4690                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4691                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4692                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4693                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4694                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4695                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4696                                 break;
4697                         case PCIE_LNK_X12:
4698                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4699                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4700                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4701                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4702                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4703                                 break;
4704                         case PCIE_LNK_X8:
4705                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4706                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4707                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4708                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4709                                 break;
4710                         case PCIE_LNK_X4:
4711                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4712                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4713                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4714                                 break;
4715                         case PCIE_LNK_X2:
4716                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4717                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4718                                 break;
4719                         case PCIE_LNK_X1:
4720                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4721                                 break;
4722                         default:
4723                                 break;
4724                         }
4725                 }
4726         }
4727 }
4728
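/**
 * amdgpu_device_baco_enter - enter the BACO (Bus Active, Chip Off) state
 * @dev: drm device pointer
 *
 * Puts the ASIC into BACO via the DPM interface. When RAS is supported,
 * doorbell interrupts are disabled before entering BACO.
 * Returns 0 on success or a negative error code otherwise.
 */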
4729 int amdgpu_device_baco_enter(struct drm_device *dev)
4730 {
4731         struct amdgpu_device *adev = drm_to_adev(dev);
4732         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4733
4734         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
4735                 return -ENOTSUPP;
4736
4737         if (ras && ras->supported)
4738                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
4739
4740         return amdgpu_dpm_baco_enter(adev);
4741 }
4742
4743 int amdgpu_device_baco_exit(struct drm_device *dev)
4744 {
4745         struct amdgpu_device *adev = drm_to_adev(dev);
4746         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4747         int ret = 0;
4748
4749         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
4750                 return -ENOTSUPP;
4751
4752         ret = amdgpu_dpm_baco_exit(adev);
4753         if (ret)
4754                 return ret;
4755
4756         if (ras && ras->supported)
4757                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
4758
4759         return 0;
4760 }
4761
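/* Cancel any pending scheduler timeout (TDR) work on all rings of the device. */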
4762 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
4763 {
4764         int i;
4765
4766         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4767                 struct amdgpu_ring *ring = adev->rings[i];
4768
4769                 if (!ring || !ring->sched.thread)
4770                         continue;
4771
4772                 cancel_delayed_work_sync(&ring->sched.work_tdr);
4773         }
4774 }
4775
4776 /**
4777  * amdgpu_pci_error_detected - Called when a PCI error is detected.
4778  * @pdev: PCI device struct
4779  * @state: PCI channel state
4780  *
4781  * Description: Called when a PCI error is detected.
4782  *
4783  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
4784  */
4785 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
4786 {
4787         struct drm_device *dev = pci_get_drvdata(pdev);
4788         struct amdgpu_device *adev = drm_to_adev(dev);
4789         int i;
4790
4791         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
4792
4793         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4794                 DRM_WARN("No support for XGMI hive yet...");
4795                 return PCI_ERS_RESULT_DISCONNECT;
4796         }
4797
4798         switch (state) {
4799         case pci_channel_io_normal:
4800                 return PCI_ERS_RESULT_CAN_RECOVER;
4801         /* Fatal error, prepare for slot reset */
4802         case pci_channel_io_frozen:
4803                 /*
4804                  * Cancel and wait for all TDRs in progress if we fail to
4805                  * set adev->in_gpu_reset in amdgpu_device_lock_adev.
4806                  *
4807                  * Locking adev->reset_sem will prevent any external access
4808                  * to the GPU during PCI error recovery.
4809                  */
4810                 while (!amdgpu_device_lock_adev(adev, NULL))
4811                         amdgpu_cancel_all_tdr(adev);
4812
4813                 /*
4814                  * Block any work scheduling as we do for regular GPU reset
4815                  * for the duration of the recovery
4816                  */
4817                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4818                         struct amdgpu_ring *ring = adev->rings[i];
4819
4820                         if (!ring || !ring->sched.thread)
4821                                 continue;
4822
4823                         drm_sched_stop(&ring->sched, NULL);
4824                 }
4825                 return PCI_ERS_RESULT_NEED_RESET;
4826         case pci_channel_io_perm_failure:
4827                 /* Permanent error, prepare for device removal */
4828                 return PCI_ERS_RESULT_DISCONNECT;
4829         }
4830
4831         return PCI_ERS_RESULT_NEED_RESET;
4832 }
4833
4834 /**
4835  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
4836  * @pdev: pointer to PCI device
4837  */
4838 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
4839 {
4840
4841         DRM_INFO("PCI error: mmio enabled callback!!\n");
4842
4843         /* TODO - dump whatever for debugging purposes */
4844
4845         /* This is called only if amdgpu_pci_error_detected returns
4846          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
4847          * works, so there is no need to reset the slot.
4848          */
4849
4850         return PCI_ERS_RESULT_RECOVERED;
4851 }
4852
4853 /**
4854  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
4855  * @pdev: PCI device struct
4856  *
4857  * Description: This routine is called by the pci error recovery
4858  * code after the PCI slot has been reset, just before we
4859  * should resume normal operations.
4860  */
4861 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
4862 {
4863         struct drm_device *dev = pci_get_drvdata(pdev);
4864         struct amdgpu_device *adev = drm_to_adev(dev);
4865         int r, i;
4866         bool need_full_reset = true;
4867         u32 memsize;
4868         struct list_head device_list;
4869
4870         DRM_INFO("PCI error: slot reset callback!!\n");
4871
4872         INIT_LIST_HEAD(&device_list);
4873         list_add_tail(&adev->gmc.xgmi.head, &device_list);
4874
4875         /* wait for asic to come out of reset */
4876         msleep(500);
4877
4878         /* Restore PCI config space */
4879         amdgpu_device_load_pci_state(pdev);
4880
4881         /* confirm ASIC came out of reset */
4882         for (i = 0; i < adev->usec_timeout; i++) {
4883                 memsize = amdgpu_asic_get_config_memsize(adev);
4884
4885                 if (memsize != 0xffffffff)
4886                         break;
4887                 udelay(1);
4888         }
4889         if (memsize == 0xffffffff) {
4890                 r = -ETIME;
4891                 goto out;
4892         }
4893
4894         adev->in_pci_err_recovery = true;
4895         r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
4896         adev->in_pci_err_recovery = false;
4897         if (r)
4898                 goto out;
4899
4900         r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
4901
4902 out:
4903         if (!r) {
4904                 if (amdgpu_device_cache_pci_state(adev->pdev))
4905                         pci_restore_state(adev->pdev);
4906
4907                 DRM_INFO("PCIe error recovery succeeded\n");
4908         } else {
4909                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
4910                 amdgpu_device_unlock_adev(adev);
4911         }
4912
4913         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
4914 }
4915
4916 /**
4917  * amdgpu_pci_resume() - resume normal ops after PCI reset
4918  * @pdev: pointer to PCI device
4919  *
4920  * Called when the error recovery driver tells us that it's
4921  * OK to resume normal operation. Restarts the ring schedulers
4922  * so that halted jobs can resume.
4923  */
4924 void amdgpu_pci_resume(struct pci_dev *pdev)
4925 {
4926         struct drm_device *dev = pci_get_drvdata(pdev);
4927         struct amdgpu_device *adev = drm_to_adev(dev);
4928         int i;
4929
4930
4931         DRM_INFO("PCI error: resume callback!!\n");
4932
4933         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4934                 struct amdgpu_ring *ring = adev->rings[i];
4935
4936                 if (!ring || !ring->sched.thread)
4937                         continue;
4938
4939
4940                 drm_sched_resubmit_jobs(&ring->sched);
4941                 drm_sched_start(&ring->sched, true);
4942         }
4943
4944         amdgpu_device_unlock_adev(adev);
4945 }
4946
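/**
 * amdgpu_device_cache_pci_state - save and cache the PCI config space
 * @pdev: PCI device struct
 *
 * Saves the device's PCI configuration space and keeps a copy in
 * adev->pci_state so that it can be reloaded later, e.g. during PCI
 * error recovery.
 * Returns true on success, false otherwise.
 */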
4947 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
4948 {
4949         struct drm_device *dev = pci_get_drvdata(pdev);
4950         struct amdgpu_device *adev = drm_to_adev(dev);
4951         int r;
4952
4953         r = pci_save_state(pdev);
4954         if (!r) {
4955                 kfree(adev->pci_state);
4956
4957                 adev->pci_state = pci_store_saved_state(pdev);
4958
4959                 if (!adev->pci_state) {
4960                         DRM_ERROR("Failed to store PCI saved state");
4961                         return false;
4962                 }
4963         } else {
4964                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
4965                 return false;
4966         }
4967
4968         return true;
4969 }
4970
4971 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
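/**
 * amdgpu_device_load_pci_state - reload the cached PCI config space
 * @pdev: PCI device struct
 *
 * Reloads the configuration space previously saved by
 * amdgpu_device_cache_pci_state() and restores it to the device.
 * Returns true on success, false otherwise.
 */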
4972 {
4973         struct drm_device *dev = pci_get_drvdata(pdev);
4974         struct amdgpu_device *adev = drm_to_adev(dev);
4975         int r;
4976
4977         if (!adev->pci_state)
4978                 return false;
4979
4980         r = pci_load_saved_state(pdev, adev->pci_state);
4981
4982         if (!r) {
4983                 pci_restore_state(pdev);
4984         } else {
4985                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
4986                 return false;
4987         }
4988
4989         return true;
4990 }
4991
4992