drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");

#define AMDGPU_RESUME_MS                2000

const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
        "OLAND",
        "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "POLARIS12",
        "VEGAM",
        "VEGA10",
        "VEGA12",
        "VEGA20",
        "RAVEN",
        "ARCTURUS",
        "RENOIR",
        "NAVI10",
        "NAVI14",
        "NAVI12",
        "SIENNA_CICHLID",
        "NAVY_FLOUNDER",
        "VANGOGH",
        "DIMGREY_CAVEFISH",
        "LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

        return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
                amdgpu_device_get_pcie_replay_count, NULL);

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
                amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
                amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
                amdgpu_device_get_serial_number, NULL);
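
/*
 * Editor's note: a minimal sketch of how the four attributes above are
 * typically registered during device init; the array and function names here
 * are illustrative, not part of this file at this point.
 */
static const struct attribute *example_dev_attributes[] = {
        &dev_attr_product_name.attr,
        &dev_attr_product_number.attr,
        &dev_attr_serial_number.attr,
        &dev_attr_pcie_replay_count.attr,
        NULL
};

static int __maybe_unused example_register_dev_attributes(struct amdgpu_device *adev)
{
        /* creates the sysfs files under the PCI device's kobject */
        return sysfs_create_files(&adev->dev->kobj, example_dev_attributes);
}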

/**
 * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_atpx(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if (adev->flags & AMD_IS_PX)
                return true;
        return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power resources (_PR3),
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if (adev->has_pr3)
                return true;
        return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        return amdgpu_asic_supports_baco(adev);
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size in bytes, the allocation behind @buf must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               uint32_t *buf, size_t size, bool write)
{
        unsigned long flags;
        uint32_t hi = ~0;
        uint64_t last;

#ifdef CONFIG_64BIT
        last = min(pos + size, adev->gmc.visible_vram_size);
        if (last > pos) {
                void __iomem *addr = adev->mman.aper_base_kaddr + pos;
                size_t count = last - pos;

                if (write) {
                        memcpy_toio(addr, buf, count);
                        mb();
                        amdgpu_asic_flush_hdp(adev, NULL);
                } else {
                        amdgpu_asic_invalidate_hdp(adev, NULL);
                        mb();
                        memcpy_fromio(buf, addr, count);
                }

                if (count == size)
                        return;

                pos += count;
                buf += count / 4;
                size -= count;
        }
#endif

        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        for (last = pos + size; pos < last; pos += 4) {
                uint32_t tmp = pos >> 31;

                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
                if (tmp != hi) {
                        WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
                        hi = tmp;
                }
                if (write)
                        WREG32_NO_KIQ(mmMM_DATA, *buf++);
                else
                        *buf++ = RREG32_NO_KIQ(mmMM_DATA);
        }
        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
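
/*
 * Editor's note: an illustrative (hypothetical) caller of the helper above,
 * reading a small buffer from the start of VRAM into system memory.
 */
static void __maybe_unused example_vram_read(struct amdgpu_device *adev)
{
        uint32_t data[4];

        /* read 16 bytes from VRAM offset 0; false selects the read path */
        amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
}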

/*
 * register access helper functions.
 */
/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
                            uint32_t reg, uint32_t acc_flags)
{
        uint32_t ret;

        if (adev->in_pci_err_recovery)
                return 0;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_sem)) {
                        ret = amdgpu_kiq_rreg(adev, reg);
                        up_read(&adev->reset_sem);
                } else {
                        ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                ret = adev->pcie_rreg(adev, reg * 4);
        }

        trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

        return ret;
}

/*
 * MMIO register read with bytes helper function
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
        if (adev->in_pci_err_recovery)
                return 0;

        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
        BUG();
}

/*
 * MMIO register write with bytes helper function
 * @offset: byte offset from MMIO start
 * @value: the value to write to the register
 */
/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
        if (adev->in_pci_err_recovery)
                return;

        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
                        uint32_t reg, uint32_t v,
                        uint32_t acc_flags)
{
        if (adev->in_pci_err_recovery)
                return;

        if ((reg * 4) < adev->rmmio_size) {
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_sem)) {
                        amdgpu_kiq_wreg(adev, reg, v);
                        up_read(&adev->reset_sem);
                } else {
                        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
                }
        } else {
                adev->pcie_wreg(adev, reg * 4, v);
        }

        trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
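
/*
 * Editor's note: a hypothetical read-modify-write helper composed from the two
 * accessors above, mirroring what the WREG32_P()-style macros in amdgpu.h do.
 */
static void __maybe_unused example_reg_update(struct amdgpu_device *adev,
                                              uint32_t reg, uint32_t clr,
                                              uint32_t set)
{
        uint32_t tmp = amdgpu_device_rreg(adev, reg, 0);

        /* clear the requested bits, then set the new ones */
        tmp &= ~clr;
        tmp |= set;
        amdgpu_device_wreg(adev, reg, tmp, 0);
}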

/*
 * amdgpu_mm_wreg_mmio_rlc - write a register either via mmio or via the RLC path if in range
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
                             uint32_t reg, uint32_t v)
{
        if (adev->in_pci_err_recovery)
                return;

        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
        } else {
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
}

/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.
 */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
        if (adev->in_pci_err_recovery)
                return 0;

        if ((reg * 4) < adev->rio_mem_size)
                return ioread32(adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                return ioread32(adev->rio_mem + (mmMM_DATA * 4));
        }
}

/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        if (adev->in_pci_err_recovery)
                return;

        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
        }
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
        if (adev->in_pci_err_recovery)
                return 0;

        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
        if (adev->in_pci_err_recovery)
                return;

        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
        if (adev->in_pci_err_recovery)
                return 0;

        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
        if (adev->in_pci_err_recovery)
                return;

        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}
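
/*
 * Editor's note: an illustrative sketch of how a caller would pick the 32 bit
 * or 64 bit doorbell write; the helper name and wptr parameter are
 * hypothetical.
 */
static void __maybe_unused example_ring_doorbell(struct amdgpu_device *adev,
                                                 u32 doorbell_index, u64 wptr)
{
        /* per the docs above: VEGA10+ uses 64 bit doorbells, older asics 32 bit */
        if (adev->asic_type >= CHIP_VEGA10)
                amdgpu_mm_wdoorbell64(adev, doorbell_index, wptr);
        else
                amdgpu_mm_wdoorbell(adev, doorbell_index, (u32)wptr);
}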

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
                                u32 pcie_index, u32 pcie_data,
                                u32 reg_addr)
{
        unsigned long flags;
        u32 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
                                  u32 pcie_index, u32 pcie_data,
                                  u32 reg_addr)
{
        unsigned long flags;
        u64 r;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* read low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        r = readl(pcie_data_offset);
        /* read high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        r |= ((u64)readl(pcie_data_offset) << 32);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
                                 u32 pcie_index, u32 pcie_data,
                                 u32 reg_addr, u32 reg_data)
{
        unsigned long flags;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel(reg_data, pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
                                   u32 pcie_index, u32 pcie_data,
                                   u32 reg_addr, u64 reg_data)
{
        unsigned long flags;
        void __iomem *pcie_index_offset;
        void __iomem *pcie_data_offset;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
        pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

        /* write low 32 bits */
        writel(reg_addr, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
        readl(pcie_data_offset);
        /* write high 32 bits */
        writel(reg_addr + 4, pcie_index_offset);
        readl(pcie_index_offset);
        writel((u32)(reg_data >> 32), pcie_data_offset);
        readl(pcie_data_offset);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
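
/*
 * Editor's note: a sketch of how an asic's pcie_rreg callback typically wraps
 * the indirect accessors above, with the index/data pair supplied by the NBIO
 * block (as the soc15/nv code does). Treat the exact wiring as illustrative.
 */
static u32 __maybe_unused example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        u32 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
        u32 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

        return amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data, reg);
}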

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
        DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
        amdgpu_asic_pre_asic_init(adev);

        return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->vram_scratch.robj,
                                       &adev->vram_scratch.gpu_addr,
                                       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        if (adev->family >= AMDGPU_FAMILY_AI)
                                tmp |= (or_mask & and_mask);
                        else
                                tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}
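
/*
 * Editor's note: a hypothetical golden-register table for the helper above.
 * Each triple is { register offset, AND mask, OR mask }; the offsets and
 * values below are made up purely for illustration.
 */
static const u32 example_golden_settings[] = {
        0x1234, 0xffffffff, 0x00000001,     /* AND mask of ~0: full overwrite */
        0x5678, 0x0000ff00, 0x00002a00,     /* read-modify-write of one byte field */
};

static void __maybe_unused example_init_golden_registers(struct amdgpu_device *adev)
{
        amdgpu_device_program_register_sequence(adev,
                                                example_golden_settings,
                                                ARRAY_SIZE(example_golden_settings));
}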

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
        return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
        /* No doorbell on SI hardware generation */
        if (adev->asic_type < CHIP_BONAIRE) {
                adev->doorbell.base = 0;
                adev->doorbell.size = 0;
                adev->doorbell.num_doorbells = 0;
                adev->doorbell.ptr = NULL;
                return 0;
        }

        if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
                return -EINVAL;

        amdgpu_asic_init_doorbell_index(adev);

        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);

        adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
                                             adev->doorbell_index.max_assignment+1);
        if (adev->doorbell.num_doorbells == 0)
                return -EINVAL;

        /* For Vega, reserve and map two pages on the doorbell BAR, since the
         * SDMA paging queue doorbell uses the second page. The
         * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
         * doorbells are in the first page, so with the paging queue enabled
         * num_doorbells must grow by one page (0x400 dwords).
         */
        if (adev->asic_type >= CHIP_VEGA10)
                adev->doorbell.num_doorbells += 0x400;

        adev->doorbell.ptr = ioremap(adev->doorbell.base,
                                     adev->doorbell.num_doorbells *
                                     sizeof(u32));
        if (adev->doorbell.ptr == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
        iounmap(adev->doorbell.ptr);
        adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      &adev->wb.gpu_addr,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
        }
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }

        return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset << 3; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
        }
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
        wb >>= 3;
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}
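
/*
 * Editor's note: an illustrative consumer of the two helpers above; rings use
 * this pattern for rptr/wptr/fence writeback. The function itself is
 * hypothetical.
 */
static int __maybe_unused example_wb_usage(struct amdgpu_device *adev)
{
        u64 gpu_addr;
        u32 wb;
        int r;

        r = amdgpu_device_wb_get(adev, &wb);
        if (r)
                return r;

        /* @wb is a dword index: CPU view and GPU address of the same slot */
        gpu_addr = adev->wb.gpu_addr + (wb * 4);
        adev->wb.wb[wb] = 0;    /* clear before handing to the GPU */

        /* ... point a ring or fence at gpu_addr, poll adev->wb.wb[wb] ... */

        amdgpu_device_wb_free(adev, wb);
        return 0;
}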

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
        int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
        struct pci_bus *root;
        struct resource *res;
        unsigned i;
        u16 cmd;
        int r;

        /* Bypass for VF */
        if (amdgpu_sriov_vf(adev))
                return 0;

        /* skip if the bios has already enabled large BAR */
        if (adev->gmc.real_vram_size &&
            (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
                return 0;

        /* Check if the root BUS has 64bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
                root = root->parent;

        pci_bus_for_each_resource(root, res, i) {
                if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
                    res->start > 0x100000000ull)
                        break;
        }

        /* Trying to resize is pointless without a root hub window above 4GB */
        if (!res)
                return 0;

        /* Limit the BAR size to what is available */
        rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
                        rbar_size);

        /* Disable memory decoding while we change the BAR addresses and size */
        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);

        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
        amdgpu_device_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);

        pci_release_resource(adev->pdev, 0);

        r = pci_resize_resource(adev->pdev, 0, rbar_size);
        if (r == -ENOSPC)
                DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
                DRM_ERROR("Problem resizing BAR0 (%d).", r);

        pci_assign_unassigned_bus_resources(adev->pdev->bus);

        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
        r = amdgpu_device_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
                return -ENODEV;

        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

        return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
        uint32_t reg;

        if (amdgpu_sriov_vf(adev))
                return false;

        if (amdgpu_passthrough(adev)) {
                /* For FIJI: in the whole-GPU pass-through virtualization case,
                 * after a VM reboot some old smc fw still needs the driver to
                 * do a vPost, otherwise the gpu hangs. smc fw versions above
                 * 22.15 don't have this flaw, so we force vPost for smc
                 * versions below 22.15.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;
                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        if (err)
                                return true;

                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
        }

        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
                return true;
        }

        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);

        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);

        if ((reg != 0) && (reg != 0xffffffff))
                return false;

        return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
        struct amdgpu_device *adev = cookie;

        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
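
/*
 * Editor's note: a sketch of how the callback above gets hooked up to the VGA
 * arbiter; the registration actually happens later in amdgpu_device_init(),
 * and the signature shown assumes the vgaarb API of this kernel generation.
 */
static void __maybe_unused example_register_vga_client(struct amdgpu_device *adev)
{
        /* if there is more than one VGA device, arbitrate access to the
         * common VGA resources */
        vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
}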

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB, so we have 12 bits of offset, a minimum of
 * 9 bits in the page table, and the remaining bits in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (amdgpu_vm_block_size == -1)
                return;

        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = -1;
        }
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
                return;

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = -1;
        }
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
        struct sysinfo si;
        bool is_os_64 = (sizeof(void *) == 8);
        uint64_t total_memory;
        uint64_t dram_size_seven_GB = 0x1B8000000;
        uint64_t dram_size_three_GB = 0xB8000000;

        if (amdgpu_smu_memory_pool_size == 0)
                return;

        if (!is_os_64) {
                DRM_WARN("Not 64-bit OS, feature not supported\n");
                goto def_value;
        }
        si_meminfo(&si);
        total_memory = (uint64_t)si.totalram * si.mem_unit;

        if ((amdgpu_smu_memory_pool_size == 1) ||
                (amdgpu_smu_memory_pool_size == 2)) {
                if (total_memory < dram_size_three_GB)
                        goto def_value1;
        } else if ((amdgpu_smu_memory_pool_size == 4) ||
                (amdgpu_smu_memory_pool_size == 8)) {
                if (total_memory < dram_size_seven_GB)
                        goto def_value1;
        } else {
                DRM_WARN("Smu memory pool size not supported\n");
                goto def_value;
        }
        adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

        return;

def_value1:
        DRM_WARN("Not enough system memory\n");
def_value:
        adev->pm.smu_prv_buffer_size = 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = 4;
        } else if (!is_power_of_2(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        }

        if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
                /* gart size must be greater or equal to 32M */
                dev_warn(adev->dev, "gart size (%d) too small\n",
                         amdgpu_gart_size);
                amdgpu_gart_size = -1;
        }

        if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
                /* gtt size must be greater or equal to 32M */
                dev_warn(adev->dev, "gtt size (%d) too small\n",
                                 amdgpu_gtt_size);
                amdgpu_gtt_size = -1;
        }

        /* valid range is between 4 and 9 inclusive */
        if (amdgpu_vm_fragment_size != -1 &&
            (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
                dev_warn(adev->dev, "valid range is between 4 and 9\n");
                amdgpu_vm_fragment_size = -1;
        }

        if (amdgpu_sched_hw_submission < 2) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = 2;
        } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
                dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
                         amdgpu_sched_hw_submission);
                amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
        }

        amdgpu_device_check_smu_prv_buffer_size(adev);

        amdgpu_device_check_vm_size(adev);

        amdgpu_device_check_block_size(adev);

        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

        amdgpu_gmc_tmz_set(adev);

        amdgpu_gmc_noretry_set(adev);

        return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asic before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
                                        enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        int r;

        if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
                return;

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

                pci_set_power_state(pdev, PCI_D0);
                amdgpu_device_load_pci_state(pdev);
                r = pci_enable_device(pdev);
                if (r)
                        DRM_WARN("pci_enable_device failed (%d)\n", r);
                amdgpu_device_resume(dev, true);

                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_info("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true);
                amdgpu_device_cache_pci_state(pdev);
                /* Shut down the device */
                pci_disable_device(pdev);
                pci_set_power_state(pdev, PCI_D3cold);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
        .set_gpu_state = amdgpu_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = amdgpu_switcheroo_can_switch,
};
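
/*
 * Editor's note: an illustrative sketch of registering the ops table above
 * with vga_switcheroo; in this file the real registration happens later in
 * amdgpu_device_init(), gated on ATPX/BOCO support.
 */
static void __maybe_unused example_register_switcheroo(struct amdgpu_device *adev)
{
        bool px = amdgpu_device_supports_atpx(adev_to_drm(adev));

        vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px);
}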
1486
1487 /**
1488  * amdgpu_device_ip_set_clockgating_state - set the CG state
1489  *
1490  * @dev: amdgpu_device pointer
1491  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1492  * @state: clockgating state (gate or ungate)
1493  *
1494  * Sets the requested clockgating state for all instances of
1495  * the hardware IP specified.
1496  * Returns the error code from the last instance.
1497  */
1498 int amdgpu_device_ip_set_clockgating_state(void *dev,
1499                                            enum amd_ip_block_type block_type,
1500                                            enum amd_clockgating_state state)
1501 {
1502         struct amdgpu_device *adev = dev;
1503         int i, r = 0;
1504
1505         for (i = 0; i < adev->num_ip_blocks; i++) {
1506                 if (!adev->ip_blocks[i].status.valid)
1507                         continue;
1508                 if (adev->ip_blocks[i].version->type != block_type)
1509                         continue;
1510                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1511                         continue;
1512                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1513                         (void *)adev, state);
1514                 if (r)
1515                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1516                                   adev->ip_blocks[i].version->funcs->name, r);
1517         }
1518         return r;
1519 }
1520
1521 /**
1522  * amdgpu_device_ip_set_powergating_state - set the PG state
1523  *
1524  * @dev: amdgpu_device pointer
1525  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1526  * @state: powergating state (gate or ungate)
1527  *
1528  * Sets the requested powergating state for all instances of
1529  * the hardware IP specified.
1530  * Returns the error code from the last instance.
1531  */
1532 int amdgpu_device_ip_set_powergating_state(void *dev,
1533                                            enum amd_ip_block_type block_type,
1534                                            enum amd_powergating_state state)
1535 {
1536         struct amdgpu_device *adev = dev;
1537         int i, r = 0;
1538
1539         for (i = 0; i < adev->num_ip_blocks; i++) {
1540                 if (!adev->ip_blocks[i].status.valid)
1541                         continue;
1542                 if (adev->ip_blocks[i].version->type != block_type)
1543                         continue;
1544                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1545                         continue;
1546                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1547                         (void *)adev, state);
1548                 if (r)
1549                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1550                                   adev->ip_blocks[i].version->funcs->name, r);
1551         }
1552         return r;
1553 }
1554
1555 /**
1556  * amdgpu_device_ip_get_clockgating_state - get the CG state
1557  *
1558  * @adev: amdgpu_device pointer
1559  * @flags: clockgating feature flags
1560  *
1561  * Walks the list of IPs on the device and updates the clockgating
1562  * flags for each IP.
1563  * Updates @flags with the feature flags for each hardware IP where
1564  * clockgating is enabled.
1565  */
1566 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1567                                             u32 *flags)
1568 {
1569         int i;
1570
1571         for (i = 0; i < adev->num_ip_blocks; i++) {
1572                 if (!adev->ip_blocks[i].status.valid)
1573                         continue;
1574                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1575                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1576         }
1577 }
1578
1579 /**
1580  * amdgpu_device_ip_wait_for_idle - wait for idle
1581  *
1582  * @adev: amdgpu_device pointer
1583  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1584  *
1585  * Waits for the request hardware IP to be idle.
1586  * Returns 0 for success or a negative error code on failure.
1587  */
1588 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1589                                    enum amd_ip_block_type block_type)
1590 {
1591         int i, r;
1592
1593         for (i = 0; i < adev->num_ip_blocks; i++) {
1594                 if (!adev->ip_blocks[i].status.valid)
1595                         continue;
1596                 if (adev->ip_blocks[i].version->type == block_type) {
1597                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1598                         if (r)
1599                                 return r;
1600                         break;
1601                 }
1602         }
1603         return 0;
1605 }
1606
1607 /**
1608  * amdgpu_device_ip_is_idle - is the hardware IP idle
1609  *
1610  * @adev: amdgpu_device pointer
1611  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1612  *
1613  * Check if the hardware IP is idle or not.
1614  * Returns true if the IP is idle, false if not.
1615  */
1616 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1617                               enum amd_ip_block_type block_type)
1618 {
1619         int i;
1620
1621         for (i = 0; i < adev->num_ip_blocks; i++) {
1622                 if (!adev->ip_blocks[i].status.valid)
1623                         continue;
1624                 if (adev->ip_blocks[i].version->type == block_type)
1625                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1626         }
1627         return true;
1629 }
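
/*
 * Illustrative pairing of the two helpers above: a caller that must not
 * touch a block while it is busy could poll first, then block:
 *
 *	if (!amdgpu_device_ip_is_idle(adev, AMD_IP_BLOCK_TYPE_GMC)) {
 *		r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 *		if (r)
 *			return r;
 *	}
 */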
1630
1631 /**
1632  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1633  *
1634  * @adev: amdgpu_device pointer
1635  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1636  *
1637  * Returns a pointer to the hardware IP block structure
1638  * if it exists for the asic, otherwise NULL.
1639  */
1640 struct amdgpu_ip_block *
1641 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1642                               enum amd_ip_block_type type)
1643 {
1644         int i;
1645
1646         for (i = 0; i < adev->num_ip_blocks; i++)
1647                 if (adev->ip_blocks[i].version->type == type)
1648                         return &adev->ip_blocks[i];
1649
1650         return NULL;
1651 }
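
/*
 * Example (illustrative): reading the major/minor version of the GFX
 * block, if one was registered for this asic:
 *
 *	struct amdgpu_ip_block *ip_block =
 *		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *
 *	if (ip_block)
 *		DRM_INFO("GFX IP v%d.%d\n", ip_block->version->major,
 *			 ip_block->version->minor);
 */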
1652
1653 /**
1654  * amdgpu_device_ip_block_version_cmp - check an IP block's version
1655  *
1656  * @adev: amdgpu_device pointer
1657  * @type: enum amd_ip_block_type
1658  * @major: major version
1659  * @minor: minor version
1660  *
1661  * Returns 0 if the block's version is equal to or greater than the
1662  * requested version, 1 if it is smaller or the ip_block doesn't exist.
1663  */
1664 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1665                                        enum amd_ip_block_type type,
1666                                        u32 major, u32 minor)
1667 {
1668         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1669
1670         if (ip_block && ((ip_block->version->major > major) ||
1671                         ((ip_block->version->major == major) &&
1672                         (ip_block->version->minor >= minor))))
1673                 return 0;
1674
1675         return 1;
1676 }
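
/*
 * Example (illustrative): note the inverted sense of the return value,
 * 0 meaning "at least this version".  Gating a workaround on a minimum
 * GFX IP version would look like:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						8, 1)) {
 *		... GFX IP is >= 8.1 ...
 *	}
 */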
1677
1678 /**
1679  * amdgpu_device_ip_block_add - add an IP block to the device
1680  *
1681  * @adev: amdgpu_device pointer
1682  * @ip_block_version: pointer to the IP to add
1683  *
1684  * Adds the IP block driver information to the collection of IPs
1685  * on the asic.
1686  */
1687 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1688                                const struct amdgpu_ip_block_version *ip_block_version)
1689 {
1690         if (!ip_block_version)
1691                 return -EINVAL;
1692
1693         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1694                   ip_block_version->funcs->name);
1695
1696         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1697
1698         return 0;
1699 }
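
/*
 * This is what the per-asic setup code calls once per IP, in hw init
 * order; a minimal sketch of what, e.g., vi_set_ip_blocks() does:
 *
 *	amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *	amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 *	...
 *
 * Note there is no bounds check against AMDGPU_MAX_IP_NUM here; callers
 * are trusted not to register more blocks than the array holds.
 */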
1700
1701 /**
1702  * amdgpu_device_enable_virtual_display - enable virtual display feature
1703  *
1704  * @adev: amdgpu_device pointer
1705  *
1706  * Enables the virtual display feature if the user has enabled it via
1707  * the module parameter virtual_display.  This feature provides a virtual
1708  * display hardware on headless boards or in virtualized environments.
1709  * This function parses and validates the configuration string specified by
1710  * the user and configures the virtual display configuration (number of
1711  * virtual connectors, crtcs, etc.) specified.
1712  */
1713 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1714 {
1715         adev->enable_virtual_display = false;
1716
1717         if (amdgpu_virtual_display) {
1718                 const char *pci_address_name = pci_name(adev->pdev);
1719                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1720
1721                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1722                 pciaddstr_tmp = pciaddstr;
1723                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1724                         pciaddname = strsep(&pciaddname_tmp, ",");
1725                         if (!strcmp("all", pciaddname)
1726                             || !strcmp(pci_address_name, pciaddname)) {
1727                                 long num_crtc;
1728                                 int res = -1;
1729
1730                                 adev->enable_virtual_display = true;
1731
1732                                 if (pciaddname_tmp)
1733                                         res = kstrtol(pciaddname_tmp, 10,
1734                                                       &num_crtc);
1735
1736                                 if (!res) {
1737                                         if (num_crtc < 1)
1738                                                 num_crtc = 1;
1739                                         if (num_crtc > 6)
1740                                                 num_crtc = 6;
1741                                         adev->mode_info.num_crtc = num_crtc;
1742                                 } else {
1743                                         adev->mode_info.num_crtc = 1;
1744                                 }
1745                                 break;
1746                         }
1747                 }
1748
1749                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1750                          amdgpu_virtual_display, pci_address_name,
1751                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1752
1753                 kfree(pciaddstr);
1754         }
1755 }
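
/*
 * Accepted parameter format, e.g. on the kernel command line (addresses
 * below are examples only):
 *
 *	amdgpu.virtual_display=0000:01:00.0,2		one device, two crtcs
 *	amdgpu.virtual_display=all,1			every amdgpu device
 *	amdgpu.virtual_display=0000:01:00.0;0000:02:00.0,4
 *
 * Entries are separated by ';'; the optional crtc count (clamped to the
 * range 1-6 above) follows the pci address after a ','.
 */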
1756
1757 /**
1758  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1759  *
1760  * @adev: amdgpu_device pointer
1761  *
1762  * Parses the asic configuration parameters specified in the gpu info
1763  * firmware and makes them available to the driver for use in configuring
1764  * the asic.
1765  * Returns 0 on success, -EINVAL on failure.
1766  */
1767 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1768 {
1769         const char *chip_name;
1770         char fw_name[40];
1771         int err;
1772         const struct gpu_info_firmware_header_v1_0 *hdr;
1773
1774         adev->firmware.gpu_info_fw = NULL;
1775
1776         if (adev->mman.discovery_bin) {
1777                 amdgpu_discovery_get_gfx_info(adev);
1778
1779                 /*
1780                  * FIXME: The bounding box is still needed by Navi12, so
1781                  * temporarily read it from gpu_info firmware. Should be dropped
1782                  * when DAL no longer needs it.
1783                  */
1784                 if (adev->asic_type != CHIP_NAVI12)
1785                         return 0;
1786         }
1787
1788         switch (adev->asic_type) {
1789 #ifdef CONFIG_DRM_AMDGPU_SI
1790         case CHIP_VERDE:
1791         case CHIP_TAHITI:
1792         case CHIP_PITCAIRN:
1793         case CHIP_OLAND:
1794         case CHIP_HAINAN:
1795 #endif
1796 #ifdef CONFIG_DRM_AMDGPU_CIK
1797         case CHIP_BONAIRE:
1798         case CHIP_HAWAII:
1799         case CHIP_KAVERI:
1800         case CHIP_KABINI:
1801         case CHIP_MULLINS:
1802 #endif
1803         case CHIP_TOPAZ:
1804         case CHIP_TONGA:
1805         case CHIP_FIJI:
1806         case CHIP_POLARIS10:
1807         case CHIP_POLARIS11:
1808         case CHIP_POLARIS12:
1809         case CHIP_VEGAM:
1810         case CHIP_CARRIZO:
1811         case CHIP_STONEY:
1812         case CHIP_VEGA20:
1813         case CHIP_SIENNA_CICHLID:
1814         case CHIP_NAVY_FLOUNDER:
1815         case CHIP_DIMGREY_CAVEFISH:
1816         default:
1817                 return 0;
1818         case CHIP_VEGA10:
1819                 chip_name = "vega10";
1820                 break;
1821         case CHIP_VEGA12:
1822                 chip_name = "vega12";
1823                 break;
1824         case CHIP_RAVEN:
1825                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1826                         chip_name = "raven2";
1827                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1828                         chip_name = "picasso";
1829                 else
1830                         chip_name = "raven";
1831                 break;
1832         case CHIP_ARCTURUS:
1833                 chip_name = "arcturus";
1834                 break;
1835         case CHIP_RENOIR:
1836                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1837                         chip_name = "renoir";
1838                 else
1839                         chip_name = "green_sardine";
1840                 break;
1841         case CHIP_NAVI10:
1842                 chip_name = "navi10";
1843                 break;
1844         case CHIP_NAVI14:
1845                 chip_name = "navi14";
1846                 break;
1847         case CHIP_NAVI12:
1848                 chip_name = "navi12";
1849                 break;
1850         case CHIP_VANGOGH:
1851                 chip_name = "vangogh";
1852                 break;
1853         }
1854
1855         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1856         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1857         if (err) {
1858                 dev_err(adev->dev,
1859                         "Failed to load gpu_info firmware \"%s\"\n",
1860                         fw_name);
1861                 goto out;
1862         }
1863         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1864         if (err) {
1865                 dev_err(adev->dev,
1866                         "Failed to validate gpu_info firmware \"%s\"\n",
1867                         fw_name);
1868                 goto out;
1869         }
1870
1871         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1872         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1873
1874         switch (hdr->version_major) {
1875         case 1:
1876         {
1877                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1878                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1879                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1880
1881                 /*
1882                  * Should be dropped when DAL no longer needs it.
1883                  */
1884                 if (adev->asic_type == CHIP_NAVI12)
1885                         goto parse_soc_bounding_box;
1886
1887                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1888                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1889                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1890                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1891                 adev->gfx.config.max_texture_channel_caches =
1892                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
1893                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1894                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1895                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1896                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1897                 adev->gfx.config.double_offchip_lds_buf =
1898                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1899                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1900                 adev->gfx.cu_info.max_waves_per_simd =
1901                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1902                 adev->gfx.cu_info.max_scratch_slots_per_cu =
1903                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1904                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1905                 if (hdr->version_minor >= 1) {
1906                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1907                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1908                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1909                         adev->gfx.config.num_sc_per_sh =
1910                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1911                         adev->gfx.config.num_packer_per_sc =
1912                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1913                 }
1914
1915 parse_soc_bounding_box:
1916                 /*
1917                  * soc bounding box info is not integrated in the discovery table,
1918                  * so it always has to be parsed from the gpu info firmware when needed.
1919                  */
1920                 if (hdr->version_minor == 2) {
1921                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1922                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1923                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1924                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1925                 }
1926                 break;
1927         }
1928         default:
1929                 dev_err(adev->dev,
1930                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1931                 err = -EINVAL;
1932                 goto out;
1933         }
1934 out:
1935         return err;
1936 }
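
/*
 * The gpu_info blobs requested above correspond to the MODULE_FIRMWARE()
 * declarations earlier in this file and are resolved through the normal
 * firmware search path, typically (distro-dependent, shown only as an
 * example):
 *
 *	/lib/firmware/amdgpu/raven_gpu_info.bin
 */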
1937
1938 /**
1939  * amdgpu_device_ip_early_init - run early init for hardware IPs
1940  *
1941  * @adev: amdgpu_device pointer
1942  *
1943  * Early initialization pass for hardware IPs.  The hardware IPs that make
1944  * up each asic are discovered and each IP's early_init callback is run.  This
1945  * is the first stage in initializing the asic.
1946  * Returns 0 on success, negative error code on failure.
1947  */
1948 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1949 {
1950         int i, r;
1951
1952         amdgpu_device_enable_virtual_display(adev);
1953
1954         if (amdgpu_sriov_vf(adev)) {
1955                 r = amdgpu_virt_request_full_gpu(adev, true);
1956                 if (r)
1957                         return r;
1958         }
1959
1960         switch (adev->asic_type) {
1961 #ifdef CONFIG_DRM_AMDGPU_SI
1962         case CHIP_VERDE:
1963         case CHIP_TAHITI:
1964         case CHIP_PITCAIRN:
1965         case CHIP_OLAND:
1966         case CHIP_HAINAN:
1967                 adev->family = AMDGPU_FAMILY_SI;
1968                 r = si_set_ip_blocks(adev);
1969                 if (r)
1970                         return r;
1971                 break;
1972 #endif
1973 #ifdef CONFIG_DRM_AMDGPU_CIK
1974         case CHIP_BONAIRE:
1975         case CHIP_HAWAII:
1976         case CHIP_KAVERI:
1977         case CHIP_KABINI:
1978         case CHIP_MULLINS:
1979                 if (adev->flags & AMD_IS_APU)
1980                         adev->family = AMDGPU_FAMILY_KV;
1981                 else
1982                         adev->family = AMDGPU_FAMILY_CI;
1983
1984                 r = cik_set_ip_blocks(adev);
1985                 if (r)
1986                         return r;
1987                 break;
1988 #endif
1989         case CHIP_TOPAZ:
1990         case CHIP_TONGA:
1991         case CHIP_FIJI:
1992         case CHIP_POLARIS10:
1993         case CHIP_POLARIS11:
1994         case CHIP_POLARIS12:
1995         case CHIP_VEGAM:
1996         case CHIP_CARRIZO:
1997         case CHIP_STONEY:
1998                 if (adev->flags & AMD_IS_APU)
1999                         adev->family = AMDGPU_FAMILY_CZ;
2000                 else
2001                         adev->family = AMDGPU_FAMILY_VI;
2002
2003                 r = vi_set_ip_blocks(adev);
2004                 if (r)
2005                         return r;
2006                 break;
2007         case CHIP_VEGA10:
2008         case CHIP_VEGA12:
2009         case CHIP_VEGA20:
2010         case CHIP_RAVEN:
2011         case CHIP_ARCTURUS:
2012         case CHIP_RENOIR:
2013                 if (adev->flags & AMD_IS_APU)
2014                         adev->family = AMDGPU_FAMILY_RV;
2015                 else
2016                         adev->family = AMDGPU_FAMILY_AI;
2017
2018                 r = soc15_set_ip_blocks(adev);
2019                 if (r)
2020                         return r;
2021                 break;
2022         case CHIP_NAVI10:
2023         case CHIP_NAVI14:
2024         case CHIP_NAVI12:
2025         case CHIP_SIENNA_CICHLID:
2026         case CHIP_NAVY_FLOUNDER:
2027         case CHIP_DIMGREY_CAVEFISH:
2028         case CHIP_VANGOGH:
2029                 if (adev->asic_type == CHIP_VANGOGH)
2030                         adev->family = AMDGPU_FAMILY_VGH;
2031                 else
2032                         adev->family = AMDGPU_FAMILY_NV;
2033
2034                 r = nv_set_ip_blocks(adev);
2035                 if (r)
2036                         return r;
2037                 break;
2038         default:
2039                 /* FIXME: not supported yet */
2040                 return -EINVAL;
2041         }
2042
2043         amdgpu_amdkfd_device_probe(adev);
2044
2045         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2046         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2047                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2048
2049         for (i = 0; i < adev->num_ip_blocks; i++) {
2050                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2051                         DRM_ERROR("disabled ip block: %d <%s>\n",
2052                                   i, adev->ip_blocks[i].version->funcs->name);
2053                         adev->ip_blocks[i].status.valid = false;
2054                 } else {
2055                         if (adev->ip_blocks[i].version->funcs->early_init) {
2056                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2057                                 if (r == -ENOENT) {
2058                                         adev->ip_blocks[i].status.valid = false;
2059                                 } else if (r) {
2060                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2061                                                   adev->ip_blocks[i].version->funcs->name, r);
2062                                         return r;
2063                                 } else {
2064                                         adev->ip_blocks[i].status.valid = true;
2065                                 }
2066                         } else {
2067                                 adev->ip_blocks[i].status.valid = true;
2068                         }
2069                 }
2070                 /* get the vbios after the asic_funcs are set up */
2071                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2072                         r = amdgpu_device_parse_gpu_info_fw(adev);
2073                         if (r)
2074                                 return r;
2075
2076                         /* Read BIOS */
2077                         if (!amdgpu_get_bios(adev))
2078                                 return -EINVAL;
2079
2080                         r = amdgpu_atombios_init(adev);
2081                         if (r) {
2082                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2083                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2084                                 return r;
2085                         }
2086                 }
2087         }
2088
2089         adev->cg_flags &= amdgpu_cg_mask;
2090         adev->pg_flags &= amdgpu_pg_mask;
2091
2092         return 0;
2093 }
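
/*
 * The amdgpu_ip_block_mask checked above comes from the
 * amdgpu.ip_block_mask module parameter: bit i enables ip_blocks[i], so
 * booting with, illustratively,
 *
 *	amdgpu.ip_block_mask=0xfffffffd
 *
 * disables IP block number 1.  Which block sits at which index is
 * asic-specific; the "add ip block number" log printed by
 * amdgpu_device_ip_block_add() shows the mapping.
 */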
2094
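/*
 * Phase 1 of hw init: bring up only COMMON, IH and (on SR-IOV) PSP, the
 * minimum needed before firmware can be loaded; every other block is
 * brought up in phase 2, after amdgpu_device_fw_loading().
 */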
2095 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2096 {
2097         int i, r;
2098
2099         for (i = 0; i < adev->num_ip_blocks; i++) {
2100                 if (!adev->ip_blocks[i].status.sw)
2101                         continue;
2102                 if (adev->ip_blocks[i].status.hw)
2103                         continue;
2104                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2105                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2106                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2107                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2108                         if (r) {
2109                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2110                                           adev->ip_blocks[i].version->funcs->name, r);
2111                                 return r;
2112                         }
2113                         adev->ip_blocks[i].status.hw = true;
2114                 }
2115         }
2116
2117         return 0;
2118 }
2119
2120 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2121 {
2122         int i, r;
2123
2124         for (i = 0; i < adev->num_ip_blocks; i++) {
2125                 if (!adev->ip_blocks[i].status.sw)
2126                         continue;
2127                 if (adev->ip_blocks[i].status.hw)
2128                         continue;
2129                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2130                 if (r) {
2131                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2132                                   adev->ip_blocks[i].version->funcs->name, r);
2133                         return r;
2134                 }
2135                 adev->ip_blocks[i].status.hw = true;
2136         }
2137
2138         return 0;
2139 }
2140
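/*
 * Firmware loading step between hw init phases 1 and 2: on VEGA10 and
 * newer the PSP block is brought up (hw_init, or resume when coming back
 * from reset/suspend) so it can load the other IPs' firmware; the SMU
 * firmware is then loaded on bare metal (and on Tonga even under SR-IOV).
 */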
2141 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2142 {
2143         int r = 0;
2144         int i;
2145         uint32_t smu_version;
2146
2147         if (adev->asic_type >= CHIP_VEGA10) {
2148                 for (i = 0; i < adev->num_ip_blocks; i++) {
2149                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2150                                 continue;
2151
2152                         /* no need to do the fw loading again if already done */
2153                         if (adev->ip_blocks[i].status.hw)
2154                                 break;
2155
2156                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2157                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2158                                 if (r) {
2159                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2160                                                           adev->ip_blocks[i].version->funcs->name, r);
2161                                         return r;
2162                                 }
2163                         } else {
2164                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2165                                 if (r) {
2166                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2167                                                           adev->ip_blocks[i].version->funcs->name, r);
2168                                         return r;
2169                                 }
2170                         }
2171
2172                         adev->ip_blocks[i].status.hw = true;
2173                         break;
2174                 }
2175         }
2176
2177         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2178                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2179
2180         return r;
2181 }
2182
2183 /**
2184  * amdgpu_device_ip_init - run init for hardware IPs
2185  *
2186  * @adev: amdgpu_device pointer
2187  *
2188  * Main initialization pass for hardware IPs.  The list of all the hardware
2189  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2190  * are run.  sw_init initializes the software state associated with each IP
2191  * and hw_init initializes the hardware associated with each IP.
2192  * Returns 0 on success, negative error code on failure.
2193  */
2194 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2195 {
2196         int i, r;
2197
2198         r = amdgpu_ras_init(adev);
2199         if (r)
2200                 return r;
2201
2202         for (i = 0; i < adev->num_ip_blocks; i++) {
2203                 if (!adev->ip_blocks[i].status.valid)
2204                         continue;
2205                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2206                 if (r) {
2207                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2208                                   adev->ip_blocks[i].version->funcs->name, r);
2209                         goto init_failed;
2210                 }
2211                 adev->ip_blocks[i].status.sw = true;
2212
2213                 /* need to do gmc hw init early so we can allocate gpu mem */
2214                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2215                         r = amdgpu_device_vram_scratch_init(adev);
2216                         if (r) {
2217                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2218                                 goto init_failed;
2219                         }
2220                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2221                         if (r) {
2222                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2223                                 goto init_failed;
2224                         }
2225                         r = amdgpu_device_wb_init(adev);
2226                         if (r) {
2227                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2228                                 goto init_failed;
2229                         }
2230                         adev->ip_blocks[i].status.hw = true;
2231
2232                         /* right after GMC hw init, we create CSA */
2233                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2234                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2235                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2236                                                                 AMDGPU_CSA_SIZE);
2237                                 if (r) {
2238                                         DRM_ERROR("allocate CSA failed %d\n", r);
2239                                         goto init_failed;
2240                                 }
2241                         }
2242                 }
2243         }
2244
2245         if (amdgpu_sriov_vf(adev))
2246                 amdgpu_virt_init_data_exchange(adev);
2247
2248         r = amdgpu_ib_pool_init(adev);
2249         if (r) {
2250                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2251                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2252                 goto init_failed;
2253         }
2254
2255         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2256         if (r)
2257                 goto init_failed;
2258
2259         r = amdgpu_device_ip_hw_init_phase1(adev);
2260         if (r)
2261                 goto init_failed;
2262
2263         r = amdgpu_device_fw_loading(adev);
2264         if (r)
2265                 goto init_failed;
2266
2267         r = amdgpu_device_ip_hw_init_phase2(adev);
2268         if (r)
2269                 goto init_failed;
2270
2271         /*
2272          * Retired pages will be loaded from eeprom and reserved here; this
2273          * must be called after amdgpu_device_ip_hw_init_phase2 since for
2274          * some ASICs the RAS EEPROM code relies on the SMU being fully
2275          * functional for I2C communication, which is only true at this point.
2276          *
2277          * amdgpu_ras_recovery_init may fail, but the caller only cares about
2278          * failures caused by a bad gpu state, and stops the amdgpu init
2279          * process accordingly. For other failure cases it still releases all
2280          * resources and prints an error message, rather than returning a
2281          * negative value to the upper level.
2282          *
2283          * Note: theoretically, this should be called before all vram
2284          * allocations to protect retired pages from being abused.
2285          */
2286         r = amdgpu_ras_recovery_init(adev);
2287         if (r)
2288                 goto init_failed;
2289
2290         if (adev->gmc.xgmi.num_physical_nodes > 1)
2291                 amdgpu_xgmi_add_device(adev);
2292         amdgpu_amdkfd_device_init(adev);
2293
2294         amdgpu_fru_get_product_info(adev);
2295
2296 init_failed:
2297         if (amdgpu_sriov_vf(adev))
2298                 amdgpu_virt_release_full_gpu(adev, true);
2299
2300         return r;
2301 }
2302
2303 /**
2304  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2305  *
2306  * @adev: amdgpu_device pointer
2307  *
2308  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2309  * this function before a GPU reset.  If the value is retained after a
2310  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2311  */
2312 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2313 {
2314         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2315 }
2316
2317 /**
2318  * amdgpu_device_check_vram_lost - check if vram is valid
2319  *
2320  * @adev: amdgpu_device pointer
2321  *
2322  * Checks the reset magic value written to the gart pointer in VRAM.
2323  * The driver calls this after a GPU reset to see if the contents of
2324  * VRAM have been lost or not.
2325  * Returns true if vram is lost, false if not.
2326  */
2327 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2328 {
2329         if (memcmp(adev->gart.ptr, adev->reset_magic,
2330                         AMDGPU_RESET_MAGIC_NUM))
2331                 return true;
2332
2333         if (!amdgpu_in_reset(adev))
2334                 return false;
2335
2336         /*
2337          * For all ASICs with baco/mode1 reset, the VRAM is
2338          * always assumed to be lost.
2339          */
2340         switch (amdgpu_asic_reset_method(adev)) {
2341         case AMD_RESET_METHOD_BACO:
2342         case AMD_RESET_METHOD_MODE1:
2343                 return true;
2344         default:
2345                 return false;
2346         }
2347 }
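
/*
 * The two helpers above are used as a pair around a reset; a sketch of
 * the intended flow (the real call sites live in the reset path):
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	... perform the GPU reset ...
 *	vram_lost = amdgpu_device_check_vram_lost(adev);
 *	if (vram_lost)
 *		... re-upload buffers that lived in VRAM ...
 */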
2348
2349 /**
2350  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2351  *
2352  * @adev: amdgpu_device pointer
2353  * @state: clockgating state (gate or ungate)
2354  *
2355  * The list of all the hardware IPs that make up the asic is walked and the
2356  * set_clockgating_state callbacks are run.  The late initialization pass
2357  * enables clockgating for hardware IPs, while the fini and suspend passes
2358  * disable it.
2359  * Returns 0 on success, negative error code on failure.
2360  */
2362 static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2363                                                 enum amd_clockgating_state state)
2364 {
2365         int i, j, r;
2366
2367         if (amdgpu_emu_mode == 1)
2368                 return 0;
2369
2370         for (j = 0; j < adev->num_ip_blocks; j++) {
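                /* gate in list order, ungate in reverse list order */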
2371                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2372                 if (!adev->ip_blocks[i].status.late_initialized)
2373                         continue;
2374                 /* skip CG for GFX on S0ix */
2375                 if (adev->in_s0ix &&
2376                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2377                         continue;
2378                 /* skip CG for VCE/UVD, it's handled specially */
2379                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2380                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2381                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2382                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2383                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2384                         /* apply the requested clockgating state */
2385                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2386                                                                                      state);
2387                         if (r) {
2388                                 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
2389                                           adev->ip_blocks[i].version->funcs->name, r);
2390                                 return r;
2391                         }
2392                 }
2393         }
2394
2395         return 0;
2396 }
2397
2398 static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
2399 {
2400         int i, j, r;
2401
2402         if (amdgpu_emu_mode == 1)
2403                 return 0;
2404
2405         for (j = 0; j < adev->num_ip_blocks; j++) {
2406                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2407                 if (!adev->ip_blocks[i].status.late_initialized)
2408                         continue;
2409                 /* skip PG for GFX on S0ix */
2410                 if (adev->in_s0ix &&
2411                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2412                         continue;
2413                 /* skip PG for VCE/UVD, it's handled specially */
2414                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2415                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2416                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2417                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2418                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2419                         /* apply the requested powergating state */
2420                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2421                                                                                         state);
2422                         if (r) {
2423                                 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2424                                           adev->ip_blocks[i].version->funcs->name, r);
2425                                 return r;
2426                         }
2427                 }
2428         }
2429         return 0;
2430 }
2431
2432 static int amdgpu_device_enable_mgpu_fan_boost(void)
2433 {
2434         struct amdgpu_gpu_instance *gpu_ins;
2435         struct amdgpu_device *adev;
2436         int i, ret = 0;
2437
2438         mutex_lock(&mgpu_info.mutex);
2439
2440         /*
2441          * MGPU fan boost feature should be enabled
2442          * only when there are two or more dGPUs in
2443          * the system
2444          */
2445         if (mgpu_info.num_dgpu < 2)
2446                 goto out;
2447
2448         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2449                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2450                 adev = gpu_ins->adev;
2451                 if (!(adev->flags & AMD_IS_APU) &&
2452                     !gpu_ins->mgpu_fan_enabled) {
2453                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2454                         if (ret)
2455                                 break;
2456
2457                         gpu_ins->mgpu_fan_enabled = 1;
2458                 }
2459         }
2460
2461 out:
2462         mutex_unlock(&mgpu_info.mutex);
2463
2464         return ret;
2465 }
2466
2467 /**
2468  * amdgpu_device_ip_late_init - run late init for hardware IPs
2469  *
2470  * @adev: amdgpu_device pointer
2471  *
2472  * Late initialization pass for hardware IPs.  The list of all the hardware
2473  * IPs that make up the asic is walked and the late_init callbacks are run.
2474  * late_init covers any special initialization that an IP requires
2475  * after all of the IPs have been initialized or something that needs to happen
2476  * late in the init process.
2477  * Returns 0 on success, negative error code on failure.
2478  */
2479 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2480 {
2481         struct amdgpu_gpu_instance *gpu_instance;
2482         int i = 0, r;
2483
2484         for (i = 0; i < adev->num_ip_blocks; i++) {
2485                 if (!adev->ip_blocks[i].status.hw)
2486                         continue;
2487                 if (adev->ip_blocks[i].version->funcs->late_init) {
2488                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2489                         if (r) {
2490                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2491                                           adev->ip_blocks[i].version->funcs->name, r);
2492                                 return r;
2493                         }
2494                 }
2495                 adev->ip_blocks[i].status.late_initialized = true;
2496         }
2497
2498         amdgpu_ras_set_error_query_ready(adev, true);
2499
2500         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2501         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2502
2503         amdgpu_device_fill_reset_magic(adev);
2504
2505         r = amdgpu_device_enable_mgpu_fan_boost();
2506         if (r)
2507                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2509
2510         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2511                 mutex_lock(&mgpu_info.mutex);
2512
2513                 /*
2514                  * Reset device p-state to low as this was booted with high.
2515                  *
2516                  * This should be performed only after all devices from the same
2517                  * hive get initialized.
2518                  *
2519                  * However, the number of devices in a hive is not known in
2520                  * advance, as it is counted one by one during device
2521                  * initialization.
2522                  *
2523                  * So, we wait for all XGMI interlinked devices to be initialized.
2523                  * This may bring some delays as those devices may come from
2524                  * different hives. But that should be OK.
2525                  */
2526                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2527                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2528                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2529                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2530                                         continue;
2531
2532                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2533                                                 AMDGPU_XGMI_PSTATE_MIN);
2534                                 if (r) {
2535                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2536                                         break;
2537                                 }
2538                         }
2539                 }
2540
2541                 mutex_unlock(&mgpu_info.mutex);
2542         }
2543
2544         return 0;
2545 }
2546
2547 /**
2548  * amdgpu_device_ip_fini - run fini for hardware IPs
2549  *
2550  * @adev: amdgpu_device pointer
2551  *
2552  * Main teardown pass for hardware IPs.  The list of all the hardware
2553  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2554  * are run.  hw_fini tears down the hardware associated with each IP
2555  * and sw_fini tears down any software state associated with each IP.
2556  * Returns 0 on success, negative error code on failure.
2557  */
2558 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2559 {
2560         int i, r;
2561
2562         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2563                 amdgpu_virt_release_ras_err_handler_data(adev);
2564
2565         amdgpu_ras_pre_fini(adev);
2566
2567         if (adev->gmc.xgmi.num_physical_nodes > 1)
2568                 amdgpu_xgmi_remove_device(adev);
2569
2570         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2571         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2572
2573         amdgpu_amdkfd_device_fini(adev);
2574
2575         /* need to disable SMC first */
2576         for (i = 0; i < adev->num_ip_blocks; i++) {
2577                 if (!adev->ip_blocks[i].status.hw)
2578                         continue;
2579                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2580                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2581                         /* XXX handle errors */
2582                         if (r) {
2583                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2584                                           adev->ip_blocks[i].version->funcs->name, r);
2585                         }
2586                         adev->ip_blocks[i].status.hw = false;
2587                         break;
2588                 }
2589         }
2590
2591         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2592                 if (!adev->ip_blocks[i].status.hw)
2593                         continue;
2594
2595                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2596                 /* XXX handle errors */
2597                 if (r) {
2598                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2599                                   adev->ip_blocks[i].version->funcs->name, r);
2600                 }
2601
2602                 adev->ip_blocks[i].status.hw = false;
2603         }
2605
2606         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2607                 if (!adev->ip_blocks[i].status.sw)
2608                         continue;
2609
2610                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2611                         amdgpu_ucode_free_bo(adev);
2612                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2613                         amdgpu_device_wb_fini(adev);
2614                         amdgpu_device_vram_scratch_fini(adev);
2615                         amdgpu_ib_pool_fini(adev);
2616                 }
2617
2618                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2619                 /* XXX handle errors */
2620                 if (r) {
2621                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2622                                   adev->ip_blocks[i].version->funcs->name, r);
2623                 }
2624                 adev->ip_blocks[i].status.sw = false;
2625                 adev->ip_blocks[i].status.valid = false;
2626         }
2627
2628         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2629                 if (!adev->ip_blocks[i].status.late_initialized)
2630                         continue;
2631                 if (adev->ip_blocks[i].version->funcs->late_fini)
2632                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2633                 adev->ip_blocks[i].status.late_initialized = false;
2634         }
2635
2636         amdgpu_ras_fini(adev);
2637
2638         if (amdgpu_sriov_vf(adev))
2639                 if (amdgpu_virt_release_full_gpu(adev, false))
2640                         DRM_ERROR("failed to release exclusive mode on fini\n");
2641
2642         return 0;
2643 }
2644
2645 /**
2646  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2647  *
2648  * @work: work_struct.
2649  */
2650 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2651 {
2652         struct amdgpu_device *adev =
2653                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2654         int r;
2655
2656         r = amdgpu_ib_ring_tests(adev);
2657         if (r)
2658                 DRM_ERROR("ib ring test failed (%d).\n", r);
2659 }
2660
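/*
 * amdgpu_device_delay_enable_gfx_off - delayed worker to enter GFXOFF
 *
 * @work: work_struct.
 *
 * Runs a while after the last request to keep GFX powered has been
 * dropped.  gfx_off_req_count tracks outstanding "keep GFX on" requests;
 * GFXOFF is requested from the SMU only once that count is back to zero
 * and GFXOFF is not already in effect.
 */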
2661 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2662 {
2663         struct amdgpu_device *adev =
2664                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2665
2666         mutex_lock(&adev->gfx.gfx_off_mutex);
2667         if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2668                 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2669                         adev->gfx.gfx_off_state = true;
2670         }
2671         mutex_unlock(&adev->gfx.gfx_off_mutex);
2672 }
2673
2674 /**
2675  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2676  *
2677  * @adev: amdgpu_device pointer
2678  *
2679  * First suspend pass for hardware IPs.  Clockgating and powergating
2680  * are disabled, then the list of all the hardware IPs that make up the
2681  * asic is walked and the suspend callbacks are run for the display
2682  * (DCE) blocks only; everything else is suspended in phase 2.
2683  * Returns 0 on success, negative error code on failure.
2684  */
2685 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2686 {
2687         int i, r;
2688
2689         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2690         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2691
2692         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2693                 if (!adev->ip_blocks[i].status.valid)
2694                         continue;
2695
2696                 /* displays are handled separately */
2697                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2698                         continue;
2699
2701                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2702                 /* XXX handle errors */
2703                 if (r) {
2704                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2705                                   adev->ip_blocks[i].version->funcs->name, r);
2706                         return r;
2707                 }
2708
2709                 adev->ip_blocks[i].status.hw = false;
2710         }
2711
2712         return 0;
2713 }
2714
2715 /**
2716  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2717  *
2718  * @adev: amdgpu_device pointer
2719  *
2720  * Second suspend pass for hardware IPs.  The list of all the hardware
2721  * IPs that make up the asic is walked in reverse and the suspend
2722  * callbacks are run for every block not already handled in phase 1.
2723  * suspend puts the hardware and software state in each IP into a state
2724  * suitable for suspend.
2724  * Returns 0 on success, negative error code on failure.
2725  */
2726 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2727 {
2728         int i, r;
2729
2730         if (adev->in_s0ix)
2731                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2732
2733         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2734                 if (!adev->ip_blocks[i].status.valid)
2735                         continue;
2736                 /* displays are handled in phase1 */
2737                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2738                         continue;
2739                 /* PSP lost connection when err_event_athub occurs */
2740                 if (amdgpu_ras_intr_triggered() &&
2741                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2742                         adev->ip_blocks[i].status.hw = false;
2743                         continue;
2744                 }
2745
2746                 /* skip suspend of gfx and psp for S0ix
2747                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
2748                  * like at runtime. PSP is also part of the always on hardware
2749                  * so no need to suspend it.
2750                  */
2751                 if (adev->in_s0ix &&
2752                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2753                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2754                         continue;
2755
2757                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2758                 /* XXX handle errors */
2759                 if (r) {
2760                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2761                                   adev->ip_blocks[i].version->funcs->name, r);
2762                 }
2763                 adev->ip_blocks[i].status.hw = false;
2764                 /* handle putting the SMC in the appropriate state */
2765                 if (!amdgpu_sriov_vf(adev)) {
2766                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2767                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2768                                 if (r) {
2769                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2770                                                         adev->mp1_state, r);
2771                                         return r;
2772                                 }
2773                         }
2774                 }
2776         }
2777
2778         return 0;
2779 }
2780
2781 /**
2782  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2783  *
2784  * @adev: amdgpu_device pointer
2785  *
2786  * Main suspend function for hardware IPs.  The list of all the hardware
2787  * IPs that make up the asic is walked, clockgating is disabled and the
2788  * suspend callbacks are run.  suspend puts the hardware and software state
2789  * in each IP into a state suitable for suspend.
2790  * Returns 0 on success, negative error code on failure.
2791  */
2792 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2793 {
2794         int r;
2795
2796         if (amdgpu_sriov_vf(adev))
2797                 amdgpu_virt_request_full_gpu(adev, false);
2798
2799         r = amdgpu_device_ip_suspend_phase1(adev);
2800         if (r)
2801                 return r;
2802         r = amdgpu_device_ip_suspend_phase2(adev);
2803
2804         if (amdgpu_sriov_vf(adev))
2805                 amdgpu_virt_release_full_gpu(adev, false);
2806
2807         return r;
2808 }
2809
2810 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2811 {
2812         int i, r;
2813
2814         static enum amd_ip_block_type ip_order[] = {
2815                 AMD_IP_BLOCK_TYPE_GMC,
2816                 AMD_IP_BLOCK_TYPE_COMMON,
2817                 AMD_IP_BLOCK_TYPE_PSP,
2818                 AMD_IP_BLOCK_TYPE_IH,
2819         };
2820
2821         for (i = 0; i < adev->num_ip_blocks; i++) {
2822                 int j;
2823                 struct amdgpu_ip_block *block;
2824
2825                 block = &adev->ip_blocks[i];
2826                 block->status.hw = false;
2827
2828                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2830                         if (block->version->type != ip_order[j] ||
2831                                 !block->status.valid)
2832                                 continue;
2833
2834                         r = block->version->funcs->hw_init(adev);
2835                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2836                         if (r)
2837                                 return r;
2838                         block->status.hw = true;
2839                 }
2840         }
2841
2842         return 0;
2843 }
2844
2845 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2846 {
2847         int i, r;
2848
2849         static enum amd_ip_block_type ip_order[] = {
2850                 AMD_IP_BLOCK_TYPE_SMC,
2851                 AMD_IP_BLOCK_TYPE_DCE,
2852                 AMD_IP_BLOCK_TYPE_GFX,
2853                 AMD_IP_BLOCK_TYPE_SDMA,
2854                 AMD_IP_BLOCK_TYPE_UVD,
2855                 AMD_IP_BLOCK_TYPE_VCE,
2856                 AMD_IP_BLOCK_TYPE_VCN
2857         };
2858
2859         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2860                 int j;
2861                 struct amdgpu_ip_block *block;
2862
2863                 for (j = 0; j < adev->num_ip_blocks; j++) {
2864                         block = &adev->ip_blocks[j];
2865
2866                         if (block->version->type != ip_order[i] ||
2867                                 !block->status.valid ||
2868                                 block->status.hw)
2869                                 continue;
2870
2871                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2872                                 r = block->version->funcs->resume(adev);
2873                         else
2874                                 r = block->version->funcs->hw_init(adev);
2875
2876                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2877                         if (r)
2878                                 return r;
2879                         block->status.hw = true;
2880                 }
2881         }
2882
2883         return 0;
2884 }
2885
2886 /**
2887  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2888  *
2889  * @adev: amdgpu_device pointer
2890  *
2891  * First resume function for hardware IPs.  The list of all the hardware
2892  * IPs that make up the asic is walked and the resume callbacks are run for
2893  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2894  * after a suspend and updates the software state as necessary.  This
2895  * function is also used for restoring the GPU after a GPU reset.
2896  * Returns 0 on success, negative error code on failure.
2897  */
2898 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2899 {
2900         int i, r;
2901
2902         for (i = 0; i < adev->num_ip_blocks; i++) {
2903                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2904                         continue;
2905                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2906                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2907                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2909                         r = adev->ip_blocks[i].version->funcs->resume(adev);
2910                         if (r) {
2911                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
2912                                           adev->ip_blocks[i].version->funcs->name, r);
2913                                 return r;
2914                         }
2915                         adev->ip_blocks[i].status.hw = true;
2916                 }
2917         }
2918
2919         return 0;
2920 }
2921
2922 /**
2923  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2924  *
2925  * @adev: amdgpu_device pointer
2926  *
2927  * Second resume function for hardware IPs.  The list of all the hardware
2928  * IPs that make up the asic is walked and the resume callbacks are run for
2929  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2930  * functional state after a suspend and updates the software state as
2931  * necessary.  This function is also used for restoring the GPU after a GPU
2932  * reset.
2933  * Returns 0 on success, negative error code on failure.
2934  */
2935 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2936 {
2937         int i, r;
2938
2939         for (i = 0; i < adev->num_ip_blocks; i++) {
2940                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2941                         continue;
2942                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2943                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2944                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2945                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2946                         continue;
2947                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2948                 if (r) {
2949                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2950                                   adev->ip_blocks[i].version->funcs->name, r);
2951                         return r;
2952                 }
2953                 adev->ip_blocks[i].status.hw = true;
2954         }
2955
2956         return 0;
2957 }
2958
2959 /**
2960  * amdgpu_device_ip_resume - run resume for hardware IPs
2961  *
2962  * @adev: amdgpu_device pointer
2963  *
2964  * Main resume function for hardware IPs.  The hardware IPs
2965  * are split into two resume functions because they are
2966  * also used in recovering from a GPU reset, and some additional
2967  * steps need to be taken between them.  In this case (S3/S4) they are
2968  * run sequentially.
2969  * Returns 0 on success, negative error code on failure.
2970  */
2971 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2972 {
2973         int r;
2974
2975         r = amdgpu_device_ip_resume_phase1(adev);
2976         if (r)
2977                 return r;
2978
2979         r = amdgpu_device_fw_loading(adev);
2980         if (r)
2981                 return r;
2982
2983         r = amdgpu_device_ip_resume_phase2(adev);
2984
2985         return r;
2986 }
2987
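/*
 * Resume ordering at a glance: phase 1 restores the blocks everything
 * else depends on, firmware is then loaded through PSP, and only after
 * that are the remaining engines brought back up.
 *
 *   phase1: COMMON -> GMC -> IH
 *   amdgpu_device_fw_loading()
 *   phase2: all remaining blocks (GFX, SDMA, multimedia, ...), with
 *           COMMON/GMC/IH/PSP skipped as already handled
 */
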
2988 /**
2989  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2990  *
2991  * @adev: amdgpu_device pointer
2992  *
2993  * Query the VBIOS data tables to determine if the board supports SR-IOV.
2994  */
2995 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2996 {
2997         if (amdgpu_sriov_vf(adev)) {
2998                 if (adev->is_atom_fw) {
2999                         if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
3000                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3001                 } else {
3002                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3003                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3004                 }
3005
3006                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3007                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3008         }
3009 }
3010
3011 /**
3012  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3013  *
3014  * @asic_type: AMD asic type
3015  *
3016  * Check if there is DC (the new modesetting infrastructure) support for an asic.
3017  * Returns true if DC has support, false if not.
3018  */
3019 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3020 {
3021         switch (asic_type) {
3022 #if defined(CONFIG_DRM_AMD_DC)
3023 #if defined(CONFIG_DRM_AMD_DC_SI)
3024         case CHIP_TAHITI:
3025         case CHIP_PITCAIRN:
3026         case CHIP_VERDE:
3027         case CHIP_OLAND:
3028 #endif
3029         case CHIP_BONAIRE:
3030         case CHIP_KAVERI:
3031         case CHIP_KABINI:
3032         case CHIP_MULLINS:
3033                 /*
3034                  * We have systems in the wild with these ASICs that require
3035                  * LVDS and VGA support which is not supported with DC.
3036                  *
3037                  * Fallback to the non-DC driver here by default so as not to
3038                  * cause regressions.
3039                  */
3040                 return amdgpu_dc > 0;
3041         case CHIP_HAWAII:
3042         case CHIP_CARRIZO:
3043         case CHIP_STONEY:
3044         case CHIP_POLARIS10:
3045         case CHIP_POLARIS11:
3046         case CHIP_POLARIS12:
3047         case CHIP_VEGAM:
3048         case CHIP_TONGA:
3049         case CHIP_FIJI:
3050         case CHIP_VEGA10:
3051         case CHIP_VEGA12:
3052         case CHIP_VEGA20:
3053 #if defined(CONFIG_DRM_AMD_DC_DCN)
3054         case CHIP_RAVEN:
3055         case CHIP_NAVI10:
3056         case CHIP_NAVI14:
3057         case CHIP_NAVI12:
3058         case CHIP_RENOIR:
3059         case CHIP_SIENNA_CICHLID:
3060         case CHIP_NAVY_FLOUNDER:
3061         case CHIP_DIMGREY_CAVEFISH:
3062         case CHIP_VANGOGH:
3063 #endif
3064                 return amdgpu_dc != 0;
3065 #endif
3066         default:
3067                 if (amdgpu_dc > 0)
3068                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3069                                          "but isn't supported by ASIC, ignoring\n");
3070                 return false;
3071         }
3072 }
3073
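/*
 * Note the two comparisons above: amdgpu_dc defaults to -1 (auto), so
 * "amdgpu_dc != 0" enables DC by default on the newer ASICs, while
 * "amdgpu_dc > 0" keeps DC off on the LVDS/VGA-era parts unless the
 * user explicitly boots with amdgpu.dc=1 (amdgpu.dc=0 forces the
 * non-DC path everywhere).
 */
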
3074 /**
3075  * amdgpu_device_has_dc_support - check if dc is supported
3076  *
3077  * @adev: amdgpu_device pointer
3078  *
3079  * Returns true for supported, false for not supported
3080  */
3081 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3082 {
3083         if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
3084                 return false;
3085
3086         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3087 }
3088
3089
3090 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3091 {
3092         struct amdgpu_device *adev =
3093                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3094         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3095
3096         /* It's a bug to not have a hive within this function */
3097         if (WARN_ON(!hive))
3098                 return;
3099
3100         /*
3101          * Use task barrier to synchronize all xgmi reset works across the
3102          * hive. task_barrier_enter and task_barrier_exit will block
3103          * until all the threads running the xgmi reset works reach
3104          * those points. task_barrier_full will do both blocks.
3105          */
3106         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3107
3108                 task_barrier_enter(&hive->tb);
3109                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3110
3111                 if (adev->asic_reset_res)
3112                         goto fail;
3113
3114                 task_barrier_exit(&hive->tb);
3115                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3116
3117                 if (adev->asic_reset_res)
3118                         goto fail;
3119
3120                 if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
3121                         adev->mmhub.funcs->reset_ras_error_count(adev);
3122         } else {
3123
3124                 task_barrier_full(&hive->tb);
3125                 adev->asic_reset_res = amdgpu_asic_reset(adev);
3126         }
3127
3128 fail:
3129         if (adev->asic_reset_res)
3130                 DRM_WARN("ASIC reset failed with error %d for drm dev %s",
3131                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3132         amdgpu_put_xgmi_hive(hive);
3133 }
3134
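/*
 * Minimal sketch (guarded out) of the task_barrier pattern used above:
 * each node in the hive runs the same work item concurrently, and the
 * barrier keeps the BACO enter/exit steps in lock-step across nodes.
 */
#if 0
        /* executed on every hive node in parallel: */
        task_barrier_enter(&hive->tb);  /* wait for all nodes to arrive */
        /* ...per-node reset step one (e.g. BACO enter)... */
        task_barrier_exit(&hive->tb);   /* wait for all nodes to finish */
#endif
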
3135 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3136 {
3137         char *input = amdgpu_lockup_timeout;
3138         char *timeout_setting = NULL;
3139         int index = 0;
3140         long timeout;
3141         int ret = 0;
3142
3143         /*
3144          * By default the timeout for non-compute jobs is 10000 ms,
3145          * and no timeout is enforced on compute jobs.  In SR-IOV
3146          * (one-VF) or passthrough mode, the default timeout for
3147          * compute jobs is 60000 ms.
3148          */
3149         adev->gfx_timeout = msecs_to_jiffies(10000);
3150         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3151         if (amdgpu_sriov_vf(adev))
3152                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3153                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3154         else if (amdgpu_passthrough(adev))
3155                 adev->compute_timeout = msecs_to_jiffies(60000);
3156         else
3157                 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
3158
3159         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3160                 while ((timeout_setting = strsep(&input, ",")) &&
3161                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3162                         ret = kstrtol(timeout_setting, 0, &timeout);
3163                         if (ret)
3164                                 return ret;
3165
3166                         if (timeout == 0) {
3167                                 index++;
3168                                 continue;
3169                         } else if (timeout < 0) {
3170                                 timeout = MAX_SCHEDULE_TIMEOUT;
3171                         } else {
3172                                 timeout = msecs_to_jiffies(timeout);
3173                         }
3174
3175                         switch (index++) {
3176                         case 0:
3177                                 adev->gfx_timeout = timeout;
3178                                 break;
3179                         case 1:
3180                                 adev->compute_timeout = timeout;
3181                                 break;
3182                         case 2:
3183                                 adev->sdma_timeout = timeout;
3184                                 break;
3185                         case 3:
3186                                 adev->video_timeout = timeout;
3187                                 break;
3188                         default:
3189                                 break;
3190                         }
3191                 }
3192                 /*
3193                  * There is only one value specified and
3194                  * it should apply to all non-compute jobs.
3195                  */
3196                 if (index == 1) {
3197                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3198                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3199                                 adev->compute_timeout = adev->gfx_timeout;
3200                 }
3201         }
3202
3203         return ret;
3204 }
3205
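/*
 * Example, matching the parser above: booting with
 *
 *   amdgpu.lockup_timeout=10000,60000,-1,0
 *
 * sets gfx to 10 s and compute to 60 s, disables the sdma timeout
 * (negative values map to MAX_SCHEDULE_TIMEOUT) and keeps the video
 * default (zero entries are skipped).  A single value applies to all
 * non-compute jobs.
 */
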
3206 static const struct attribute *amdgpu_dev_attributes[] = {
3207         &dev_attr_product_name.attr,
3208         &dev_attr_product_number.attr,
3209         &dev_attr_serial_number.attr,
3210         &dev_attr_pcie_replay_count.attr,
3211         NULL
3212 };
3213
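/*
 * These attributes appear in the PCI device's sysfs directory once
 * registered below, e.g. (bus address illustrative):
 *
 *   $ cat /sys/bus/pci/devices/0000:03:00.0/product_name
 *   $ cat /sys/bus/pci/devices/0000:03:00.0/pcie_replay_count
 */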
3214
3215 /**
3216  * amdgpu_device_init - initialize the driver
3217  *
3218  * @adev: amdgpu_device pointer
3219  * @flags: driver flags
3220  *
3221  * Initializes the driver info and hw (all asics).
3222  * Returns 0 for success or an error on failure.
3223  * Called at driver startup.
3224  */
3225 int amdgpu_device_init(struct amdgpu_device *adev,
3226                        uint32_t flags)
3227 {
3228         struct drm_device *ddev = adev_to_drm(adev);
3229         struct pci_dev *pdev = adev->pdev;
3230         int r, i;
3231         bool atpx = false;
3232         u32 max_MBps;
3233
3234         adev->shutdown = false;
3235         adev->flags = flags;
3236
3237         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3238                 adev->asic_type = amdgpu_force_asic_type;
3239         else
3240                 adev->asic_type = flags & AMD_ASIC_MASK;
3241
3242         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3243         if (amdgpu_emu_mode == 1)
3244                 adev->usec_timeout *= 10;
3245         adev->gmc.gart_size = 512 * 1024 * 1024;
3246         adev->accel_working = false;
3247         adev->num_rings = 0;
3248         adev->mman.buffer_funcs = NULL;
3249         adev->mman.buffer_funcs_ring = NULL;
3250         adev->vm_manager.vm_pte_funcs = NULL;
3251         adev->vm_manager.vm_pte_num_scheds = 0;
3252         adev->gmc.gmc_funcs = NULL;
3253         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3254         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3255
3256         adev->smc_rreg = &amdgpu_invalid_rreg;
3257         adev->smc_wreg = &amdgpu_invalid_wreg;
3258         adev->pcie_rreg = &amdgpu_invalid_rreg;
3259         adev->pcie_wreg = &amdgpu_invalid_wreg;
3260         adev->pciep_rreg = &amdgpu_invalid_rreg;
3261         adev->pciep_wreg = &amdgpu_invalid_wreg;
3262         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3263         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3264         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3265         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3266         adev->didt_rreg = &amdgpu_invalid_rreg;
3267         adev->didt_wreg = &amdgpu_invalid_wreg;
3268         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3269         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3270         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3271         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3272
3273         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3274                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3275                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3276
3277         /* all mutex initialization is done here so we
3278          * can call these functions again without locking issues */
3279         atomic_set(&adev->irq.ih.lock, 0);
3280         mutex_init(&adev->firmware.mutex);
3281         mutex_init(&adev->pm.mutex);
3282         mutex_init(&adev->gfx.gpu_clock_mutex);
3283         mutex_init(&adev->srbm_mutex);
3284         mutex_init(&adev->gfx.pipe_reserve_mutex);
3285         mutex_init(&adev->gfx.gfx_off_mutex);
3286         mutex_init(&adev->grbm_idx_mutex);
3287         mutex_init(&adev->mn_lock);
3288         mutex_init(&adev->virt.vf_errors.lock);
3289         hash_init(adev->mn_hash);
3290         atomic_set(&adev->in_gpu_reset, 0);
3291         init_rwsem(&adev->reset_sem);
3292         mutex_init(&adev->psp.mutex);
3293         mutex_init(&adev->notifier_lock);
3294
3295         r = amdgpu_device_check_arguments(adev);
3296         if (r)
3297                 return r;
3298
3299         spin_lock_init(&adev->mmio_idx_lock);
3300         spin_lock_init(&adev->smc_idx_lock);
3301         spin_lock_init(&adev->pcie_idx_lock);
3302         spin_lock_init(&adev->uvd_ctx_idx_lock);
3303         spin_lock_init(&adev->didt_idx_lock);
3304         spin_lock_init(&adev->gc_cac_idx_lock);
3305         spin_lock_init(&adev->se_cac_idx_lock);
3306         spin_lock_init(&adev->audio_endpt_idx_lock);
3307         spin_lock_init(&adev->mm_stats.lock);
3308
3309         INIT_LIST_HEAD(&adev->shadow_list);
3310         mutex_init(&adev->shadow_list_lock);
3311
3312         INIT_DELAYED_WORK(&adev->delayed_init_work,
3313                           amdgpu_device_delayed_init_work_handler);
3314         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3315                           amdgpu_device_delay_enable_gfx_off);
3316
3317         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3318
3319         adev->gfx.gfx_off_req_count = 1;
3320         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3321
3322         atomic_set(&adev->throttling_logging_enabled, 1);
3323         /*
3324          * If throttling continues, logging will be performed every minute
3325          * to avoid log flooding. "-1" is subtracted since the thermal
3326          * throttling interrupt comes every second. Thus, the total logging
3327          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3328          * for throttling interrupt) = 60 seconds.
3329          */
3330         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3331         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3332
3333         /* Registers mapping */
3334         /* TODO: block userspace mapping of io register */
3335         if (adev->asic_type >= CHIP_BONAIRE) {
3336                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3337                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3338         } else {
3339                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3340                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3341         }
3342
3343         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3344         if (adev->rmmio == NULL)
3345                 return -ENOMEM;
3347         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3348         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3349
3350         /* io port mapping */
3351         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3352                 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
3353                         adev->rio_mem_size = pci_resource_len(adev->pdev, i);
3354                         adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
3355                         break;
3356                 }
3357         }
3358         if (adev->rio_mem == NULL)
3359                 DRM_INFO("PCI I/O BAR not found\n");
3360
3361         /* enable PCIE atomic ops */
3362         r = pci_enable_atomic_ops_to_root(adev->pdev,
3363                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3364                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3365         if (r) {
3366                 adev->have_atomics_support = false;
3367                 DRM_INFO("PCIe atomic ops are not supported\n");
3368         } else {
3369                 adev->have_atomics_support = true;
3370         }
3371
3372         amdgpu_device_get_pcie_info(adev);
3373
3374         if (amdgpu_mcbp)
3375                 DRM_INFO("MCBP is enabled\n");
3376
3377         if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3378                 adev->enable_mes = true;
3379
3380         /* detect hw virtualization here */
3381         amdgpu_detect_virtualization(adev);
3382
3383         r = amdgpu_device_get_job_timeout_settings(adev);
3384         if (r) {
3385                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3386                 goto failed_unmap;
3387         }
3388
3389         /* early init functions */
3390         r = amdgpu_device_ip_early_init(adev);
3391         if (r)
3392                 goto failed_unmap;
3393
3394         /* doorbell bar mapping and doorbell index init */
3395         amdgpu_device_doorbell_init(adev);
3396
3397         /* if we have more than one VGA card, then disable the amdgpu VGA resources */
3398         /* this will fail for cards that aren't VGA class devices, just
3399          * ignore it */
3400         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3401                 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3402
3403         if (amdgpu_device_supports_atpx(ddev))
3404                 atpx = true;
3405         if (amdgpu_has_atpx() &&
3406             (amdgpu_is_atpx_hybrid() ||
3407              amdgpu_has_atpx_dgpu_power_cntl()) &&
3408             !pci_is_thunderbolt_attached(adev->pdev))
3409                 vga_switcheroo_register_client(adev->pdev,
3410                                                &amdgpu_switcheroo_ops, atpx);
3411         if (atpx)
3412                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3413
3414         if (amdgpu_emu_mode == 1) {
3415                 /* post the asic on emulation mode */
3416                 emu_soc_asic_init(adev);
3417                 goto fence_driver_init;
3418         }
3419
3420         /* detect whether we have an SR-IOV vBIOS */
3421         amdgpu_device_detect_sriov_bios(adev);
3422
3423         /* check if we need to reset the asic
3424          *  E.g., driver was not cleanly unloaded previously, etc.
3425          */
3426         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3427                 r = amdgpu_asic_reset(adev);
3428                 if (r) {
3429                         dev_err(adev->dev, "asic reset on init failed\n");
3430                         goto failed;
3431                 }
3432         }
3433
3434         pci_enable_pcie_error_reporting(adev->pdev);
3435
3436         /* Post card if necessary */
3437         if (amdgpu_device_need_post(adev)) {
3438                 if (!adev->bios) {
3439                         dev_err(adev->dev, "no vBIOS found\n");
3440                         r = -EINVAL;
3441                         goto failed;
3442                 }
3443                 DRM_INFO("GPU posting now...\n");
3444                 r = amdgpu_device_asic_init(adev);
3445                 if (r) {
3446                         dev_err(adev->dev, "gpu post error!\n");
3447                         goto failed;
3448                 }
3449         }
3450
3451         if (adev->is_atom_fw) {
3452                 /* Initialize clocks */
3453                 r = amdgpu_atomfirmware_get_clock_info(adev);
3454                 if (r) {
3455                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3456                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3457                         goto failed;
3458                 }
3459         } else {
3460                 /* Initialize clocks */
3461                 r = amdgpu_atombios_get_clock_info(adev);
3462                 if (r) {
3463                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3464                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3465                         goto failed;
3466                 }
3467                 /* init i2c buses */
3468                 if (!amdgpu_device_has_dc_support(adev))
3469                         amdgpu_atombios_i2c_init(adev);
3470         }
3471
3472 fence_driver_init:
3473         /* Fence driver */
3474         r = amdgpu_fence_driver_init(adev);
3475         if (r) {
3476                 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3477                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3478                 goto failed;
3479         }
3480
3481         /* init the mode config */
3482         drm_mode_config_init(adev_to_drm(adev));
3483
3484         r = amdgpu_device_ip_init(adev);
3485         if (r) {
3486                 /* failed in exclusive mode due to timeout */
3487                 if (amdgpu_sriov_vf(adev) &&
3488                     !amdgpu_sriov_runtime(adev) &&
3489                     amdgpu_virt_mmio_blocked(adev) &&
3490                     !amdgpu_virt_wait_reset(adev)) {
3491                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3492                         /* Don't send request since VF is inactive. */
3493                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3494                         adev->virt.ops = NULL;
3495                         r = -EAGAIN;
3496                         goto failed;
3497                 }
3498                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3499                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3500                 goto failed;
3501         }
3502
3503         dev_info(adev->dev,
3504                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3505                         adev->gfx.config.max_shader_engines,
3506                         adev->gfx.config.max_sh_per_se,
3507                         adev->gfx.config.max_cu_per_sh,
3508                         adev->gfx.cu_info.number);
3509
3510         adev->accel_working = true;
3511
3512         amdgpu_vm_check_compute_bug(adev);
3513
3514         /* Initialize the buffer migration limit. */
3515         if (amdgpu_moverate >= 0)
3516                 max_MBps = amdgpu_moverate;
3517         else
3518                 max_MBps = 8; /* Allow 8 MB/s. */
3519         /* Get a log2 for easy divisions. */
3520         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3521
3522         amdgpu_fbdev_init(adev);
3523
3524         r = amdgpu_pm_sysfs_init(adev);
3525         if (r) {
3526                 adev->pm_sysfs_en = false;
3527                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3528         } else
3529                 adev->pm_sysfs_en = true;
3530
3531         r = amdgpu_ucode_sysfs_init(adev);
3532         if (r) {
3533                 adev->ucode_sysfs_en = false;
3534                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3535         } else
3536                 adev->ucode_sysfs_en = true;
3537
3538         if (amdgpu_testing & 1) {
3539                 if (adev->accel_working)
3540                         amdgpu_test_moves(adev);
3541                 else
3542                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3543         }
3544         if (amdgpu_benchmarking) {
3545                 if (adev->accel_working)
3546                         amdgpu_benchmark(adev, amdgpu_benchmarking);
3547                 else
3548                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3549         }
3550
3551         /*
3552          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3553          * Otherwise the mgpu fan boost feature will be skipped because
3554          * the gpu instance count would be too low.
3555          */
3556         amdgpu_register_gpu_instance(adev);
3557
3558         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3559          * explicit gating rather than handling it automatically.
3560          */
3561         r = amdgpu_device_ip_late_init(adev);
3562         if (r) {
3563                 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3564                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3565                 goto failed;
3566         }
3567
3568         /* must succeed. */
3569         amdgpu_ras_resume(adev);
3570
3571         queue_delayed_work(system_wq, &adev->delayed_init_work,
3572                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3573
3574         if (amdgpu_sriov_vf(adev))
3575                 flush_delayed_work(&adev->delayed_init_work);
3576
3577         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3578         if (r)
3579                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3580
3581         r = IS_ENABLED(CONFIG_PERF_EVENTS) ?
3582                 amdgpu_pmu_init(adev) : 0;
3583         if (r)
3584                 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3585
3586         /* keep the saved PCI config space at hand for restore on a sudden PCI error */
3587         if (amdgpu_device_cache_pci_state(adev->pdev))
3588                 pci_restore_state(pdev);
3589
3590         return 0;
3591
3592 failed:
3593         amdgpu_vf_error_trans_all(adev);
3594         if (atpx)
3595                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3596
3597 failed_unmap:
3598         iounmap(adev->rmmio);
3599         adev->rmmio = NULL;
3600
3601         return r;
3602 }
3603
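/*
 * Caller sketch (guarded out; the real call site is the DRM/PCI probe
 * path in amdgpu_drv.c / amdgpu_kms.c): init is invoked once per device
 * with flags taken from the PCI ID table, and amdgpu_device_fini()
 * undoes it on unload.
 */
#if 0
        r = amdgpu_device_init(adev, flags);
        if (r)
                return r;
        /* ... device lifetime ... */
        amdgpu_device_fini(adev);
#endif
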
3604 /**
3605  * amdgpu_device_fini - tear down the driver
3606  *
3607  * @adev: amdgpu_device pointer
3608  *
3609  * Tear down the driver info (all asics).
3610  * Called at driver shutdown.
3611  */
3612 void amdgpu_device_fini(struct amdgpu_device *adev)
3613 {
3614         dev_info(adev->dev, "amdgpu: finishing device.\n");
3615         flush_delayed_work(&adev->delayed_init_work);
3616         adev->shutdown = true;
3617
3618         kfree(adev->pci_state);
3619
3620         /* make sure IB tests have finished before entering exclusive mode
3621          * to avoid preemption during the IB tests
3622          */
3623         if (amdgpu_sriov_vf(adev)) {
3624                 amdgpu_virt_request_full_gpu(adev, false);
3625                 amdgpu_virt_fini_data_exchange(adev);
3626         }
3627
3628         /* disable all interrupts */
3629         amdgpu_irq_disable_all(adev);
3630         if (adev->mode_info.mode_config_initialized) {
3631                 if (!amdgpu_device_has_dc_support(adev))
3632                         drm_helper_force_disable_all(adev_to_drm(adev));
3633                 else
3634                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3635         }
3636         amdgpu_fence_driver_fini(adev);
3637         if (adev->pm_sysfs_en)
3638                 amdgpu_pm_sysfs_fini(adev);
3639         amdgpu_fbdev_fini(adev);
3640         amdgpu_device_ip_fini(adev);
3641         release_firmware(adev->firmware.gpu_info_fw);
3642         adev->firmware.gpu_info_fw = NULL;
3643         adev->accel_working = false;
3644         /* free i2c buses */
3645         if (!amdgpu_device_has_dc_support(adev))
3646                 amdgpu_i2c_fini(adev);
3647
3648         if (amdgpu_emu_mode != 1)
3649                 amdgpu_atombios_fini(adev);
3650
3651         kfree(adev->bios);
3652         adev->bios = NULL;
3653         if (amdgpu_has_atpx() &&
3654             (amdgpu_is_atpx_hybrid() ||
3655              amdgpu_has_atpx_dgpu_power_cntl()) &&
3656             !pci_is_thunderbolt_attached(adev->pdev))
3657                 vga_switcheroo_unregister_client(adev->pdev);
3658         if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
3659                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3660         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3661                 vga_client_register(adev->pdev, NULL, NULL, NULL);
3662         if (adev->rio_mem)
3663                 pci_iounmap(adev->pdev, adev->rio_mem);
3664         adev->rio_mem = NULL;
3665         iounmap(adev->rmmio);
3666         adev->rmmio = NULL;
3667         amdgpu_device_doorbell_fini(adev);
3668
3669         if (adev->ucode_sysfs_en)
3670                 amdgpu_ucode_sysfs_fini(adev);
3671
3672         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3673         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3674                 amdgpu_pmu_fini(adev);
3675         if (adev->mman.discovery_bin)
3676                 amdgpu_discovery_fini(adev);
3677 }
3678
3679
3680 /*
3681  * Suspend & resume.
3682  */
3683 /**
3684  * amdgpu_device_suspend - initiate device suspend
3685  *
3686  * @dev: drm dev pointer
3687  * @fbcon: notify the fbdev of suspend
3688  *
3689  * Puts the hw in the suspend state (all asics).
3690  * Returns 0 for success or an error on failure.
3691  * Called at driver suspend.
3692  */
3693 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3694 {
3695         struct amdgpu_device *adev = drm_to_adev(dev);
3696         int r;
3697
3698         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3699                 return 0;
3700
3701         adev->in_suspend = true;
3702         drm_kms_helper_poll_disable(dev);
3703
3704         if (fbcon)
3705                 amdgpu_fbdev_set_suspend(adev, 1);
3706
3707         cancel_delayed_work_sync(&adev->delayed_init_work);
3708
3709         amdgpu_ras_suspend(adev);
3710
3711         r = amdgpu_device_ip_suspend_phase1(adev);
3712
3713         if (!adev->in_s0ix)
3714                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3715
3716         /* evict vram memory */
3717         amdgpu_bo_evict_vram(adev);
3718
3719         amdgpu_fence_driver_suspend(adev);
3720
3721         r = amdgpu_device_ip_suspend_phase2(adev);
3722         /* evict remaining vram memory
3723          * This second call to evict vram is to evict the gart page table
3724          * using the CPU.
3725          */
3726         amdgpu_bo_evict_vram(adev);
3727
3728         return r;
3729 }
3730
3731 /**
3732  * amdgpu_device_resume - initiate device resume
3733  *
3734  * @dev: drm dev pointer
3735  * @fbcon: notify the fbdev of resume
3736  *
3737  * Bring the hw back to operating state (all asics).
3738  * Returns 0 for success or an error on failure.
3739  * Called at driver resume.
3740  */
3741 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3742 {
3743         struct amdgpu_device *adev = drm_to_adev(dev);
3744         int r = 0;
3745
3746         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3747                 return 0;
3748
3749         if (adev->in_s0ix)
3750                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3751
3752         /* post card */
3753         if (amdgpu_device_need_post(adev)) {
3754                 r = amdgpu_device_asic_init(adev);
3755                 if (r)
3756                         dev_err(adev->dev, "amdgpu asic init failed\n");
3757         }
3758
3759         r = amdgpu_device_ip_resume(adev);
3760         if (r) {
3761                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3762                 return r;
3763         }
3764         amdgpu_fence_driver_resume(adev);
3765
3767         r = amdgpu_device_ip_late_init(adev);
3768         if (r)
3769                 return r;
3770
3771         queue_delayed_work(system_wq, &adev->delayed_init_work,
3772                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3773
3774         if (!adev->in_s0ix) {
3775                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
3776                 if (r)
3777                         return r;
3778         }
3779
3780         /* Make sure IB tests flushed */
3781         flush_delayed_work(&adev->delayed_init_work);
3782
3783         if (fbcon)
3784                 amdgpu_fbdev_set_suspend(adev, 0);
3785
3786         drm_kms_helper_poll_enable(dev);
3787
3788         amdgpu_ras_resume(adev);
3789
3790         /*
3791          * Most of the connector probing functions try to acquire runtime pm
3792          * refs to ensure that the GPU is powered on when connector polling is
3793          * performed. Since we're calling this from a runtime PM callback,
3794          * trying to acquire rpm refs will cause us to deadlock.
3795          *
3796          * Since we're guaranteed to be holding the rpm lock, it's safe to
3797          * temporarily disable the rpm helpers so this doesn't deadlock us.
3798          */
3799 #ifdef CONFIG_PM
3800         dev->dev->power.disable_depth++;
3801 #endif
3802         if (!amdgpu_device_has_dc_support(adev))
3803                 drm_helper_hpd_irq_event(dev);
3804         else
3805                 drm_kms_helper_hotplug_event(dev);
3806 #ifdef CONFIG_PM
3807         dev->dev->power.disable_depth--;
3808 #endif
3809         adev->in_suspend = false;
3810
3811         return 0;
3812 }
3813
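/*
 * Sketch (guarded out) of how a system pm callback would drive the
 * suspend/resume pair above; the real wiring lives in amdgpu_drv.c.
 * Passing true asks the fbdev console to follow the hardware state.
 */
#if 0
static int example_pmops_suspend(struct device *dev)
{
        struct drm_device *drm_dev = dev_get_drvdata(dev);

        return amdgpu_device_suspend(drm_dev, true);
}
#endif
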
3814 /**
3815  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3816  *
3817  * @adev: amdgpu_device pointer
3818  *
3819  * The list of all the hardware IPs that make up the asic is walked and
3820  * the check_soft_reset callbacks are run.  check_soft_reset determines
3821  * if the asic is still hung or not.
3822  * Returns true if any of the IPs are still in a hung state, false if not.
3823  */
3824 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3825 {
3826         int i;
3827         bool asic_hang = false;
3828
3829         if (amdgpu_sriov_vf(adev))
3830                 return true;
3831
3832         if (amdgpu_asic_need_full_reset(adev))
3833                 return true;
3834
3835         for (i = 0; i < adev->num_ip_blocks; i++) {
3836                 if (!adev->ip_blocks[i].status.valid)
3837                         continue;
3838                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3839                         adev->ip_blocks[i].status.hang =
3840                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3841                 if (adev->ip_blocks[i].status.hang) {
3842                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3843                         asic_hang = true;
3844                 }
3845         }
3846         return asic_hang;
3847 }
3848
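/*
 * Sketch (guarded out) of a per-IP check_soft_reset hook as dispatched
 * by the walker below: a real implementation samples its block's status
 * registers and reports whether the block is hung.  The register and
 * mask names here are hypothetical stand-ins.
 */
#if 0
static bool example_block_check_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 status = RREG32(EXAMPLE_BLOCK_STATUS); /* hypothetical reg */

        return (status & EXAMPLE_BLOCK_HUNG_MASK) != 0;
}
#endif
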
3849 /**
3850  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3851  *
3852  * @adev: amdgpu_device pointer
3853  *
3854  * The list of all the hardware IPs that make up the asic is walked and the
3855  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
3856  * handles any IP specific hardware or software state changes that are
3857  * necessary for a soft reset to succeed.
3858  * Returns 0 on success, negative error code on failure.
3859  */
3860 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3861 {
3862         int i, r = 0;
3863
3864         for (i = 0; i < adev->num_ip_blocks; i++) {
3865                 if (!adev->ip_blocks[i].status.valid)
3866                         continue;
3867                 if (adev->ip_blocks[i].status.hang &&
3868                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3869                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3870                         if (r)
3871                                 return r;
3872                 }
3873         }
3874
3875         return 0;
3876 }
3877
3878 /**
3879  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3880  *
3881  * @adev: amdgpu_device pointer
3882  *
3883  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
3884  * reset is necessary to recover.
3885  * Returns true if a full asic reset is required, false if not.
3886  */
3887 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3888 {
3889         int i;
3890
3891         if (amdgpu_asic_need_full_reset(adev))
3892                 return true;
3893
3894         for (i = 0; i < adev->num_ip_blocks; i++) {
3895                 if (!adev->ip_blocks[i].status.valid)
3896                         continue;
3897                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3898                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3899                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3900                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3901                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3902                         if (adev->ip_blocks[i].status.hang) {
3903                                 dev_info(adev->dev, "Some block needs a full reset!\n");
3904                                 return true;
3905                         }
3906                 }
3907         }
3908         return false;
3909 }
3910
3911 /**
3912  * amdgpu_device_ip_soft_reset - do a soft reset
3913  *
3914  * @adev: amdgpu_device pointer
3915  *
3916  * The list of all the hardware IPs that make up the asic is walked and the
3917  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
3918  * IP specific hardware or software state changes that are necessary to soft
3919  * reset the IP.
3920  * Returns 0 on success, negative error code on failure.
3921  */
3922 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3923 {
3924         int i, r = 0;
3925
3926         for (i = 0; i < adev->num_ip_blocks; i++) {
3927                 if (!adev->ip_blocks[i].status.valid)
3928                         continue;
3929                 if (adev->ip_blocks[i].status.hang &&
3930                     adev->ip_blocks[i].version->funcs->soft_reset) {
3931                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3932                         if (r)
3933                                 return r;
3934                 }
3935         }
3936
3937         return 0;
3938 }
3939
3940 /**
3941  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3942  *
3943  * @adev: amdgpu_device pointer
3944  *
3945  * The list of all the hardware IPs that make up the asic is walked and the
3946  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
3947  * handles any IP specific hardware or software state changes that are
3948  * necessary after the IP has been soft reset.
3949  * Returns 0 on success, negative error code on failure.
3950  */
3951 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3952 {
3953         int i, r = 0;
3954
3955         for (i = 0; i < adev->num_ip_blocks; i++) {
3956                 if (!adev->ip_blocks[i].status.valid)
3957                         continue;
3958                 if (adev->ip_blocks[i].status.hang &&
3959                     adev->ip_blocks[i].version->funcs->post_soft_reset)
3960                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3961                 if (r)
3962                         return r;
3963         }
3964
3965         return 0;
3966 }
3967
3968 /**
3969  * amdgpu_device_recover_vram - Recover some VRAM contents
3970  *
3971  * @adev: amdgpu_device pointer
3972  *
3973  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
3974  * restore things like GPUVM page tables after a GPU reset where
3975  * the contents of VRAM might be lost.
3976  *
3977  * Returns:
3978  * 0 on success, negative error code on failure.
3979  */
3980 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
3981 {
3982         struct dma_fence *fence = NULL, *next = NULL;
3983         struct amdgpu_bo *shadow;
3984         long r = 1, tmo;
3985
3986         if (amdgpu_sriov_runtime(adev))
3987                 tmo = msecs_to_jiffies(8000);
3988         else
3989                 tmo = msecs_to_jiffies(100);
3990
3991         dev_info(adev->dev, "recover vram bo from shadow start\n");
3992         mutex_lock(&adev->shadow_list_lock);
3993         list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3994
3995                 /* No need to recover an evicted BO */
3996                 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3997                     shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
3998                     shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3999                         continue;
4000
4001                 r = amdgpu_bo_restore_shadow(shadow, &next);
4002                 if (r)
4003                         break;
4004
4005                 if (fence) {
4006                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4007                         dma_fence_put(fence);
4008                         fence = next;
4009                         if (tmo == 0) {
4010                                 r = -ETIMEDOUT;
4011                                 break;
4012                         } else if (tmo < 0) {
4013                                 r = tmo;
4014                                 break;
4015                         }
4016                 } else {
4017                         fence = next;
4018                 }
4019         }
4020         mutex_unlock(&adev->shadow_list_lock);
4021
4022         if (fence)
4023                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4024         dma_fence_put(fence);
4025
4026         if (r < 0 || tmo <= 0) {
4027                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4028                 return -EIO;
4029         }
4030
4031         dev_info(adev->dev, "recover vram bo from shadow done\n");
4032         return 0;
4033 }
4034
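/*
 * The shadows walked above are GTT-resident copies kept in sync with
 * their VRAM parents (typically GPUVM page tables).  Restoring one is a
 * copy back into VRAM that signals a fence; the fences are chained so
 * the loop waits on at most one outstanding copy at a time.
 */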
4035
4036 /**
4037  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4038  *
4039  * @adev: amdgpu_device pointer
4040  * @from_hypervisor: request from hypervisor
4041  *
4042  * Do a VF FLR and reinitialize the ASIC.
4043  * Returns 0 on success, negative error code on failure.
4044  */
4045 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4046                                      bool from_hypervisor)
4047 {
4048         int r;
4049
4050         if (from_hypervisor)
4051                 r = amdgpu_virt_request_full_gpu(adev, true);
4052         else
4053                 r = amdgpu_virt_reset_gpu(adev);
4054         if (r)
4055                 return r;
4056
4057         amdgpu_amdkfd_pre_reset(adev);
4058
4059         /* Resume IP prior to SMC */
4060         r = amdgpu_device_ip_reinit_early_sriov(adev);
4061         if (r)
4062                 goto error;
4063
4064         amdgpu_virt_init_data_exchange(adev);
4065         /* we need to recover the GART prior to running SMC/CP/SDMA resume */
4066         amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4067
4068         r = amdgpu_device_fw_loading(adev);
4069         if (r)
4070                 return r;
4071
4072         /* now we are okay to resume SMC/CP/SDMA */
4073         r = amdgpu_device_ip_reinit_late_sriov(adev);
4074         if (r)
4075                 goto error;
4076
4077         amdgpu_irq_gpu_reset_resume_helper(adev);
4078         r = amdgpu_ib_ring_tests(adev);
4079         amdgpu_amdkfd_post_reset(adev);
4080
4081 error:
4082         amdgpu_virt_release_full_gpu(adev, true);
4083         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4084                 amdgpu_inc_vram_lost(adev);
4085                 r = amdgpu_device_recover_vram(adev);
4086         }
4087
4088         return r;
4089 }
4090
4091 /**
4092  * amdgpu_device_has_job_running - check if there is any job in mirror list
4093  *
4094  * @adev: amdgpu_device pointer
4095  *
4096  * check if there is any job in mirror list
4097  */
4098 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4099 {
4100         int i;
4101         struct drm_sched_job *job;
4102
4103         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4104                 struct amdgpu_ring *ring = adev->rings[i];
4105
4106                 if (!ring || !ring->sched.thread)
4107                         continue;
4108
4109                 spin_lock(&ring->sched.job_list_lock);
4110                 job = list_first_entry_or_null(&ring->sched.pending_list,
4111                                                struct drm_sched_job, list);
4112                 spin_unlock(&ring->sched.job_list_lock);
4113                 if (job)
4114                         return true;
4115         }
4116         return false;
4117 }
4118
4119 /**
4120  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4121  *
4122  * @adev: amdgpu_device pointer
4123  *
4124  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4125  * a hung GPU.
4126  */
4127 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4128 {
4129         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4130                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4131                 return false;
4132         }
4133
4134         if (amdgpu_gpu_recovery == 0)
4135                 goto disabled;
4136
4137         if (amdgpu_sriov_vf(adev))
4138                 return true;
4139
4140         if (amdgpu_gpu_recovery == -1) {
4141                 switch (adev->asic_type) {
4142                 case CHIP_BONAIRE:
4143                 case CHIP_HAWAII:
4144                 case CHIP_TOPAZ:
4145                 case CHIP_TONGA:
4146                 case CHIP_FIJI:
4147                 case CHIP_POLARIS10:
4148                 case CHIP_POLARIS11:
4149                 case CHIP_POLARIS12:
4150                 case CHIP_VEGAM:
4151                 case CHIP_VEGA20:
4152                 case CHIP_VEGA10:
4153                 case CHIP_VEGA12:
4154                 case CHIP_RAVEN:
4155                 case CHIP_ARCTURUS:
4156                 case CHIP_RENOIR:
4157                 case CHIP_NAVI10:
4158                 case CHIP_NAVI14:
4159                 case CHIP_NAVI12:
4160                 case CHIP_SIENNA_CICHLID:
4161                 case CHIP_NAVY_FLOUNDER:
4162                 case CHIP_DIMGREY_CAVEFISH:
4163                         break;
4164                 default:
4165                         goto disabled;
4166                 }
4167         }
4168
4169         return true;
4170
4171 disabled:
4172         dev_info(adev->dev, "GPU recovery disabled.\n");
4173         return false;
4174 }
4175
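/*
 * amdgpu.gpu_recovery is a tri-state: 0 disables recovery outright,
 * 1 forces it on, and the default of -1 ("auto") enables it only for
 * the ASICs listed in the switch above (and always for SR-IOV VFs).
 */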
4176
4177 static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4178                                         struct amdgpu_job *job,
4179                                         bool *need_full_reset_arg)
4180 {
4181         int i, r = 0;
4182         bool need_full_reset = *need_full_reset_arg;
4183
4184         amdgpu_debugfs_wait_dump(adev);
4185
4186         if (amdgpu_sriov_vf(adev)) {
4187                 /* stop the data exchange thread */
4188                 amdgpu_virt_fini_data_exchange(adev);
4189         }
4190
4191         /* block all schedulers and reset given job's ring */
4192         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4193                 struct amdgpu_ring *ring = adev->rings[i];
4194
4195                 if (!ring || !ring->sched.thread)
4196                         continue;
4197
4198                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4199                 amdgpu_fence_driver_force_completion(ring);
4200         }
4201
4202         if (job)
4203                 drm_sched_increase_karma(&job->base);
4204
4205         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4206         if (!amdgpu_sriov_vf(adev)) {
4207
4208                 if (!need_full_reset)
4209                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4210
4211                 if (!need_full_reset) {
4212                         amdgpu_device_ip_pre_soft_reset(adev);
4213                         r = amdgpu_device_ip_soft_reset(adev);
4214                         amdgpu_device_ip_post_soft_reset(adev);
4215                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4216                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4217                                 need_full_reset = true;
4218                         }
4219                 }
4220
4221                 if (need_full_reset)
4222                         r = amdgpu_device_ip_suspend(adev);
4223
4224                 *need_full_reset_arg = need_full_reset;
4225         }
4226
4227         return r;
4228 }
4229
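/*
 * drm_sched_increase_karma() above bumps the offending job's karma so
 * that, past the threshold, the scheduler can treat the submitting
 * context as guilty rather than blindly resubmitting the job after
 * the reset.
 */
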
4230 static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
4231                                struct list_head *device_list_handle,
4232                                bool *need_full_reset_arg,
4233                                bool skip_hw_reset)
4234 {
4235         struct amdgpu_device *tmp_adev = NULL;
4236         bool need_full_reset = *need_full_reset_arg, vram_lost = false;
4237         int r = 0;
4238
4239         /*
4240          * ASIC reset has to be done on all XGMI hive nodes ASAP
4241          * to allow proper links negotiation in FW (within 1 sec)
4242          */
4243         if (!skip_hw_reset && need_full_reset) {
4244                 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4245                         /* For XGMI run all resets in parallel to speed up the process */
4246                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4247                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4248                                         r = -EALREADY;
4249                         } else
4250                                 r = amdgpu_asic_reset(tmp_adev);
4251
4252                         if (r) {
4253                                 dev_err(tmp_adev->dev, "ASIC reset failed with error %d for drm dev %s",
4254                                          r, adev_to_drm(tmp_adev)->unique);
4255                                 break;
4256                         }
4257                 }
4258
4259                 /* For XGMI wait for all resets to complete before proceed */
4260                 if (!r) {
4261                         list_for_each_entry(tmp_adev, device_list_handle,
4262                                             gmc.xgmi.head) {
4263                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4264                                         flush_work(&tmp_adev->xgmi_reset_work);
4265                                         r = tmp_adev->asic_reset_res;
4266                                         if (r)
4267                                                 break;
4268                                 }
4269                         }
4270                 }
4271         }
4272
4273         if (!r && amdgpu_ras_intr_triggered()) {
4274                 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4275                         if (tmp_adev->mmhub.funcs &&
4276                             tmp_adev->mmhub.funcs->reset_ras_error_count)
4277                                 tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
4278                 }
4279
4280                 amdgpu_ras_intr_cleared();
4281         }
4282
4283         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4284                 if (need_full_reset) {
4285                         /* post card */
4286                         if (amdgpu_device_asic_init(tmp_adev))
4287                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4288
4289                         if (!r) {
4290                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4291                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4292                                 if (r)
4293                                         goto out;
4294
4295                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4296                                 if (vram_lost) {
4297                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4298                                         amdgpu_inc_vram_lost(tmp_adev);
4299                                 }
4300
4301                                 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4302                                 if (r)
4303                                         goto out;
4304
4305                                 r = amdgpu_device_fw_loading(tmp_adev);
4306                                 if (r)
4307                                         return r;
4308
4309                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4310                                 if (r)
4311                                         goto out;
4312
4313                                 if (vram_lost)
4314                                         amdgpu_device_fill_reset_magic(tmp_adev);
4315
4316                                 /*
4317                                  * Add this ASIC as tracked as reset was already
4318                                  * complete successfully.
4319                                  */
4320                                 amdgpu_register_gpu_instance(tmp_adev);
4321
4322                                 r = amdgpu_device_ip_late_init(tmp_adev);
4323                                 if (r)
4324                                         goto out;
4325
4326                                 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4327
4328                                 /*
4329                                  * The GPU enters a bad state once the number
4330                                  * of pages retired due to ECC faults reaches
4331                                  * the threshold, and RAS recovery is
4332                                  * scheduled next. So add a check here to
4333                                  * break recovery if the bad page threshold
4334                                  * has indeed been exceeded, and remind the
4335                                  * user to either retire this GPU or raise
4336                                  * bad_page_threshold before the next probe.
4337                                  */
4338                                 if (!amdgpu_ras_check_err_threshold(tmp_adev)) {
4339                                         /* must succeed. */
4340                                         amdgpu_ras_resume(tmp_adev);
4341                                 } else {
4342                                         r = -EINVAL;
4343                                         goto out;
4344                                 }
4345
4346                                 /* Update PSP FW topology after reset */
4347                                 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4348                                         r = amdgpu_xgmi_update_topology(hive, tmp_adev);
4349                         }
4350                 }
4351
4352 out:
4353                 if (!r) {
4354                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4355                         r = amdgpu_ib_ring_tests(tmp_adev);
4356                         if (r) {
4357                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4358                                 r = amdgpu_device_ip_suspend(tmp_adev);
4359                                 need_full_reset = true;
4360                                 r = -EAGAIN;
4361                                 goto end;
4362                         }
4363                 }
4364
4365                 if (!r)
4366                         r = amdgpu_device_recover_vram(tmp_adev);
4367                 else
4368                         tmp_adev->asic_reset_res = r;
4369         }
4370
4371 end:
4372         *need_full_reset_arg = need_full_reset;
4373         return r;
4374 }
4375
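/*
 * Take exclusive reset ownership of a device: atomically flip
 * in_gpu_reset from 0 to 1 (bailing out if another reset is already in
 * flight), take reset_sem (nested against the hive lock when part of a
 * hive), bump the reset counter, and set the MP1 state expected by the
 * chosen reset method. Returns false if the device is already locked.
 */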
4376 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4377                                 struct amdgpu_hive_info *hive)
4378 {
4379         if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4380                 return false;
4381
4382         if (hive) {
4383                 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4384         } else {
4385                 down_write(&adev->reset_sem);
4386         }
4387
4388         atomic_inc(&adev->gpu_reset_counter);
4389         switch (amdgpu_asic_reset_method(adev)) {
4390         case AMD_RESET_METHOD_MODE1:
4391                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4392                 break;
4393         case AMD_RESET_METHOD_MODE2:
4394                 adev->mp1_state = PP_MP1_STATE_RESET;
4395                 break;
4396         default:
4397                 adev->mp1_state = PP_MP1_STATE_NONE;
4398                 break;
4399         }
4400
4401         return true;
4402 }
4403
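/*
 * Counterpart of amdgpu_device_lock_adev(): flush pending VF errors,
 * restore the MP1 state, and release in_gpu_reset and reset_sem.
 */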
4404 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4405 {
4406         amdgpu_vf_error_trans_all(adev);
4407         adev->mp1_state = PP_MP1_STATE_NONE;
4408         atomic_set(&adev->in_gpu_reset, 0);
4409         up_write(&adev->reset_sem);
4410 }
4411
/*
 * Lock a list of amdgpu devices in a hive safely. For a hive with only
 * a single node, this behaves the same as amdgpu_device_lock_adev.
 *
 * Unlocking does not require a rollback.
 */
4418 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4419 {
4420         struct amdgpu_device *tmp_adev = NULL;
4421
4422         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4423                 if (!hive) {
4424                         dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4425                         return -ENODEV;
4426                 }
4427                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4428                         if (!amdgpu_device_lock_adev(tmp_adev, hive))
4429                                 goto roll_back;
4430                 }
4431         } else if (!amdgpu_device_lock_adev(adev, hive))
4432                 return -EAGAIN;
4433
4434         return 0;
4435 roll_back:
4436         if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
                /*
                 * If the locking iteration broke in the middle of a hive,
                 * there may be a race, or a hive device may have been
                 * locked independently. We may or may not be in trouble,
                 * so roll back the locks taken so far and emit a warning.
                 */
4444                 dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4445                 list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4446                         amdgpu_device_unlock_adev(tmp_adev);
4447                 }
4448         }
4449         return -EAGAIN;
4450 }
4451
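/*
 * Re-enable runtime PM of the audio function (function 1 on the GPU's
 * bus, typically the HDMI/DP audio controller) and resume it, undoing
 * amdgpu_device_suspend_display_audio() below.
 */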
4452 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4453 {
4454         struct pci_dev *p = NULL;
4455
4456         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4457                         adev->pdev->bus->number, 1);
4458         if (p) {
4459                 pm_runtime_enable(&(p->dev));
4460                 pm_runtime_resume(&(p->dev));
4461         }
4462 }
4463
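/*
 * Put the display audio function into runtime suspend before a GPU
 * reset. Polls until the audio device reports suspended, bounded by its
 * autosuspend expiration (or a 4 second fallback), then disables
 * runtime PM so it cannot wake up in the middle of the reset.
 */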
4464 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4465 {
4466         enum amd_reset_method reset_method;
4467         struct pci_dev *p = NULL;
4468         u64 expires;
4469
        /*
         * For now, only BACO and mode1 reset are confirmed to
         * suffer from the audio issue if not properly suspended.
         */
4474         reset_method = amdgpu_asic_reset_method(adev);
4475         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4476              (reset_method != AMD_RESET_METHOD_MODE1))
4477                 return -EINVAL;
4478
4479         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4480                         adev->pdev->bus->number, 1);
4481         if (!p)
4482                 return -ENODEV;
4483
4484         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4485         if (!expires)
                /*
                 * If we cannot get the audio device's autosuspend delay,
                 * use a fixed 4s interval. Since 3s is the audio
                 * controller's default autosuspend delay, 4s is
                 * guaranteed to cover it.
                 */
4492                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4493
4494         while (!pm_runtime_status_suspended(&(p->dev))) {
4495                 if (!pm_runtime_suspend(&(p->dev)))
4496                         break;
4497
4498                 if (expires < ktime_get_mono_fast_ns()) {
4499                         dev_warn(adev->dev, "failed to suspend display audio\n");
4500                         /* TODO: abort the succeeding gpu reset? */
4501                         return -ETIMEDOUT;
4502                 }
4503         }
4504
4505         pm_runtime_disable(&(p->dev));
4506
4507         return 0;
4508 }
4509
/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: which job triggered the hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt a soft reset or full reset and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */
4521 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4522                               struct amdgpu_job *job)
4523 {
4524         struct list_head device_list, *device_list_handle =  NULL;
4525         bool need_full_reset = false;
4526         bool job_signaled = false;
4527         struct amdgpu_hive_info *hive = NULL;
4528         struct amdgpu_device *tmp_adev = NULL;
4529         int i, r = 0;
4530         bool need_emergency_restart = false;
4531         bool audio_suspended = false;
4532
4533         /*
4534          * Special case: RAS triggered and full reset isn't supported
4535          */
4536         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4537
        /*
         * Flush RAM to disk so that after reboot
         * the user can read the log and see why the system rebooted.
         */
4542         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4543                 DRM_WARN("Emergency reboot.");
4544
4545                 ksys_sync_helper();
4546                 emergency_restart();
4547         }
4548
4549         dev_info(adev->dev, "GPU %s begin!\n",
4550                 need_emergency_restart ? "jobs stop":"reset");
4551
        /*
         * Here we trylock to avoid a chain of resets executing from either
         * jobs triggered on different adevs in an XGMI hive or jobs on
         * different schedulers for the same device while this TO handler
         * is running. We always reset all schedulers for a device and all
         * devices for an XGMI hive, so that should take care of them too.
         */
4559         hive = amdgpu_get_xgmi_hive(adev);
4560         if (hive) {
4561                 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4562                         DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4563                                 job ? job->base.id : -1, hive->hive_id);
4564                         amdgpu_put_xgmi_hive(hive);
4565                         if (job)
4566                                 drm_sched_increase_karma(&job->base);
4567                         return 0;
4568                 }
4569                 mutex_lock(&hive->hive_lock);
4570         }
4571
        /*
         * Lock the device before we try to operate on the linked list.
         * If we didn't get the device lock, don't touch the linked list
         * since others may be iterating over it.
         */
4577         r = amdgpu_device_lock_hive_adev(adev, hive);
4578         if (r) {
4579                 dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4580                                         job ? job->base.id : -1);
4581
                /* even though we skipped this reset, we still need to mark the job as guilty */
4583                 if (job)
4584                         drm_sched_increase_karma(&job->base);
4585                 goto skip_recovery;
4586         }
4587
        /*
         * Build the list of devices to reset.
         * If we are in XGMI hive mode, re-sort the device list
         * to put adev in the first position.
         */
4593         INIT_LIST_HEAD(&device_list);
4594         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4595                 if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
4596                         list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
4597                 device_list_handle = &hive->device_list;
4598         } else {
4599                 list_add_tail(&adev->gmc.xgmi.head, &device_list);
4600                 device_list_handle = &device_list;
4601         }
4602
        /* block all schedulers and reset the given job's ring */
4604         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
                /*
                 * Try to put the audio codec into suspend state
                 * before the gpu reset starts.
                 *
                 * The power domain of the graphics device is
                 * shared with the AZ power domain. Without this,
                 * we may change the audio hardware behind the
                 * audio driver's back, which would trigger
                 * audio codec errors.
                 */
4615                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
4616                         audio_suspended = true;
4617
4618                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
4619
4620                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4621
4622                 if (!amdgpu_sriov_vf(tmp_adev))
4623                         amdgpu_amdkfd_pre_reset(tmp_adev);
4624
                /*
                 * Mark the ASICs to be reset as untracked first,
                 * and add them back after the reset has completed.
                 */
4629                 amdgpu_unregister_gpu_instance(tmp_adev);
4630
4631                 amdgpu_fbdev_set_suspend(tmp_adev, 1);
4632
                /* disable RAS on ALL IPs */
4634                 if (!need_emergency_restart &&
4635                       amdgpu_device_ip_need_full_reset(tmp_adev))
4636                         amdgpu_ras_suspend(tmp_adev);
4637
4638                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4639                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4640
4641                         if (!ring || !ring->sched.thread)
4642                                 continue;
4643
4644                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4645
4646                         if (need_emergency_restart)
4647                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4648                 }
4649         }
4650
4651         if (need_emergency_restart)
4652                 goto skip_sched_resume;
4653
        /*
         * Must check the guilty signal here since, after this point,
         * all old HW fences are force-signaled.
         *
         * job->base holds a reference to the parent fence.
         */
4660         if (job && job->base.s_fence->parent &&
4661             dma_fence_is_signaled(job->base.s_fence->parent)) {
4662                 job_signaled = true;
4663                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4664                 goto skip_hw_reset;
4665         }
4666
retry:  /* Pre-ASIC reset for each adev, including the rest of the XGMI hive. */
4668         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4669                 r = amdgpu_device_pre_asic_reset(tmp_adev,
4670                                                  (tmp_adev == adev) ? job : NULL,
4671                                                  &need_full_reset);
                /* TODO: Should we stop? */
4673                 if (r) {
                        dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s",
                                r, adev_to_drm(tmp_adev)->unique);
4676                         tmp_adev->asic_reset_res = r;
4677                 }
4678         }
4679
        /* Actual ASIC reset if needed */
        /* TODO: Implement XGMI hive reset logic for SRIOV */
4682         if (amdgpu_sriov_vf(adev)) {
                r = amdgpu_device_reset_sriov(adev, !job);
4684                 if (r)
4685                         adev->asic_reset_res = r;
4686         } else {
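                /*
                 * amdgpu_do_asic_reset() returns -EAGAIN when the
                 * post-reset IB tests fail; in that case go back and
                 * redo the pre-reset steps with a full reset.
                 */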
4687                 r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
                if (r == -EAGAIN)
4689                         goto retry;
4690         }
4691
4692 skip_hw_reset:
4693
        /* Post ASIC reset for all devs. */
4695         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4696
4697                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4698                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4699
4700                         if (!ring || !ring->sched.thread)
4701                                 continue;
4702
                        /* No point in resubmitting jobs if we didn't do a HW reset */
4704                         if (!tmp_adev->asic_reset_res && !job_signaled)
4705                                 drm_sched_resubmit_jobs(&ring->sched);
4706
4707                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4708                 }
4709
4710                 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4711                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
4712                 }
4713
4714                 tmp_adev->asic_reset_res = 0;
4715
4716                 if (r) {
                        /* bad news, how do we tell it to userspace? */
4718                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
4719                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4720                 } else {
4721                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
4722                 }
4723         }
4724
4725 skip_sched_resume:
4726         list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
                /* unlock kfd: SRIOV does it separately */
4728                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
4729                         amdgpu_amdkfd_post_reset(tmp_adev);
4730                 if (audio_suspended)
4731                         amdgpu_device_resume_display_audio(tmp_adev);
4732                 amdgpu_device_unlock_adev(tmp_adev);
4733         }
4734
4735 skip_recovery:
4736         if (hive) {
4737                 atomic_set(&hive->in_reset, 0);
4738                 mutex_unlock(&hive->hive_lock);
4739                 amdgpu_put_xgmi_hive(hive);
4740         }
4741
4742         if (r && r != -EAGAIN)
4743                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
4744         return r;
4745 }
4746
/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
4756 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
4757 {
4758         struct pci_dev *pdev;
4759         enum pci_bus_speed speed_cap, platform_speed_cap;
4760         enum pcie_link_width platform_link_width;
4761
4762         if (amdgpu_pcie_gen_cap)
4763                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
4764
4765         if (amdgpu_pcie_lane_cap)
4766                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
4767
4768         /* covers APUs as well */
4769         if (pci_is_root_bus(adev->pdev->bus)) {
4770                 if (adev->pm.pcie_gen_mask == 0)
4771                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4772                 if (adev->pm.pcie_mlw_mask == 0)
4773                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4774                 return;
4775         }
4776
4777         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4778                 return;
4779
4780         pcie_bandwidth_available(adev->pdev, NULL,
4781                                  &platform_speed_cap, &platform_link_width);
4782
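        /*
         * The CAIL link-speed masks are cumulative: each detected cap
         * ORs in that generation together with every lower one, since a
         * link that supports e.g. gen4 can also train down to gen1-3.
         */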
4783         if (adev->pm.pcie_gen_mask == 0) {
4784                 /* asic caps */
4785                 pdev = adev->pdev;
4786                 speed_cap = pcie_get_speed_cap(pdev);
4787                 if (speed_cap == PCI_SPEED_UNKNOWN) {
4788                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4789                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4790                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4791                 } else {
4792                         if (speed_cap == PCIE_SPEED_32_0GT)
4793                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4794                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4795                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4796                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
4797                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
4798                         else if (speed_cap == PCIE_SPEED_16_0GT)
4799                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4800                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4801                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4802                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4803                         else if (speed_cap == PCIE_SPEED_8_0GT)
4804                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4805                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4806                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4807                         else if (speed_cap == PCIE_SPEED_5_0GT)
4808                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4809                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4810                         else
4811                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4812                 }
4813                 /* platform caps */
4814                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4815                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4816                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4817                 } else {
4818                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
4819                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4820                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4821                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4822                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
4823                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
4824                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
4825                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4826                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4827                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4828                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4829                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4830                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4831                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4832                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4833                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4834                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4835                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4836                         else
4837                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4838
4839                 }
4840         }
4841         if (adev->pm.pcie_mlw_mask == 0) {
4842                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4843                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4844                 } else {
4845                         switch (platform_link_width) {
4846                         case PCIE_LNK_X32:
4847                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4848                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4849                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4850                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4851                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4852                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4853                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4854                                 break;
4855                         case PCIE_LNK_X16:
4856                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4857                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4858                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4859                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4860                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4861                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4862                                 break;
4863                         case PCIE_LNK_X12:
4864                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4865                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4866                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4867                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4868                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4869                                 break;
4870                         case PCIE_LNK_X8:
4871                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4872                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4873                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4874                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4875                                 break;
4876                         case PCIE_LNK_X4:
4877                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4878                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4879                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4880                                 break;
4881                         case PCIE_LNK_X2:
4882                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4883                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4884                                 break;
4885                         case PCIE_LNK_X1:
4886                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4887                                 break;
4888                         default:
4889                                 break;
4890                         }
4891                 }
4892         }
4893 }
4894
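/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * When RAS is supported, the doorbell interrupt is disabled first so a
 * doorbell cannot wake the chip while it is off, then BACO entry is
 * requested through the DPM code. Returns -ENOTSUPP if the device does
 * not support BACO.
 */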
4895 int amdgpu_device_baco_enter(struct drm_device *dev)
4896 {
4897         struct amdgpu_device *adev = drm_to_adev(dev);
4898         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4899
4900         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
4901                 return -ENOTSUPP;
4902
4903         if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
4904                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
4905
4906         return amdgpu_dpm_baco_enter(adev);
4907 }
4908
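/**
 * amdgpu_device_baco_exit - exit BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Counterpart of amdgpu_device_baco_enter(): requests BACO exit through
 * the DPM code, then re-enables the doorbell interrupt when RAS is
 * supported. Returns -ENOTSUPP if the device does not support BACO.
 */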
4909 int amdgpu_device_baco_exit(struct drm_device *dev)
4910 {
4911         struct amdgpu_device *adev = drm_to_adev(dev);
4912         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4913         int ret = 0;
4914
4915         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
4916                 return -ENOTSUPP;
4917
4918         ret = amdgpu_dpm_baco_exit(adev);
4919         if (ret)
4920                 return ret;
4921
4922         if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
4923                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
4924
4925         return 0;
4926 }
4927
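/*
 * Cancel the timeout (TDR) work of every scheduler ring on this device
 * and wait for any instance that is already running to finish.
 */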
4928 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
4929 {
4930         int i;
4931
4932         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4933                 struct amdgpu_ring *ring = adev->rings[i];
4934
4935                 if (!ring || !ring->sched.thread)
4936                         continue;
4937
4938                 cancel_delayed_work_sync(&ring->sched.work_tdr);
4939         }
4940 }
4941
4942 /**
4943  * amdgpu_pci_error_detected - Called when a PCI error is detected.
4944  * @pdev: PCI device struct
4945  * @state: PCI channel state
4946  *
4947  * Description: Called when a PCI error is detected.
4948  *
4949  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
4950  */
4951 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
4952 {
4953         struct drm_device *dev = pci_get_drvdata(pdev);
4954         struct amdgpu_device *adev = drm_to_adev(dev);
4955         int i;
4956
4957         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
4958
4959         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4960                 DRM_WARN("No support for XGMI hive yet...");
4961                 return PCI_ERS_RESULT_DISCONNECT;
4962         }
4963
4964         switch (state) {
4965         case pci_channel_io_normal:
4966                 return PCI_ERS_RESULT_CAN_RECOVER;
4967         /* Fatal error, prepare for slot reset */
4968         case pci_channel_io_frozen:
                /*
                 * Cancel and wait for all TDRs in progress if failing to
                 * set adev->in_gpu_reset in amdgpu_device_lock_adev.
                 *
                 * Locking adev->reset_sem will prevent any external access
                 * to the GPU during PCI error recovery.
                 */
4976                 while (!amdgpu_device_lock_adev(adev, NULL))
4977                         amdgpu_cancel_all_tdr(adev);
4978
4979                 /*
4980                  * Block any work scheduling as we do for regular GPU reset
4981                  * for the duration of the recovery
4982                  */
4983                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4984                         struct amdgpu_ring *ring = adev->rings[i];
4985
4986                         if (!ring || !ring->sched.thread)
4987                                 continue;
4988
4989                         drm_sched_stop(&ring->sched, NULL);
4990                 }
4991                 return PCI_ERS_RESULT_NEED_RESET;
4992         case pci_channel_io_perm_failure:
4993                 /* Permanent error, prepare for device removal */
4994                 return PCI_ERS_RESULT_DISCONNECT;
4995         }
4996
4997         return PCI_ERS_RESULT_NEED_RESET;
4998 }
4999
5000 /**
5001  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5002  * @pdev: pointer to PCI device
5003  */
5004 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5005 {
5007         DRM_INFO("PCI error: mmio enabled callback!!\n");
5008
5009         /* TODO - dump whatever for debugging purposes */
5010
        /* This is called only if amdgpu_pci_error_detected returns
         * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
         * works, so there is no need to reset the slot.
         */
5015
5016         return PCI_ERS_RESULT_RECOVERED;
5017 }
5018
5019 /**
5020  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5021  * @pdev: PCI device struct
5022  *
5023  * Description: This routine is called by the pci error recovery
5024  * code after the PCI slot has been reset, just before we
5025  * should resume normal operations.
5026  */
5027 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5028 {
5029         struct drm_device *dev = pci_get_drvdata(pdev);
5030         struct amdgpu_device *adev = drm_to_adev(dev);
5031         int r, i;
5032         bool need_full_reset = true;
5033         u32 memsize;
5034         struct list_head device_list;
5035
5036         DRM_INFO("PCI error: slot reset callback!!\n");
5037
5038         INIT_LIST_HEAD(&device_list);
5039         list_add_tail(&adev->gmc.xgmi.head, &device_list);
5040
5041         /* wait for asic to come out of reset */
5042         msleep(500);
5043
        /* Restore PCI config space */
5045         amdgpu_device_load_pci_state(pdev);
5046
        /* confirm ASIC came out of reset */
5048         for (i = 0; i < adev->usec_timeout; i++) {
5049                 memsize = amdgpu_asic_get_config_memsize(adev);
5050
5051                 if (memsize != 0xffffffff)
5052                         break;
5053                 udelay(1);
5054         }
5055         if (memsize == 0xffffffff) {
5056                 r = -ETIME;
5057                 goto out;
5058         }
5059
5060         adev->in_pci_err_recovery = true;
5061         r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
5062         adev->in_pci_err_recovery = false;
5063         if (r)
5064                 goto out;
5065
5066         r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
5067
5068 out:
5069         if (!r) {
5070                 if (amdgpu_device_cache_pci_state(adev->pdev))
5071                         pci_restore_state(adev->pdev);
5072
5073                 DRM_INFO("PCIe error recovery succeeded\n");
5074         } else {
5075                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5076                 amdgpu_device_unlock_adev(adev);
5077         }
5078
5079         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5080 }
5081
5082 /**
5083  * amdgpu_pci_resume() - resume normal ops after PCI reset
5084  * @pdev: pointer to PCI device
5085  *
5086  * Called when the error recovery driver tells us that its
5087  * OK to resume normal operation.
5088  */
5089 void amdgpu_pci_resume(struct pci_dev *pdev)
5090 {
5091         struct drm_device *dev = pci_get_drvdata(pdev);
5092         struct amdgpu_device *adev = drm_to_adev(dev);
5093         int i;
5094
5096         DRM_INFO("PCI error: resume callback!!\n");
5097
5098         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5099                 struct amdgpu_ring *ring = adev->rings[i];
5100
5101                 if (!ring || !ring->sched.thread)
5102                         continue;
5103
5105                 drm_sched_resubmit_jobs(&ring->sched);
5106                 drm_sched_start(&ring->sched, true);
5107         }
5108
5109         amdgpu_device_unlock_adev(adev);
5110 }
5111
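/*
 * Snapshot the device's PCI config space into adev->pci_state so it can
 * be restored after a reset or PCI error. Returns true on success.
 */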
5112 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5113 {
5114         struct drm_device *dev = pci_get_drvdata(pdev);
5115         struct amdgpu_device *adev = drm_to_adev(dev);
5116         int r;
5117
5118         r = pci_save_state(pdev);
5119         if (!r) {
5120                 kfree(adev->pci_state);
5121
5122                 adev->pci_state = pci_store_saved_state(pdev);
5123
5124                 if (!adev->pci_state) {
5125                         DRM_ERROR("Failed to store PCI saved state");
5126                         return false;
5127                 }
5128         } else {
5129                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5130                 return false;
5131         }
5132
5133         return true;
5134 }
5135
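/*
 * Load the PCI config space snapshot captured by
 * amdgpu_device_cache_pci_state() and restore it to the device.
 * Returns false if no snapshot exists or loading it fails.
 */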
5136 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5137 {
5138         struct drm_device *dev = pci_get_drvdata(pdev);
5139         struct amdgpu_device *adev = drm_to_adev(dev);
5140         int r;
5141
5142         if (!adev->pci_state)
5143                 return false;
5144
5145         r = pci_load_saved_state(pdev, adev->pci_state);
5146
5147         if (!r) {
5148                 pci_restore_state(pdev);
5149         } else {
5150                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5151                 return false;
5152         }
5153
5154         return true;
5155 }
5156
5157