drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
43 #include "atom.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
46 #include "amd_pcie.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
48 #include "si.h"
49 #endif
50 #ifdef CONFIG_DRM_AMDGPU_CIK
51 #include "cik.h"
52 #endif
53 #include "vi.h"
54 #include "soc15.h"
55 #include "nv.h"
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
60
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
63
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68
69 #include <linux/suspend.h>
70 #include <drm/task_barrier.h>
71 #include <linux/pm_runtime.h>
72
73 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
74 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
75 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
76 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
84
85 #define AMDGPU_RESUME_MS                2000
86
87 const char *amdgpu_asic_name[] = {
88         "TAHITI",
89         "PITCAIRN",
90         "VERDE",
91         "OLAND",
92         "HAINAN",
93         "BONAIRE",
94         "KAVERI",
95         "KABINI",
96         "HAWAII",
97         "MULLINS",
98         "TOPAZ",
99         "TONGA",
100         "FIJI",
101         "CARRIZO",
102         "STONEY",
103         "POLARIS10",
104         "POLARIS11",
105         "POLARIS12",
106         "VEGAM",
107         "VEGA10",
108         "VEGA12",
109         "VEGA20",
110         "RAVEN",
111         "ARCTURUS",
112         "RENOIR",
113         "ALDEBARAN",
114         "NAVI10",
115         "NAVI14",
116         "NAVI12",
117         "SIENNA_CICHLID",
118         "NAVY_FLOUNDER",
119         "VANGOGH",
120         "DIMGREY_CAVEFISH",
121         "LAST",
122 };
123
124 /**
125  * DOC: pcie_replay_count
126  *
127  * The amdgpu driver provides a sysfs API for reporting the total number
128  * of PCIe replays (NAKs).
129  * The file pcie_replay_count is used for this and returns the total
130  * number of replays as a sum of the NAKs generated and the NAKs received.
131  */
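/*
 * Illustrative userspace sketch (added for this write-up, not part of the
 * kernel source): reading pcie_replay_count through sysfs. The card0 path
 * is an assumption; substitute the DRM card index of the GPU in question.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long replays = 0;
        FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");

        if (!f) {
                perror("pcie_replay_count");
                return 1;
        }
        if (fscanf(f, "%llu", &replays) == 1)
                printf("PCIe replay count: %llu\n", replays);
        fclose(f);
        return 0;
}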
132
133 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
134                 struct device_attribute *attr, char *buf)
135 {
136         struct drm_device *ddev = dev_get_drvdata(dev);
137         struct amdgpu_device *adev = drm_to_adev(ddev);
138         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
139
140         return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
141 }
142
143 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
144                 amdgpu_device_get_pcie_replay_count, NULL);
145
146 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
147
148 /**
149  * DOC: product_name
150  *
151  * The amdgpu driver provides a sysfs API for reporting the product name
152  * for the device
153  * The file product_name is used for this and returns the product name
154  * as returned from the FRU.
155  * NOTE: This is only available for certain server cards
156  */
157
158 static ssize_t amdgpu_device_get_product_name(struct device *dev,
159                 struct device_attribute *attr, char *buf)
160 {
161         struct drm_device *ddev = dev_get_drvdata(dev);
162         struct amdgpu_device *adev = drm_to_adev(ddev);
163
164         return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
165 }
166
167 static DEVICE_ATTR(product_name, S_IRUGO,
168                 amdgpu_device_get_product_name, NULL);
169
170 /**
171  * DOC: product_number
172  *
173  * The amdgpu driver provides a sysfs API for reporting the part number
174  * for the device
175  * The file product_number is used for this and returns the part number
176  * as returned from the FRU.
177  * NOTE: This is only available for certain server cards
178  */
179
180 static ssize_t amdgpu_device_get_product_number(struct device *dev,
181                 struct device_attribute *attr, char *buf)
182 {
183         struct drm_device *ddev = dev_get_drvdata(dev);
184         struct amdgpu_device *adev = drm_to_adev(ddev);
185
186         return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
187 }
188
189 static DEVICE_ATTR(product_number, S_IRUGO,
190                 amdgpu_device_get_product_number, NULL);
191
192 /**
193  * DOC: serial_number
194  *
195  * The amdgpu driver provides a sysfs API for reporting the serial number
196  * for the device
197  * The file serial_number is used for this and returns the serial number
198  * as returned from the FRU.
199  * NOTE: This is only available for certain server cards
200  */
201
202 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
203                 struct device_attribute *attr, char *buf)
204 {
205         struct drm_device *ddev = dev_get_drvdata(dev);
206         struct amdgpu_device *adev = drm_to_adev(ddev);
207
208         return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
209 }
210
211 static DEVICE_ATTR(serial_number, S_IRUGO,
212                 amdgpu_device_get_serial_number, NULL);
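/*
 * Usage note (added for illustration, not part of the kernel source): on
 * boards whose FRU EEPROM is read successfully, the three attributes above
 * typically show up next to the other PCI device attributes, e.g.
 *
 *   /sys/class/drm/card0/device/product_name
 *   /sys/class/drm/card0/device/product_number
 *   /sys/class/drm/card0/device/serial_number
 *
 * and read back as plain strings; the card0 index is an assumption.
 */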
213
214 /**
215  * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
216  *
217  * @dev: drm_device pointer
218  *
219  * Returns true if the device is a dGPU with HG/PX power control,
220  * otherwise return false.
221  */
222 bool amdgpu_device_supports_atpx(struct drm_device *dev)
223 {
224         struct amdgpu_device *adev = drm_to_adev(dev);
225
226         if (adev->flags & AMD_IS_PX)
227                 return true;
228         return false;
229 }
230
231 /**
232  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
233  *
234  * @dev: drm_device pointer
235  *
236  * Returns true if the device is a dGPU with ACPI power resources,
237  * otherwise return false.
238  */
239 bool amdgpu_device_supports_boco(struct drm_device *dev)
240 {
241         struct amdgpu_device *adev = drm_to_adev(dev);
242
243         if (adev->has_pr3)
244                 return true;
245         return false;
246 }
247
248 /**
249  * amdgpu_device_supports_baco - Does the device support BACO
250  *
251  * @dev: drm_device pointer
252  *
253  * Returns true if the device supports BACO,
254  * otherwise return false.
255  */
256 bool amdgpu_device_supports_baco(struct drm_device *dev)
257 {
258         struct amdgpu_device *adev = drm_to_adev(dev);
259
260         return amdgpu_asic_supports_baco(adev);
261 }
262
263 /*
264  * VRAM access helper functions
265  */
266
267 /**
268  * amdgpu_device_vram_access - read/write a buffer in vram
269  *
270  * @adev: amdgpu_device pointer
271  * @pos: offset of the buffer in vram
272  * @buf: virtual address of the buffer in system memory
273  * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes long
274  * @write: true - write to vram, otherwise - read from vram
275  */
276 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
277                                uint32_t *buf, size_t size, bool write)
278 {
279         unsigned long flags;
280         uint32_t hi = ~0;
281         uint64_t last;
282
283
284 #ifdef CONFIG_64BIT
285         last = min(pos + size, adev->gmc.visible_vram_size);
286         if (last > pos) {
287                 void __iomem *addr = adev->mman.aper_base_kaddr + pos;
288                 size_t count = last - pos;
289
290                 if (write) {
291                         memcpy_toio(addr, buf, count);
292                         mb();
293                         amdgpu_asic_flush_hdp(adev, NULL);
294                 } else {
295                         amdgpu_asic_invalidate_hdp(adev, NULL);
296                         mb();
297                         memcpy_fromio(buf, addr, count);
298                 }
299
300                 if (count == size)
301                         return;
302
303                 pos += count;
304                 buf += count / 4;
305                 size -= count;
306         }
307 #endif
308
309         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
310         for (last = pos + size; pos < last; pos += 4) {
311                 uint32_t tmp = pos >> 31;
312
313                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
314                 if (tmp != hi) {
315                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
316                         hi = tmp;
317                 }
318                 if (write)
319                         WREG32_NO_KIQ(mmMM_DATA, *buf++);
320                 else
321                         *buf++ = RREG32_NO_KIQ(mmMM_DATA);
322         }
323         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
324 }
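/*
 * Hedged usage sketch (added for illustration, not part of the kernel
 * source): reading the first 256 bytes of VRAM with the helper above from a
 * hypothetical debug routine. Buffer size and offset are arbitrary; the
 * buffer must be dword sized since the helper walks it in 32-bit steps.
 */
static void example_dump_vram_head(struct amdgpu_device *adev)
{
        uint32_t data[64];      /* 256 bytes */

        /* write=false, so this copies from VRAM offset 0 into data[] */
        amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
}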
325
326 /*
327  * register access helper functions.
328  */
329 /**
330  * amdgpu_device_rreg - read a memory mapped IO or indirect register
331  *
332  * @adev: amdgpu_device pointer
333  * @reg: dword aligned register offset
334  * @acc_flags: access flags which require special behavior
335  *
336  * Returns the 32 bit value from the offset specified.
337  */
338 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
339                             uint32_t reg, uint32_t acc_flags)
340 {
341         uint32_t ret;
342
343         if (adev->in_pci_err_recovery)
344                 return 0;
345
346         if ((reg * 4) < adev->rmmio_size) {
347                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
348                     amdgpu_sriov_runtime(adev) &&
349                     down_read_trylock(&adev->reset_sem)) {
350                         ret = amdgpu_kiq_rreg(adev, reg);
351                         up_read(&adev->reset_sem);
352                 } else {
353                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
354                 }
355         } else {
356                 ret = adev->pcie_rreg(adev, reg * 4);
357         }
358
359         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
360
361         return ret;
362 }
363
364 /*
365  * MMIO register read with byte offset helper function
366  * @offset: byte offset from MMIO start
367  *
368 */
369
370 /**
371  * amdgpu_mm_rreg8 - read a memory mapped IO register
372  *
373  * @adev: amdgpu_device pointer
374  * @offset: byte aligned register offset
375  *
376  * Returns the 8 bit value from the offset specified.
377  */
378 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
379 {
380         if (adev->in_pci_err_recovery)
381                 return 0;
382
383         if (offset < adev->rmmio_size)
384                 return (readb(adev->rmmio + offset));
385         BUG();
386 }
387
388 /*
389  * MMIO register write with byte offset helper function
390  * @offset: byte offset from MMIO start
391  * @value: the value to be written to the register
392  *
393 */
394 /**
395  * amdgpu_mm_wreg8 - write a memory mapped IO register
396  *
397  * @adev: amdgpu_device pointer
398  * @offset: byte aligned register offset
399  * @value: 8 bit value to write
400  *
401  * Writes the value specified to the offset specified.
402  */
403 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
404 {
405         if (adev->in_pci_err_recovery)
406                 return;
407
408         if (offset < adev->rmmio_size)
409                 writeb(value, adev->rmmio + offset);
410         else
411                 BUG();
412 }
413
414 /**
415  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
416  *
417  * @adev: amdgpu_device pointer
418  * @reg: dword aligned register offset
419  * @v: 32 bit value to write to the register
420  * @acc_flags: access flags which require special behavior
421  *
422  * Writes the value specified to the offset specified.
423  */
424 void amdgpu_device_wreg(struct amdgpu_device *adev,
425                         uint32_t reg, uint32_t v,
426                         uint32_t acc_flags)
427 {
428         if (adev->in_pci_err_recovery)
429                 return;
430
431         if ((reg * 4) < adev->rmmio_size) {
432                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
433                     amdgpu_sriov_runtime(adev) &&
434                     down_read_trylock(&adev->reset_sem)) {
435                         amdgpu_kiq_wreg(adev, reg, v);
436                         up_read(&adev->reset_sem);
437                 } else {
438                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
439                 }
440         } else {
441                 adev->pcie_wreg(adev, reg * 4, v);
442         }
443
444         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
445 }
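/*
 * Usage note (added for illustration, not part of the kernel source): most
 * amdgpu code does not call amdgpu_device_rreg()/amdgpu_device_wreg()
 * directly but goes through the RREG32()/WREG32() style macros (and their
 * *_NO_KIQ variants used later in this file), for example:
 *
 *      tmp = RREG32(reg);
 *      WREG32(reg, tmp | 0x1);
 *
 * The exact macro expansions live in amdgpu.h and are an assumption here.
 */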
446
447 /*
448  * amdgpu_mm_wreg_mmio_rlc - write a register either through MMIO or through the RLC path if in range
449  *
450  * This function is invoked only for debugfs register access.
451  */
452 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
453                              uint32_t reg, uint32_t v)
454 {
455         if (adev->in_pci_err_recovery)
456                 return;
457
458         if (amdgpu_sriov_fullaccess(adev) &&
459             adev->gfx.rlc.funcs &&
460             adev->gfx.rlc.funcs->is_rlcg_access_range) {
461                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
462                         return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
463         } else {
464                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
465         }
466 }
467
468 /**
469  * amdgpu_mm_rdoorbell - read a doorbell dword
470  *
471  * @adev: amdgpu_device pointer
472  * @index: doorbell index
473  *
474  * Returns the value in the doorbell aperture at the
475  * requested doorbell index (CIK).
476  */
477 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
478 {
479         if (adev->in_pci_err_recovery)
480                 return 0;
481
482         if (index < adev->doorbell.num_doorbells) {
483                 return readl(adev->doorbell.ptr + index);
484         } else {
485                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
486                 return 0;
487         }
488 }
489
490 /**
491  * amdgpu_mm_wdoorbell - write a doorbell dword
492  *
493  * @adev: amdgpu_device pointer
494  * @index: doorbell index
495  * @v: value to write
496  *
497  * Writes @v to the doorbell aperture at the
498  * requested doorbell index (CIK).
499  */
500 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
501 {
502         if (adev->in_pci_err_recovery)
503                 return;
504
505         if (index < adev->doorbell.num_doorbells) {
506                 writel(v, adev->doorbell.ptr + index);
507         } else {
508                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
509         }
510 }
511
512 /**
513  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
514  *
515  * @adev: amdgpu_device pointer
516  * @index: doorbell index
517  *
518  * Returns the value in the doorbell aperture at the
519  * requested doorbell index (VEGA10+).
520  */
521 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
522 {
523         if (adev->in_pci_err_recovery)
524                 return 0;
525
526         if (index < adev->doorbell.num_doorbells) {
527                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
528         } else {
529                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
530                 return 0;
531         }
532 }
533
534 /**
535  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
536  *
537  * @adev: amdgpu_device pointer
538  * @index: doorbell index
539  * @v: value to write
540  *
541  * Writes @v to the doorbell aperture at the
542  * requested doorbell index (VEGA10+).
543  */
544 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
545 {
546         if (adev->in_pci_err_recovery)
547                 return;
548
549         if (index < adev->doorbell.num_doorbells) {
550                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
551         } else {
552                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
553         }
554 }
555
556 /**
557  * amdgpu_device_indirect_rreg - read an indirect register
558  *
559  * @adev: amdgpu_device pointer
560  * @pcie_index: mmio register offset
561  * @pcie_data: mmio register offset
562  * @reg_addr: indirect register address to read from
563  *
564  * Returns the value of indirect register @reg_addr
565  */
566 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
567                                 u32 pcie_index, u32 pcie_data,
568                                 u32 reg_addr)
569 {
570         unsigned long flags;
571         u32 r;
572         void __iomem *pcie_index_offset;
573         void __iomem *pcie_data_offset;
574
575         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
576         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
577         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
578
579         writel(reg_addr, pcie_index_offset);
580         readl(pcie_index_offset);
581         r = readl(pcie_data_offset);
582         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
583
584         return r;
585 }
586
587 /**
588  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
589  *
590  * @adev: amdgpu_device pointer
591  * @pcie_index: mmio register offset
592  * @pcie_data: mmio register offset
593  * @reg_addr: indirect register address to read from
594  *
595  * Returns the value of indirect register @reg_addr
596  */
597 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
598                                   u32 pcie_index, u32 pcie_data,
599                                   u32 reg_addr)
600 {
601         unsigned long flags;
602         u64 r;
603         void __iomem *pcie_index_offset;
604         void __iomem *pcie_data_offset;
605
606         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
607         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
608         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
609
610         /* read low 32 bits */
611         writel(reg_addr, pcie_index_offset);
612         readl(pcie_index_offset);
613         r = readl(pcie_data_offset);
614         /* read high 32 bits */
615         writel(reg_addr + 4, pcie_index_offset);
616         readl(pcie_index_offset);
617         r |= ((u64)readl(pcie_data_offset) << 32);
618         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
619
620         return r;
621 }
622
623 /**
624  * amdgpu_device_indirect_wreg - write an indirect register address
625  *
626  * @adev: amdgpu_device pointer
627  * @pcie_index: mmio register offset
628  * @pcie_data: mmio register offset
629  * @reg_addr: indirect register offset
630  * @reg_data: indirect register data
631  *
632  */
633 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
634                                  u32 pcie_index, u32 pcie_data,
635                                  u32 reg_addr, u32 reg_data)
636 {
637         unsigned long flags;
638         void __iomem *pcie_index_offset;
639         void __iomem *pcie_data_offset;
640
641         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
642         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
643         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
644
645         writel(reg_addr, pcie_index_offset);
646         readl(pcie_index_offset);
647         writel(reg_data, pcie_data_offset);
648         readl(pcie_data_offset);
649         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
650 }
651
652 /**
653  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
654  *
655  * @adev: amdgpu_device pointer
656  * @pcie_index: mmio register offset
657  * @pcie_data: mmio register offset
658  * @reg_addr: indirect register offset
659  * @reg_data: indirect register data
660  *
661  */
662 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
663                                    u32 pcie_index, u32 pcie_data,
664                                    u32 reg_addr, u64 reg_data)
665 {
666         unsigned long flags;
667         void __iomem *pcie_index_offset;
668         void __iomem *pcie_data_offset;
669
670         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
671         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
672         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
673
674         /* write low 32 bits */
675         writel(reg_addr, pcie_index_offset);
676         readl(pcie_index_offset);
677         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
678         readl(pcie_data_offset);
679         /* write high 32 bits */
680         writel(reg_addr + 4, pcie_index_offset);
681         readl(pcie_index_offset);
682         writel((u32)(reg_data >> 32), pcie_data_offset);
683         readl(pcie_data_offset);
684         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
685 }
686
687 /**
688  * amdgpu_invalid_rreg - dummy reg read function
689  *
690  * @adev: amdgpu_device pointer
691  * @reg: offset of register
692  *
693  * Dummy register read function.  Used for register blocks
694  * that certain asics don't have (all asics).
695  * Returns the value in the register.
696  */
697 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
698 {
699         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
700         BUG();
701         return 0;
702 }
703
704 /**
705  * amdgpu_invalid_wreg - dummy reg write function
706  *
707  * @adev: amdgpu_device pointer
708  * @reg: offset of register
709  * @v: value to write to the register
710  *
711  * Dummy register write function.  Used for register blocks
712  * that certain asics don't have (all asics).
713  */
714 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
715 {
716         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
717                   reg, v);
718         BUG();
719 }
720
721 /**
722  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
723  *
724  * @adev: amdgpu_device pointer
725  * @reg: offset of register
726  *
727  * Dummy register read function.  Used for register blocks
728  * that certain asics don't have (all asics).
729  * Returns the value in the register.
730  */
731 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
732 {
733         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
734         BUG();
735         return 0;
736 }
737
738 /**
739  * amdgpu_invalid_wreg64 - dummy reg write function
740  *
741  * @adev: amdgpu_device pointer
742  * @reg: offset of register
743  * @v: value to write to the register
744  *
745  * Dummy register write function.  Used for register blocks
746  * that certain asics don't have (all asics).
747  */
748 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
749 {
750         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
751                   reg, v);
752         BUG();
753 }
754
755 /**
756  * amdgpu_block_invalid_rreg - dummy reg read function
757  *
758  * @adev: amdgpu_device pointer
759  * @block: offset of instance
760  * @reg: offset of register
761  *
762  * Dummy register read function.  Used for register blocks
763  * that certain asics don't have (all asics).
764  * Returns the value in the register.
765  */
766 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
767                                           uint32_t block, uint32_t reg)
768 {
769         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
770                   reg, block);
771         BUG();
772         return 0;
773 }
774
775 /**
776  * amdgpu_block_invalid_wreg - dummy reg write function
777  *
778  * @adev: amdgpu_device pointer
779  * @block: offset of instance
780  * @reg: offset of register
781  * @v: value to write to the register
782  *
783  * Dummy register write function.  Used for register blocks
784  * that certain asics don't have (all asics).
785  */
786 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
787                                       uint32_t block,
788                                       uint32_t reg, uint32_t v)
789 {
790         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
791                   reg, block, v);
792         BUG();
793 }
794
795 /**
796  * amdgpu_device_asic_init - Wrapper for atom asic_init
797  *
798  * @adev: amdgpu_device pointer
799  *
800  * Does any asic specific work and then calls atom asic init.
801  */
802 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
803 {
804         amdgpu_asic_pre_asic_init(adev);
805
806         return amdgpu_atom_asic_init(adev->mode_info.atom_context);
807 }
808
809 /**
810  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
811  *
812  * @adev: amdgpu_device pointer
813  *
814  * Allocates a scratch page of VRAM for use by various things in the
815  * driver.
816  */
817 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
818 {
819         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
820                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
821                                        &adev->vram_scratch.robj,
822                                        &adev->vram_scratch.gpu_addr,
823                                        (void **)&adev->vram_scratch.ptr);
824 }
825
826 /**
827  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
828  *
829  * @adev: amdgpu_device pointer
830  *
831  * Frees the VRAM scratch page.
832  */
833 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
834 {
835         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
836 }
837
838 /**
839  * amdgpu_device_program_register_sequence - program an array of registers.
840  *
841  * @adev: amdgpu_device pointer
842  * @registers: pointer to the register array
843  * @array_size: size of the register array
844  *
845  * Programs an array of registers with AND and OR masks.
846  * This is a helper for setting golden registers.
847  */
848 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
849                                              const u32 *registers,
850                                              const u32 array_size)
851 {
852         u32 tmp, reg, and_mask, or_mask;
853         int i;
854
855         if (array_size % 3)
856                 return;
857
858         for (i = 0; i < array_size; i += 3) {
859                 reg = registers[i + 0];
860                 and_mask = registers[i + 1];
861                 or_mask = registers[i + 2];
862
863                 if (and_mask == 0xffffffff) {
864                         tmp = or_mask;
865                 } else {
866                         tmp = RREG32(reg);
867                         tmp &= ~and_mask;
868                         if (adev->family >= AMDGPU_FAMILY_AI)
869                                 tmp |= (or_mask & and_mask);
870                         else
871                                 tmp |= or_mask;
872                 }
873                 WREG32(reg, tmp);
874         }
875 }
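/*
 * Hedged usage sketch (added for illustration, not part of the kernel
 * source): a made-up "golden" register list in the (reg, and_mask, or_mask)
 * triplet layout this helper expects. The offsets and values below are
 * invented purely to show the format.
 */
static const u32 example_golden_settings[] = {
        /* reg,      and_mask,   or_mask */
        0x0000315c, 0xffffffff, 0x00000001, /* full mask: or_mask written as-is */
        0x00009834, 0x0000ff00, 0x00003200, /* read-modify-write of one byte field */
};

/*
 * ... then, somewhere in ASIC init code:
 *
 *      amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *                                              ARRAY_SIZE(example_golden_settings));
 */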
876
877 /**
878  * amdgpu_device_pci_config_reset - reset the GPU
879  *
880  * @adev: amdgpu_device pointer
881  *
882  * Resets the GPU using the pci config reset sequence.
883  * Only applicable to asics prior to vega10.
884  */
885 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
886 {
887         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
888 }
889
890 /**
891  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
892  *
893  * @adev: amdgpu_device pointer
894  *
895  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
896  */
897 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
898 {
899         return pci_reset_function(adev->pdev);
900 }
901
902 /*
903  * GPU doorbell aperture helper functions.
904  */
905 /**
906  * amdgpu_device_doorbell_init - Init doorbell driver information.
907  *
908  * @adev: amdgpu_device pointer
909  *
910  * Init doorbell driver information (CIK)
911  * Returns 0 on success, error on failure.
912  */
913 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
914 {
915
916         /* No doorbell on SI hardware generation */
917         if (adev->asic_type < CHIP_BONAIRE) {
918                 adev->doorbell.base = 0;
919                 adev->doorbell.size = 0;
920                 adev->doorbell.num_doorbells = 0;
921                 adev->doorbell.ptr = NULL;
922                 return 0;
923         }
924
925         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
926                 return -EINVAL;
927
928         amdgpu_asic_init_doorbell_index(adev);
929
930         /* doorbell bar mapping */
931         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
932         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
933
934         adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
935                                              adev->doorbell_index.max_assignment+1);
936         if (adev->doorbell.num_doorbells == 0)
937                 return -EINVAL;
938
939         /* For Vega, reserve and map two pages on the doorbell BAR since the
940          * SDMA paging queue doorbells use the second page. The
941          * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
942          * doorbells are in the first page. So with the paging queue enabled,
943          * the max num_doorbells should be increased by one page (0x400 in dwords).
944          */
945         if (adev->asic_type >= CHIP_VEGA10)
946                 adev->doorbell.num_doorbells += 0x400;
947
948         adev->doorbell.ptr = ioremap(adev->doorbell.base,
949                                      adev->doorbell.num_doorbells *
950                                      sizeof(u32));
951         if (adev->doorbell.ptr == NULL)
952                 return -ENOMEM;
953
954         return 0;
955 }
956
957 /**
958  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
959  *
960  * @adev: amdgpu_device pointer
961  *
962  * Tear down doorbell driver information (CIK)
963  */
964 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
965 {
966         iounmap(adev->doorbell.ptr);
967         adev->doorbell.ptr = NULL;
968 }
969
970
971
972 /*
973  * amdgpu_device_wb_*()
974  * Writeback is the method by which the GPU updates special pages in memory
975  * with the status of certain GPU events (fences, ring pointers, etc.).
976  */
977
978 /**
979  * amdgpu_device_wb_fini - Disable Writeback and free memory
980  *
981  * @adev: amdgpu_device pointer
982  *
983  * Disables Writeback and frees the Writeback memory (all asics).
984  * Used at driver shutdown.
985  */
986 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
987 {
988         if (adev->wb.wb_obj) {
989                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
990                                       &adev->wb.gpu_addr,
991                                       (void **)&adev->wb.wb);
992                 adev->wb.wb_obj = NULL;
993         }
994 }
995
996 /**
997  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
998  *
999  * @adev: amdgpu_device pointer
1000  *
1001  * Initializes writeback and allocates writeback memory (all asics).
1002  * Used at driver startup.
1003  * Returns 0 on success or a negative error code on failure.
1004  */
1005 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1006 {
1007         int r;
1008
1009         if (adev->wb.wb_obj == NULL) {
1010                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1011                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1012                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1013                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1014                                             (void **)&adev->wb.wb);
1015                 if (r) {
1016                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1017                         return r;
1018                 }
1019
1020                 adev->wb.num_wb = AMDGPU_MAX_WB;
1021                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1022
1023                 /* clear wb memory */
1024                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1025         }
1026
1027         return 0;
1028 }
1029
1030 /**
1031  * amdgpu_device_wb_get - Allocate a wb entry
1032  *
1033  * @adev: amdgpu_device pointer
1034  * @wb: wb index
1035  *
1036  * Allocate a wb slot for use by the driver (all asics).
1037  * Returns 0 on success or -EINVAL on failure.
1038  */
1039 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1040 {
1041         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1042
1043         if (offset < adev->wb.num_wb) {
1044                 __set_bit(offset, adev->wb.used);
1045                 *wb = offset << 3; /* convert to dw offset */
1046                 return 0;
1047         } else {
1048                 return -EINVAL;
1049         }
1050 }
1051
1052 /**
1053  * amdgpu_device_wb_free - Free a wb entry
1054  *
1055  * @adev: amdgpu_device pointer
1056  * @wb: wb index
1057  *
1058  * Free a wb slot allocated for use by the driver (all asics)
1059  */
1060 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1061 {
1062         wb >>= 3;
1063         if (wb < adev->wb.num_wb)
1064                 __clear_bit(wb, adev->wb.used);
1065 }
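/*
 * Hedged usage sketch (added for illustration, not part of the kernel
 * source): allocating one writeback slot, deriving its GPU and CPU
 * addresses the way ring code typically does, and releasing it again.
 * The names local to this sketch are made up.
 */
static int example_use_wb_slot(struct amdgpu_device *adev)
{
        volatile u32 *wb_cpu_addr;
        u64 wb_gpu_addr;
        u32 wb_index;
        int r;

        r = amdgpu_device_wb_get(adev, &wb_index);
        if (r)
                return r;

        /* the returned index is a dword offset into the writeback page */
        wb_gpu_addr = adev->wb.gpu_addr + (wb_index * 4);
        wb_cpu_addr = &adev->wb.wb[wb_index];

        /* ... hand wb_gpu_addr to the GPU, poll *wb_cpu_addr on the CPU ... */

        amdgpu_device_wb_free(adev, wb_index);
        return 0;
}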
1066
1067 /**
1068  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1069  *
1070  * @adev: amdgpu_device pointer
1071  *
1072  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1073  * to fail, but if any of the BARs is not accessible after the resize we abort
1074  * driver loading by returning -ENODEV.
1075  */
1076 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1077 {
1078         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1079         struct pci_bus *root;
1080         struct resource *res;
1081         unsigned i;
1082         u16 cmd;
1083         int r;
1084
1085         /* Bypass for VF */
1086         if (amdgpu_sriov_vf(adev))
1087                 return 0;
1088
1089         /* skip if the bios has already enabled large BAR */
1090         if (adev->gmc.real_vram_size &&
1091             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1092                 return 0;
1093
1094         /* Check if the root BUS has 64bit memory resources */
1095         root = adev->pdev->bus;
1096         while (root->parent)
1097                 root = root->parent;
1098
1099         pci_bus_for_each_resource(root, res, i) {
1100                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1101                     res->start > 0x100000000ull)
1102                         break;
1103         }
1104
1105         /* Trying to resize is pointless without a root hub window above 4GB */
1106         if (!res)
1107                 return 0;
1108
1109         /* Limit the BAR size to what is available */
1110         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1111                         rbar_size);
1112
1113         /* Disable memory decoding while we change the BAR addresses and size */
1114         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1115         pci_write_config_word(adev->pdev, PCI_COMMAND,
1116                               cmd & ~PCI_COMMAND_MEMORY);
1117
1118         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1119         amdgpu_device_doorbell_fini(adev);
1120         if (adev->asic_type >= CHIP_BONAIRE)
1121                 pci_release_resource(adev->pdev, 2);
1122
1123         pci_release_resource(adev->pdev, 0);
1124
1125         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1126         if (r == -ENOSPC)
1127                 DRM_INFO("Not enough PCI address space for a large BAR.");
1128         else if (r && r != -ENOTSUPP)
1129                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1130
1131         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1132
1133         /* When the doorbell or fb BAR isn't available we have no chance of
1134          * using the device.
1135          */
1136         r = amdgpu_device_doorbell_init(adev);
1137         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1138                 return -ENODEV;
1139
1140         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1141
1142         return 0;
1143 }
1144
1145 /*
1146  * GPU helper functions.
1147  */
1148 /**
1149  * amdgpu_device_need_post - check if the hw needs to be posted or not
1150  *
1151  * @adev: amdgpu_device pointer
1152  *
1153  * Check if the asic has been initialized (all asics) at driver startup,
1154  * or whether a post is needed because a hw reset was performed.
1155  * Returns true if a post is needed, false if not.
1156  */
1157 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1158 {
1159         uint32_t reg;
1160
1161         if (amdgpu_sriov_vf(adev))
1162                 return false;
1163
1164         if (amdgpu_passthrough(adev)) {
1165                 /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
1166                  * reboot some old SMC firmware still needs the driver to do a vPost,
1167                  * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1168                  * this flaw, so we force a vPost for SMC versions below 22.15.
1169                  */
1170                 if (adev->asic_type == CHIP_FIJI) {
1171                         int err;
1172                         uint32_t fw_ver;
1173                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1174                         /* force vPost if error occurred */
1175                         if (err)
1176                                 return true;
1177
1178                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1179                         if (fw_ver < 0x00160e00)
1180                                 return true;
1181                 }
1182         }
1183
1184         /* Don't post if we need to reset whole hive on init */
1185         if (adev->gmc.xgmi.pending_reset)
1186                 return false;
1187
1188         if (adev->has_hw_reset) {
1189                 adev->has_hw_reset = false;
1190                 return true;
1191         }
1192
1193         /* bios scratch used on CIK+ */
1194         if (adev->asic_type >= CHIP_BONAIRE)
1195                 return amdgpu_atombios_scratch_need_asic_init(adev);
1196
1197         /* check MEM_SIZE for older asics */
1198         reg = amdgpu_asic_get_config_memsize(adev);
1199
1200         if ((reg != 0) && (reg != 0xffffffff))
1201                 return false;
1202
1203         return true;
1204 }
1205
1206 /* if we get transitioned to only one device, take VGA back */
1207 /**
1208  * amdgpu_device_vga_set_decode - enable/disable vga decode
1209  *
1210  * @cookie: amdgpu_device pointer
1211  * @state: enable/disable vga decode
1212  *
1213  * Enable/disable vga decode (all asics).
1214  * Returns VGA resource flags.
1215  */
1216 static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1217 {
1218         struct amdgpu_device *adev = cookie;
1219         amdgpu_asic_set_vga_state(adev, state);
1220         if (state)
1221                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1222                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1223         else
1224                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1225 }
1226
1227 /**
1228  * amdgpu_device_check_block_size - validate the vm block size
1229  *
1230  * @adev: amdgpu_device pointer
1231  *
1232  * Validates the vm block size specified via module parameter.
1233  * The vm block size defines the number of bits in the page table versus the
1234  * page directory; a page is 4KB so we have 12 bits of offset, a minimum of
1235  * 9 bits in the page table, and the remaining bits are in the page directory.
1236  */
1237 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1238 {
1239         /* defines number of bits in page table versus page directory,
1240          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1241          * page table and the remaining bits are in the page directory */
1242         if (amdgpu_vm_block_size == -1)
1243                 return;
1244
1245         if (amdgpu_vm_block_size < 9) {
1246                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1247                          amdgpu_vm_block_size);
1248                 amdgpu_vm_block_size = -1;
1249         }
1250 }
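/*
 * Worked example (added for clarity, not part of the kernel source): with
 * the minimum amdgpu_vm_block_size of 9, one page table block covers
 * 2^9 entries * 4 KiB per page = 2 MiB of address space; the address bits
 * above bit (12 + 9) are then resolved by the page directory levels.
 */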
1251
1252 /**
1253  * amdgpu_device_check_vm_size - validate the vm size
1254  *
1255  * @adev: amdgpu_device pointer
1256  *
1257  * Validates the vm size in GB specified via module parameter.
1258  * The VM size is the size of the GPU virtual memory space in GB.
1259  */
1260 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1261 {
1262         /* no need to check the default value */
1263         if (amdgpu_vm_size == -1)
1264                 return;
1265
1266         if (amdgpu_vm_size < 1) {
1267                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1268                          amdgpu_vm_size);
1269                 amdgpu_vm_size = -1;
1270         }
1271 }
1272
1273 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1274 {
1275         struct sysinfo si;
1276         bool is_os_64 = (sizeof(void *) == 8);
1277         uint64_t total_memory;
1278         uint64_t dram_size_seven_GB = 0x1B8000000;
1279         uint64_t dram_size_three_GB = 0xB8000000;
1280
1281         if (amdgpu_smu_memory_pool_size == 0)
1282                 return;
1283
1284         if (!is_os_64) {
1285                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1286                 goto def_value;
1287         }
1288         si_meminfo(&si);
1289         total_memory = (uint64_t)si.totalram * si.mem_unit;
1290
1291         if ((amdgpu_smu_memory_pool_size == 1) ||
1292                 (amdgpu_smu_memory_pool_size == 2)) {
1293                 if (total_memory < dram_size_three_GB)
1294                         goto def_value1;
1295         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1296                 (amdgpu_smu_memory_pool_size == 8)) {
1297                 if (total_memory < dram_size_seven_GB)
1298                         goto def_value1;
1299         } else {
1300                 DRM_WARN("Smu memory pool size not supported\n");
1301                 goto def_value;
1302         }
1303         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1304
1305         return;
1306
1307 def_value1:
1308         DRM_WARN("Not enough system memory\n");
1309 def_value:
1310         adev->pm.smu_prv_buffer_size = 0;
1311 }
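/*
 * Worked example (added for clarity, not part of the kernel source): the
 * "<< 28" above means the module parameter is expressed in units of 256 MiB,
 * so amdgpu_smu_memory_pool_size values of 1/2/4/8 request pools of
 * 256 MiB/512 MiB/1 GiB/2 GiB, subject to the system DRAM checks above.
 */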
1312
1313 /**
1314  * amdgpu_device_check_arguments - validate module params
1315  *
1316  * @adev: amdgpu_device pointer
1317  *
1318  * Validates certain module parameters and updates
1319  * the associated values used by the driver (all asics).
1320  */
1321 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1322 {
1323         if (amdgpu_sched_jobs < 4) {
1324                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1325                          amdgpu_sched_jobs);
1326                 amdgpu_sched_jobs = 4;
1327         } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1328                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1329                          amdgpu_sched_jobs);
1330                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1331         }
1332
1333         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1334                 /* gart size must be greater or equal to 32M */
1335                 dev_warn(adev->dev, "gart size (%d) too small\n",
1336                          amdgpu_gart_size);
1337                 amdgpu_gart_size = -1;
1338         }
1339
1340         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1341                 /* gtt size must be greater or equal to 32M */
1342                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1343                                  amdgpu_gtt_size);
1344                 amdgpu_gtt_size = -1;
1345         }
1346
1347         /* valid range is between 4 and 9 inclusive */
1348         if (amdgpu_vm_fragment_size != -1 &&
1349             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1350                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1351                 amdgpu_vm_fragment_size = -1;
1352         }
1353
1354         if (amdgpu_sched_hw_submission < 2) {
1355                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1356                          amdgpu_sched_hw_submission);
1357                 amdgpu_sched_hw_submission = 2;
1358         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1359                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1360                          amdgpu_sched_hw_submission);
1361                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1362         }
1363
1364         amdgpu_device_check_smu_prv_buffer_size(adev);
1365
1366         amdgpu_device_check_vm_size(adev);
1367
1368         amdgpu_device_check_block_size(adev);
1369
1370         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1371
1372         amdgpu_gmc_tmz_set(adev);
1373
1374         amdgpu_gmc_noretry_set(adev);
1375
1376         return 0;
1377 }
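/*
 * Usage note (added for illustration, not part of the kernel source): the
 * values validated above come from amdgpu module parameters, which can be
 * set on the kernel command line or at module load time, for example
 * something like:
 *
 *      modprobe amdgpu vm_size=256 vm_block_size=9 sched_jobs=64
 *
 * The exact parameter names are defined in amdgpu_drv.c and are an
 * assumption here.
 */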
1378
1379 /**
1380  * amdgpu_switcheroo_set_state - set switcheroo state
1381  *
1382  * @pdev: pci dev pointer
1383  * @state: vga_switcheroo state
1384  *
1385  * Callback for the switcheroo driver.  Suspends or resumes
1386  * the asics before or after it is powered up using ACPI methods.
1387  */
1388 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1389                                         enum vga_switcheroo_state state)
1390 {
1391         struct drm_device *dev = pci_get_drvdata(pdev);
1392         int r;
1393
1394         if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
1395                 return;
1396
1397         if (state == VGA_SWITCHEROO_ON) {
1398                 pr_info("switched on\n");
1399                 /* don't suspend or resume card normally */
1400                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1401
1402                 pci_set_power_state(pdev, PCI_D0);
1403                 amdgpu_device_load_pci_state(pdev);
1404                 r = pci_enable_device(pdev);
1405                 if (r)
1406                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1407                 amdgpu_device_resume(dev, true);
1408
1409                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1410         } else {
1411                 pr_info("switched off\n");
1412                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1413                 amdgpu_device_suspend(dev, true);
1414                 amdgpu_device_cache_pci_state(pdev);
1415                 /* Shut down the device */
1416                 pci_disable_device(pdev);
1417                 pci_set_power_state(pdev, PCI_D3cold);
1418                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1419         }
1420 }
1421
1422 /**
1423  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1424  *
1425  * @pdev: pci dev pointer
1426  *
1427  * Callback for the switcheroo driver.  Check if the switcheroo
1428  * state can be changed.
1429  * Returns true if the state can be changed, false if not.
1430  */
1431 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1432 {
1433         struct drm_device *dev = pci_get_drvdata(pdev);
1434
1435         /*
1436         * FIXME: open_count is protected by drm_global_mutex but that would lead to
1437         * locking inversion with the driver load path. And the access here is
1438         * completely racy anyway. So don't bother with locking for now.
1439         */
1440         return atomic_read(&dev->open_count) == 0;
1441 }
1442
1443 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1444         .set_gpu_state = amdgpu_switcheroo_set_state,
1445         .reprobe = NULL,
1446         .can_switch = amdgpu_switcheroo_can_switch,
1447 };
1448
1449 /**
1450  * amdgpu_device_ip_set_clockgating_state - set the CG state
1451  *
1452  * @dev: amdgpu_device pointer
1453  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1454  * @state: clockgating state (gate or ungate)
1455  *
1456  * Sets the requested clockgating state for all instances of
1457  * the hardware IP specified.
1458  * Returns the error code from the last instance.
1459  */
1460 int amdgpu_device_ip_set_clockgating_state(void *dev,
1461                                            enum amd_ip_block_type block_type,
1462                                            enum amd_clockgating_state state)
1463 {
1464         struct amdgpu_device *adev = dev;
1465         int i, r = 0;
1466
1467         for (i = 0; i < adev->num_ip_blocks; i++) {
1468                 if (!adev->ip_blocks[i].status.valid)
1469                         continue;
1470                 if (adev->ip_blocks[i].version->type != block_type)
1471                         continue;
1472                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1473                         continue;
1474                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1475                         (void *)adev, state);
1476                 if (r)
1477                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1478                                   adev->ip_blocks[i].version->funcs->name, r);
1479         }
1480         return r;
1481 }
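/*
 * Hedged usage sketch (added for illustration, not part of the kernel
 * source): gating clocks on every GFX IP instance. The enum values are the
 * ones provided by the shared amd headers.
 */
static int example_gate_gfx_clocks(struct amdgpu_device *adev)
{
        return amdgpu_device_ip_set_clockgating_state(adev,
                                                      AMD_IP_BLOCK_TYPE_GFX,
                                                      AMD_CG_STATE_GATE);
}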
1482
1483 /**
1484  * amdgpu_device_ip_set_powergating_state - set the PG state
1485  *
1486  * @dev: amdgpu_device pointer
1487  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1488  * @state: powergating state (gate or ungate)
1489  *
1490  * Sets the requested powergating state for all instances of
1491  * the hardware IP specified.
1492  * Returns the error code from the last instance.
1493  */
1494 int amdgpu_device_ip_set_powergating_state(void *dev,
1495                                            enum amd_ip_block_type block_type,
1496                                            enum amd_powergating_state state)
1497 {
1498         struct amdgpu_device *adev = dev;
1499         int i, r = 0;
1500
1501         for (i = 0; i < adev->num_ip_blocks; i++) {
1502                 if (!adev->ip_blocks[i].status.valid)
1503                         continue;
1504                 if (adev->ip_blocks[i].version->type != block_type)
1505                         continue;
1506                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1507                         continue;
1508                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1509                         (void *)adev, state);
1510                 if (r)
1511                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1512                                   adev->ip_blocks[i].version->funcs->name, r);
1513         }
1514         return r;
1515 }
1516
1517 /**
1518  * amdgpu_device_ip_get_clockgating_state - get the CG state
1519  *
1520  * @adev: amdgpu_device pointer
1521  * @flags: clockgating feature flags
1522  *
1523  * Walks the list of IPs on the device and updates the clockgating
1524  * flags for each IP.
1525  * Updates @flags with the feature flags for each hardware IP where
1526  * clockgating is enabled.
1527  */
1528 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1529                                             u32 *flags)
1530 {
1531         int i;
1532
1533         for (i = 0; i < adev->num_ip_blocks; i++) {
1534                 if (!adev->ip_blocks[i].status.valid)
1535                         continue;
1536                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1537                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1538         }
1539 }
1540
1541 /**
1542  * amdgpu_device_ip_wait_for_idle - wait for idle
1543  *
1544  * @adev: amdgpu_device pointer
1545  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1546  *
1547  * Waits for the requested hardware IP to be idle.
1548  * Returns 0 for success or a negative error code on failure.
1549  */
1550 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1551                                    enum amd_ip_block_type block_type)
1552 {
1553         int i, r;
1554
1555         for (i = 0; i < adev->num_ip_blocks; i++) {
1556                 if (!adev->ip_blocks[i].status.valid)
1557                         continue;
1558                 if (adev->ip_blocks[i].version->type == block_type) {
1559                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1560                         if (r)
1561                                 return r;
1562                         break;
1563                 }
1564         }
1565         return 0;
1566
1567 }
1568
1569 /**
1570  * amdgpu_device_ip_is_idle - is the hardware IP idle
1571  *
1572  * @adev: amdgpu_device pointer
1573  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1574  *
1575  * Check if the hardware IP is idle or not.
1576  * Returns true if the IP is idle, false if not.
1577  */
1578 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1579                               enum amd_ip_block_type block_type)
1580 {
1581         int i;
1582
1583         for (i = 0; i < adev->num_ip_blocks; i++) {
1584                 if (!adev->ip_blocks[i].status.valid)
1585                         continue;
1586                 if (adev->ip_blocks[i].version->type == block_type)
1587                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1588         }
1589         return true;
1590
1591 }
1592
1593 /**
1594  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1595  *
1596  * @adev: amdgpu_device pointer
1597  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1598  *
1599  * Returns a pointer to the hardware IP block structure
1600  * if it exists for the asic, otherwise NULL.
1601  */
1602 struct amdgpu_ip_block *
1603 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1604                               enum amd_ip_block_type type)
1605 {
1606         int i;
1607
1608         for (i = 0; i < adev->num_ip_blocks; i++)
1609                 if (adev->ip_blocks[i].version->type == type)
1610                         return &adev->ip_blocks[i];
1611
1612         return NULL;
1613 }
1614
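/*
 * Illustrative usage sketch (editor's note, not part of the driver): looking
 * up a block to check its version, e.g. to special-case newer GFX IP:
 *
 *	struct amdgpu_ip_block *ip_block =
 *		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *
 *	if (ip_block && ip_block->version->major >= 10)
 *		...
 */
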
1615 /**
1616  * amdgpu_device_ip_block_version_cmp
1617  *
1618  * @adev: amdgpu_device pointer
1619  * @type: enum amd_ip_block_type
1620  * @major: major version
1621  * @minor: minor version
1622  *
1623  * return 0 if equal or greater
1624  * return 1 if smaller or the ip_block doesn't exist
1625  */
1626 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1627                                        enum amd_ip_block_type type,
1628                                        u32 major, u32 minor)
1629 {
1630         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1631
1632         if (ip_block && ((ip_block->version->major > major) ||
1633                         ((ip_block->version->major == major) &&
1634                         (ip_block->version->minor >= minor))))
1635                 return 0;
1636
1637         return 1;
1638 }
1639
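/*
 * Illustrative usage sketch (editor's note, not part of the driver): note the
 * return convention above, where 0 means "at least this version":
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *					       7, 0) == 0)
 *		... handle SMU 7.0 or newer ...
 */
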
1640 /**
1641  * amdgpu_device_ip_block_add
1642  *
1643  * @adev: amdgpu_device pointer
1644  * @ip_block_version: pointer to the IP to add
1645  *
1646  * Adds the IP block driver information to the collection of IPs
1647  * on the asic.
1648  */
1649 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1650                                const struct amdgpu_ip_block_version *ip_block_version)
1651 {
1652         if (!ip_block_version)
1653                 return -EINVAL;
1654
1655         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1656                   ip_block_version->funcs->name);
1657
1658         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1659
1660         return 0;
1661 }
1662
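/*
 * Illustrative usage sketch (editor's note, not part of the driver): the
 * per-ASIC *_set_ip_blocks() helpers register blocks in bring-up order,
 * roughly like this (block names as used by the VI code, for example):
 *
 *	r = amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *	if (r)
 *		return r;
 *	r = amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 *	...
 */
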
1663 /**
1664  * amdgpu_device_enable_virtual_display - enable virtual display feature
1665  *
1666  * @adev: amdgpu_device pointer
1667  *
1668  * Enables the virtual display feature if the user has enabled it via
1669  * the module parameter virtual_display.  This feature provides virtual
1670  * display hardware on headless boards or in virtualized environments.
1671  * This function parses and validates the configuration string specified by
1672  * the user and configures the virtual display configuration (number of
1673  * virtual connectors, crtcs, etc.) specified.
1674  */
1675 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1676 {
1677         adev->enable_virtual_display = false;
1678
1679         if (amdgpu_virtual_display) {
1680                 const char *pci_address_name = pci_name(adev->pdev);
1681                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1682
1683                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1684                 pciaddstr_tmp = pciaddstr;
1685                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1686                         pciaddname = strsep(&pciaddname_tmp, ",");
1687                         if (!strcmp("all", pciaddname)
1688                             || !strcmp(pci_address_name, pciaddname)) {
1689                                 long num_crtc;
1690                                 int res = -1;
1691
1692                                 adev->enable_virtual_display = true;
1693
1694                                 if (pciaddname_tmp)
1695                                         res = kstrtol(pciaddname_tmp, 10,
1696                                                       &num_crtc);
1697
1698                                 if (!res) {
1699                                         if (num_crtc < 1)
1700                                                 num_crtc = 1;
1701                                         if (num_crtc > 6)
1702                                                 num_crtc = 6;
1703                                         adev->mode_info.num_crtc = num_crtc;
1704                                 } else {
1705                                         adev->mode_info.num_crtc = 1;
1706                                 }
1707                                 break;
1708                         }
1709                 }
1710
1711                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1712                          amdgpu_virtual_display, pci_address_name,
1713                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1714
1715                 kfree(pciaddstr);
1716         }
1717 }
1718
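/*
 * Illustrative example (editor's note, not part of the driver): the
 * virtual_display module parameter is a semicolon-separated list of
 * "<pci address>,<num_crtc>" entries (or "all"), e.g. with a made-up address:
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,2
 *
 * which enables two virtual crtcs on the device at 0000:01:00.0.
 */
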
1719 /**
1720  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1721  *
1722  * @adev: amdgpu_device pointer
1723  *
1724  * Parses the asic configuration parameters specified in the gpu info
1725  * firmware and makes them available to the driver for use in configuring
1726  * the asic.
1727  * Returns 0 on success, -EINVAL on failure.
1728  */
1729 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1730 {
1731         const char *chip_name;
1732         char fw_name[40];
1733         int err;
1734         const struct gpu_info_firmware_header_v1_0 *hdr;
1735
1736         adev->firmware.gpu_info_fw = NULL;
1737
1738         if (adev->mman.discovery_bin) {
1739                 amdgpu_discovery_get_gfx_info(adev);
1740
1741                 /*
1742                  * FIXME: The bounding box is still needed by Navi12, so
1743                  * temporarily read it from gpu_info firmware. Should be dropped
1744                  * when DAL no longer needs it.
1745                  */
1746                 if (adev->asic_type != CHIP_NAVI12)
1747                         return 0;
1748         }
1749
1750         switch (adev->asic_type) {
1751 #ifdef CONFIG_DRM_AMDGPU_SI
1752         case CHIP_VERDE:
1753         case CHIP_TAHITI:
1754         case CHIP_PITCAIRN:
1755         case CHIP_OLAND:
1756         case CHIP_HAINAN:
1757 #endif
1758 #ifdef CONFIG_DRM_AMDGPU_CIK
1759         case CHIP_BONAIRE:
1760         case CHIP_HAWAII:
1761         case CHIP_KAVERI:
1762         case CHIP_KABINI:
1763         case CHIP_MULLINS:
1764 #endif
1765         case CHIP_TOPAZ:
1766         case CHIP_TONGA:
1767         case CHIP_FIJI:
1768         case CHIP_POLARIS10:
1769         case CHIP_POLARIS11:
1770         case CHIP_POLARIS12:
1771         case CHIP_VEGAM:
1772         case CHIP_CARRIZO:
1773         case CHIP_STONEY:
1774         case CHIP_VEGA20:
1775         case CHIP_ALDEBARAN:
1776         case CHIP_SIENNA_CICHLID:
1777         case CHIP_NAVY_FLOUNDER:
1778         case CHIP_DIMGREY_CAVEFISH:
1779         default:
1780                 return 0;
1781         case CHIP_VEGA10:
1782                 chip_name = "vega10";
1783                 break;
1784         case CHIP_VEGA12:
1785                 chip_name = "vega12";
1786                 break;
1787         case CHIP_RAVEN:
1788                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1789                         chip_name = "raven2";
1790                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1791                         chip_name = "picasso";
1792                 else
1793                         chip_name = "raven";
1794                 break;
1795         case CHIP_ARCTURUS:
1796                 chip_name = "arcturus";
1797                 break;
1798         case CHIP_RENOIR:
1799                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1800                         chip_name = "renoir";
1801                 else
1802                         chip_name = "green_sardine";
1803                 break;
1804         case CHIP_NAVI10:
1805                 chip_name = "navi10";
1806                 break;
1807         case CHIP_NAVI14:
1808                 chip_name = "navi14";
1809                 break;
1810         case CHIP_NAVI12:
1811                 chip_name = "navi12";
1812                 break;
1813         case CHIP_VANGOGH:
1814                 chip_name = "vangogh";
1815                 break;
1816         }
1817
1818         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1819         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1820         if (err) {
1821                 dev_err(adev->dev,
1822                         "Failed to load gpu_info firmware \"%s\"\n",
1823                         fw_name);
1824                 goto out;
1825         }
1826         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1827         if (err) {
1828                 dev_err(adev->dev,
1829                         "Failed to validate gpu_info firmware \"%s\"\n",
1830                         fw_name);
1831                 goto out;
1832         }
1833
1834         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1835         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1836
1837         switch (hdr->version_major) {
1838         case 1:
1839         {
1840                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1841                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1842                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1843
1844                 /*
1845                  * Should be dropped when DAL no longer needs it.
1846                  */
1847                 if (adev->asic_type == CHIP_NAVI12)
1848                         goto parse_soc_bounding_box;
1849
1850                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1851                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1852                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1853                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1854                 adev->gfx.config.max_texture_channel_caches =
1855                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
1856                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1857                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1858                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1859                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1860                 adev->gfx.config.double_offchip_lds_buf =
1861                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1862                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1863                 adev->gfx.cu_info.max_waves_per_simd =
1864                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1865                 adev->gfx.cu_info.max_scratch_slots_per_cu =
1866                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1867                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1868                 if (hdr->version_minor >= 1) {
1869                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1870                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1871                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1872                         adev->gfx.config.num_sc_per_sh =
1873                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1874                         adev->gfx.config.num_packer_per_sc =
1875                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1876                 }
1877
1878 parse_soc_bounding_box:
1879                 /*
1880                  * soc bounding box info is not integrated in the discovery table,
1881                  * so we always parse it from the gpu_info firmware when it is needed.
1882                  */
1883                 if (hdr->version_minor == 2) {
1884                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1885                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1886                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1887                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1888                 }
1889                 break;
1890         }
1891         default:
1892                 dev_err(adev->dev,
1893                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1894                 err = -EINVAL;
1895                 goto out;
1896         }
1897 out:
1898         return err;
1899 }
1900
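/*
 * Editor's note (worked example): for CHIP_NAVI10 the snprintf() above yields
 * the firmware name "amdgpu/navi10_gpu_info.bin", and the parsed gc_* fields
 * seed adev->gfx.config before the GFX IP block initializes.
 */
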
1901 /**
1902  * amdgpu_device_ip_early_init - run early init for hardware IPs
1903  *
1904  * @adev: amdgpu_device pointer
1905  *
1906  * Early initialization pass for hardware IPs.  The hardware IPs that make
1907  * up each asic are discovered and each IP's early_init callback is run.  This
1908  * is the first stage in initializing the asic.
1909  * Returns 0 on success, negative error code on failure.
1910  */
1911 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1912 {
1913         int i, r;
1914
1915         amdgpu_device_enable_virtual_display(adev);
1916
1917         if (amdgpu_sriov_vf(adev)) {
1918                 r = amdgpu_virt_request_full_gpu(adev, true);
1919                 if (r)
1920                         return r;
1921         }
1922
1923         switch (adev->asic_type) {
1924 #ifdef CONFIG_DRM_AMDGPU_SI
1925         case CHIP_VERDE:
1926         case CHIP_TAHITI:
1927         case CHIP_PITCAIRN:
1928         case CHIP_OLAND:
1929         case CHIP_HAINAN:
1930                 adev->family = AMDGPU_FAMILY_SI;
1931                 r = si_set_ip_blocks(adev);
1932                 if (r)
1933                         return r;
1934                 break;
1935 #endif
1936 #ifdef CONFIG_DRM_AMDGPU_CIK
1937         case CHIP_BONAIRE:
1938         case CHIP_HAWAII:
1939         case CHIP_KAVERI:
1940         case CHIP_KABINI:
1941         case CHIP_MULLINS:
1942                 if (adev->flags & AMD_IS_APU)
1943                         adev->family = AMDGPU_FAMILY_KV;
1944                 else
1945                         adev->family = AMDGPU_FAMILY_CI;
1946
1947                 r = cik_set_ip_blocks(adev);
1948                 if (r)
1949                         return r;
1950                 break;
1951 #endif
1952         case CHIP_TOPAZ:
1953         case CHIP_TONGA:
1954         case CHIP_FIJI:
1955         case CHIP_POLARIS10:
1956         case CHIP_POLARIS11:
1957         case CHIP_POLARIS12:
1958         case CHIP_VEGAM:
1959         case CHIP_CARRIZO:
1960         case CHIP_STONEY:
1961                 if (adev->flags & AMD_IS_APU)
1962                         adev->family = AMDGPU_FAMILY_CZ;
1963                 else
1964                         adev->family = AMDGPU_FAMILY_VI;
1965
1966                 r = vi_set_ip_blocks(adev);
1967                 if (r)
1968                         return r;
1969                 break;
1970         case CHIP_VEGA10:
1971         case CHIP_VEGA12:
1972         case CHIP_VEGA20:
1973         case CHIP_RAVEN:
1974         case CHIP_ARCTURUS:
1975         case CHIP_RENOIR:
1976         case CHIP_ALDEBARAN:
1977                 if (adev->flags & AMD_IS_APU)
1978                         adev->family = AMDGPU_FAMILY_RV;
1979                 else
1980                         adev->family = AMDGPU_FAMILY_AI;
1981
1982                 r = soc15_set_ip_blocks(adev);
1983                 if (r)
1984                         return r;
1985                 break;
1986         case  CHIP_NAVI10:
1987         case  CHIP_NAVI14:
1988         case  CHIP_NAVI12:
1989         case  CHIP_SIENNA_CICHLID:
1990         case  CHIP_NAVY_FLOUNDER:
1991         case  CHIP_DIMGREY_CAVEFISH:
1992         case CHIP_VANGOGH:
1993                 if (adev->asic_type == CHIP_VANGOGH)
1994                         adev->family = AMDGPU_FAMILY_VGH;
1995                 else
1996                         adev->family = AMDGPU_FAMILY_NV;
1997
1998                 r = nv_set_ip_blocks(adev);
1999                 if (r)
2000                         return r;
2001                 break;
2002         default:
2003                 /* FIXME: not supported yet */
2004                 return -EINVAL;
2005         }
2006
2007         amdgpu_amdkfd_device_probe(adev);
2008
2009         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2010         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2011                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2012         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2013                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2014
2015         for (i = 0; i < adev->num_ip_blocks; i++) {
2016                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2017                         DRM_ERROR("disabled ip block: %d <%s>\n",
2018                                   i, adev->ip_blocks[i].version->funcs->name);
2019                         adev->ip_blocks[i].status.valid = false;
2020                 } else {
2021                         if (adev->ip_blocks[i].version->funcs->early_init) {
2022                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2023                                 if (r == -ENOENT) {
2024                                         adev->ip_blocks[i].status.valid = false;
2025                                 } else if (r) {
2026                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2027                                                   adev->ip_blocks[i].version->funcs->name, r);
2028                                         return r;
2029                                 } else {
2030                                         adev->ip_blocks[i].status.valid = true;
2031                                 }
2032                         } else {
2033                                 adev->ip_blocks[i].status.valid = true;
2034                         }
2035                 }
2036                 /* get the vbios after the asic_funcs are set up */
2037                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2038                         r = amdgpu_device_parse_gpu_info_fw(adev);
2039                         if (r)
2040                                 return r;
2041
2042                         /* Read BIOS */
2043                         if (!amdgpu_get_bios(adev))
2044                                 return -EINVAL;
2045
2046                         r = amdgpu_atombios_init(adev);
2047                         if (r) {
2048                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2049                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2050                                 return r;
2051                         }
2052                 }
2053         }
2054
2055         adev->cg_flags &= amdgpu_cg_mask;
2056         adev->pg_flags &= amdgpu_pg_mask;
2057
2058         return 0;
2059 }
2060
2061 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2062 {
2063         int i, r;
2064
2065         for (i = 0; i < adev->num_ip_blocks; i++) {
2066                 if (!adev->ip_blocks[i].status.sw)
2067                         continue;
2068                 if (adev->ip_blocks[i].status.hw)
2069                         continue;
2070                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2071                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2072                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2073                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2074                         if (r) {
2075                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2076                                           adev->ip_blocks[i].version->funcs->name, r);
2077                                 return r;
2078                         }
2079                         adev->ip_blocks[i].status.hw = true;
2080                 }
2081         }
2082
2083         return 0;
2084 }
2085
2086 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2087 {
2088         int i, r;
2089
2090         for (i = 0; i < adev->num_ip_blocks; i++) {
2091                 if (!adev->ip_blocks[i].status.sw)
2092                         continue;
2093                 if (adev->ip_blocks[i].status.hw)
2094                         continue;
2095                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2096                 if (r) {
2097                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2098                                   adev->ip_blocks[i].version->funcs->name, r);
2099                         return r;
2100                 }
2101                 adev->ip_blocks[i].status.hw = true;
2102         }
2103
2104         return 0;
2105 }
2106
2107 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2108 {
2109         int r = 0;
2110         int i;
2111         uint32_t smu_version;
2112
2113         if (adev->asic_type >= CHIP_VEGA10) {
2114                 for (i = 0; i < adev->num_ip_blocks; i++) {
2115                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2116                                 continue;
2117
2118                         if (!adev->ip_blocks[i].status.sw)
2119                                 continue;
2120
2121                         /* no need to do the fw loading again if already done */
2122                         if (adev->ip_blocks[i].status.hw)
2123                                 break;
2124
2125                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2126                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2127                                 if (r) {
2128                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2129                                                           adev->ip_blocks[i].version->funcs->name, r);
2130                                         return r;
2131                                 }
2132                         } else {
2133                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2134                                 if (r) {
2135                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2136                                                           adev->ip_blocks[i].version->funcs->name, r);
2137                                         return r;
2138                                 }
2139                         }
2140
2141                         adev->ip_blocks[i].status.hw = true;
2142                         break;
2143                 }
2144         }
2145
2146         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2147                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2148
2149         return r;
2150 }
2151
2152 /**
2153  * amdgpu_device_ip_init - run init for hardware IPs
2154  *
2155  * @adev: amdgpu_device pointer
2156  *
2157  * Main initialization pass for hardware IPs.  The list of all the hardware
2158  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2159  * are run.  sw_init initializes the software state associated with each IP
2160  * and hw_init initializes the hardware associated with each IP.
2161  * Returns 0 on success, negative error code on failure.
2162  */
2163 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2164 {
2165         int i, r;
2166
2167         r = amdgpu_ras_init(adev);
2168         if (r)
2169                 return r;
2170
2171         for (i = 0; i < adev->num_ip_blocks; i++) {
2172                 if (!adev->ip_blocks[i].status.valid)
2173                         continue;
2174                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2175                 if (r) {
2176                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2177                                   adev->ip_blocks[i].version->funcs->name, r);
2178                         goto init_failed;
2179                 }
2180                 adev->ip_blocks[i].status.sw = true;
2181
2182                 /* need to do gmc hw init early so we can allocate gpu mem */
2183                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2184                         r = amdgpu_device_vram_scratch_init(adev);
2185                         if (r) {
2186                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2187                                 goto init_failed;
2188                         }
2189                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2190                         if (r) {
2191                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2192                                 goto init_failed;
2193                         }
2194                         r = amdgpu_device_wb_init(adev);
2195                         if (r) {
2196                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2197                                 goto init_failed;
2198                         }
2199                         adev->ip_blocks[i].status.hw = true;
2200
2201                         /* right after GMC hw init, we create CSA */
2202                         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2203                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2204                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2205                                                                 AMDGPU_CSA_SIZE);
2206                                 if (r) {
2207                                         DRM_ERROR("allocate CSA failed %d\n", r);
2208                                         goto init_failed;
2209                                 }
2210                         }
2211                 }
2212         }
2213
2214         if (amdgpu_sriov_vf(adev))
2215                 amdgpu_virt_init_data_exchange(adev);
2216
2217         r = amdgpu_ib_pool_init(adev);
2218         if (r) {
2219                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2220                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2221                 goto init_failed;
2222         }
2223
2224         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2225         if (r)
2226                 goto init_failed;
2227
2228         r = amdgpu_device_ip_hw_init_phase1(adev);
2229         if (r)
2230                 goto init_failed;
2231
2232         r = amdgpu_device_fw_loading(adev);
2233         if (r)
2234                 goto init_failed;
2235
2236         r = amdgpu_device_ip_hw_init_phase2(adev);
2237         if (r)
2238                 goto init_failed;
2239
2240         /*
2241          * Retired pages will be loaded from eeprom and reserved here.
2242          * This must be called after amdgpu_device_ip_hw_init_phase2, since
2243          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2244          * functional for I2C communication, which is only true at this point.
2245          *
2246          * amdgpu_ras_recovery_init may fail, but the upper layer only cares
2247          * about failures caused by a bad gpu state and stops the amdgpu init
2248          * process accordingly. For other failures it still releases all the
2249          * resources and prints an error message, rather than returning a
2250          * negative value to the upper level.
2251          *
2252          * Note: theoretically, this should be called before all vram allocations
2253          * to protect retired pages from being reused.
2254          */
2255         r = amdgpu_ras_recovery_init(adev);
2256         if (r)
2257                 goto init_failed;
2258
2259         if (adev->gmc.xgmi.num_physical_nodes > 1)
2260                 amdgpu_xgmi_add_device(adev);
2261
2262         /* Don't init kfd if whole hive need to be reset during init */
2263         if (!adev->gmc.xgmi.pending_reset)
2264                 amdgpu_amdkfd_device_init(adev);
2265
2266         amdgpu_fru_get_product_info(adev);
2267
2268 init_failed:
2269         if (amdgpu_sriov_vf(adev))
2270                 amdgpu_virt_release_full_gpu(adev, true);
2271
2272         return r;
2273 }
2274
2275 /**
2276  * amdgpu_device_fill_reset_magic - saves the reset magic from the gart pointer
2277  *
2278  * @adev: amdgpu_device pointer
2279  *
2280  * Saves a copy of the data at the gart pointer in VRAM (the reset magic).  The
2281  * driver calls this function before a GPU reset.  If the same value is found
2282  * after a GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2283  */
2284 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2285 {
2286         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2287 }
2288
2289 /**
2290  * amdgpu_device_check_vram_lost - check if vram is valid
2291  *
2292  * @adev: amdgpu_device pointer
2293  *
2294  * Checks the reset magic value written to the gart pointer in VRAM.
2295  * The driver calls this after a GPU reset to see if the contents of
2296  * VRAM have been lost or not.
2297  * returns true if vram is lost, false if not.
2298  */
2299 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2300 {
2301         if (memcmp(adev->gart.ptr, adev->reset_magic,
2302                         AMDGPU_RESET_MAGIC_NUM))
2303                 return true;
2304
2305         if (!amdgpu_in_reset(adev))
2306                 return false;
2307
2308         /*
2309          * For all ASICs with baco/mode1 reset, the VRAM is
2310          * always assumed to be lost.
2311          */
2312         switch (amdgpu_asic_reset_method(adev)) {
2313         case AMD_RESET_METHOD_BACO:
2314         case AMD_RESET_METHOD_MODE1:
2315                 return true;
2316         default:
2317                 return false;
2318         }
2319 }
2320
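/*
 * Illustrative usage sketch (editor's note, not part of the driver): the GPU
 * reset path pairs these two helpers, saving the magic before a reset and
 * comparing it afterwards, roughly:
 *
 *	bool vram_lost = amdgpu_device_check_vram_lost(adev);
 *
 *	if (vram_lost)
 *		... restore VRAM contents (e.g. buffer recovery) ...
 */
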
2321 /**
2322  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2323  *
2324  * @adev: amdgpu_device pointer
2325  * @state: clockgating state (gate or ungate)
2326  *
2327  * The list of all the hardware IPs that make up the asic is walked and the
2328  * set_clockgating_state callbacks are run.
2329  * During late init, this pass enables clockgating for the hardware IPs;
2330  * during fini or suspend, it disables clockgating for them.
2331  * Returns 0 on success, negative error code on failure.
2332  */
2333
2334 static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2335                                                 enum amd_clockgating_state state)
2336 {
2337         int i, j, r;
2338
2339         if (amdgpu_emu_mode == 1)
2340                 return 0;
2341
2342         for (j = 0; j < adev->num_ip_blocks; j++) {
2343                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2344                 if (!adev->ip_blocks[i].status.late_initialized)
2345                         continue;
2346                 /* skip CG for UVD/VCE/VCN/JPEG, it's handled specially */
2347                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2348                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2349                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2350                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2351                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2352                         /* enable clockgating to save power */
2353                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2354                                                                                      state);
2355                         if (r) {
2356                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2357                                           adev->ip_blocks[i].version->funcs->name, r);
2358                                 return r;
2359                         }
2360                 }
2361         }
2362
2363         return 0;
2364 }
2365
2366 static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
2367 {
2368         int i, j, r;
2369
2370         if (amdgpu_emu_mode == 1)
2371                 return 0;
2372
2373         for (j = 0; j < adev->num_ip_blocks; j++) {
2374                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2375                 if (!adev->ip_blocks[i].status.late_initialized)
2376                         continue;
2377                 /* skip PG for UVD/VCE/VCN/JPEG, it's handled specially */
2378                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2379                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2380                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2381                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2382                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2383                         /* enable powergating to save power */
2384                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2385                                                                                         state);
2386                         if (r) {
2387                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2388                                           adev->ip_blocks[i].version->funcs->name, r);
2389                                 return r;
2390                         }
2391                 }
2392         }
2393         return 0;
2394 }
2395
2396 static int amdgpu_device_enable_mgpu_fan_boost(void)
2397 {
2398         struct amdgpu_gpu_instance *gpu_ins;
2399         struct amdgpu_device *adev;
2400         int i, ret = 0;
2401
2402         mutex_lock(&mgpu_info.mutex);
2403
2404         /*
2405          * MGPU fan boost feature should be enabled
2406          * only when there are two or more dGPUs in
2407          * the system
2408          */
2409         if (mgpu_info.num_dgpu < 2)
2410                 goto out;
2411
2412         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2413                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2414                 adev = gpu_ins->adev;
2415                 if (!(adev->flags & AMD_IS_APU) &&
2416                     !gpu_ins->mgpu_fan_enabled) {
2417                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2418                         if (ret)
2419                                 break;
2420
2421                         gpu_ins->mgpu_fan_enabled = 1;
2422                 }
2423         }
2424
2425 out:
2426         mutex_unlock(&mgpu_info.mutex);
2427
2428         return ret;
2429 }
2430
2431 /**
2432  * amdgpu_device_ip_late_init - run late init for hardware IPs
2433  *
2434  * @adev: amdgpu_device pointer
2435  *
2436  * Late initialization pass for hardware IPs.  The list of all the hardware
2437  * IPs that make up the asic is walked and the late_init callbacks are run.
2438  * late_init covers any special initialization that an IP requires
2439  * after all of the IPs have been initialized or something that needs to happen
2440  * late in the init process.
2441  * Returns 0 on success, negative error code on failure.
2442  */
2443 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2444 {
2445         struct amdgpu_gpu_instance *gpu_instance;
2446         int i = 0, r;
2447
2448         for (i = 0; i < adev->num_ip_blocks; i++) {
2449                 if (!adev->ip_blocks[i].status.hw)
2450                         continue;
2451                 if (adev->ip_blocks[i].version->funcs->late_init) {
2452                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2453                         if (r) {
2454                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2455                                           adev->ip_blocks[i].version->funcs->name, r);
2456                                 return r;
2457                         }
2458                 }
2459                 adev->ip_blocks[i].status.late_initialized = true;
2460         }
2461
2462         amdgpu_ras_set_error_query_ready(adev, true);
2463
2464         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2465         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2466
2467         amdgpu_device_fill_reset_magic(adev);
2468
2469         r = amdgpu_device_enable_mgpu_fan_boost();
2470         if (r)
2471                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2472
2473         /* For XGMI + passthrough configuration on arcturus, enable light SBR */
2474         if (adev->asic_type == CHIP_ARCTURUS &&
2475             amdgpu_passthrough(adev) &&
2476             adev->gmc.xgmi.num_physical_nodes > 1)
2477                 smu_set_light_sbr(&adev->smu, true);
2478
2479         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2480                 mutex_lock(&mgpu_info.mutex);
2481
2482                 /*
2483                  * Reset the device p-state to low, as this was booted with high.
2484                  *
2485                  * This should be performed only after all devices from the same
2486                  * hive have been initialized.
2487                  *
2488                  * However, the number of devices in the hive is not known in
2489                  * advance; it is counted up one by one as devices initialize.
2490                  *
2491                  * So we wait until all XGMI-interlinked devices are initialized.
2492                  * This may add some delay, as those devices may come from
2493                  * different hives, but that should be OK.
2494                  */
2495                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2496                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2497                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2498                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2499                                         continue;
2500
2501                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2502                                                 AMDGPU_XGMI_PSTATE_MIN);
2503                                 if (r) {
2504                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2505                                         break;
2506                                 }
2507                         }
2508                 }
2509
2510                 mutex_unlock(&mgpu_info.mutex);
2511         }
2512
2513         return 0;
2514 }
2515
2516 /**
2517  * amdgpu_device_ip_fini - run fini for hardware IPs
2518  *
2519  * @adev: amdgpu_device pointer
2520  *
2521  * Main teardown pass for hardware IPs.  The list of all the hardware
2522  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2523  * are run.  hw_fini tears down the hardware associated with each IP
2524  * and sw_fini tears down any software state associated with each IP.
2525  * Returns 0 on success, negative error code on failure.
2526  */
2527 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2528 {
2529         int i, r;
2530
2531         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2532                 amdgpu_virt_release_ras_err_handler_data(adev);
2533
2534         amdgpu_ras_pre_fini(adev);
2535
2536         if (adev->gmc.xgmi.num_physical_nodes > 1)
2537                 amdgpu_xgmi_remove_device(adev);
2538
2539         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2540         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2541
2542         amdgpu_amdkfd_device_fini(adev);
2543
2544         /* need to disable SMC first */
2545         for (i = 0; i < adev->num_ip_blocks; i++) {
2546                 if (!adev->ip_blocks[i].status.hw)
2547                         continue;
2548                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2549                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2550                         /* XXX handle errors */
2551                         if (r) {
2552                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2553                                           adev->ip_blocks[i].version->funcs->name, r);
2554                         }
2555                         adev->ip_blocks[i].status.hw = false;
2556                         break;
2557                 }
2558         }
2559
2560         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2561                 if (!adev->ip_blocks[i].status.hw)
2562                         continue;
2563
2564                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2565                 /* XXX handle errors */
2566                 if (r) {
2567                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2568                                   adev->ip_blocks[i].version->funcs->name, r);
2569                 }
2570
2571                 adev->ip_blocks[i].status.hw = false;
2572         }
2573
2574
2575         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2576                 if (!adev->ip_blocks[i].status.sw)
2577                         continue;
2578
2579                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2580                         amdgpu_ucode_free_bo(adev);
2581                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2582                         amdgpu_device_wb_fini(adev);
2583                         amdgpu_device_vram_scratch_fini(adev);
2584                         amdgpu_ib_pool_fini(adev);
2585                 }
2586
2587                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2588                 /* XXX handle errors */
2589                 if (r) {
2590                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2591                                   adev->ip_blocks[i].version->funcs->name, r);
2592                 }
2593                 adev->ip_blocks[i].status.sw = false;
2594                 adev->ip_blocks[i].status.valid = false;
2595         }
2596
2597         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2598                 if (!adev->ip_blocks[i].status.late_initialized)
2599                         continue;
2600                 if (adev->ip_blocks[i].version->funcs->late_fini)
2601                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2602                 adev->ip_blocks[i].status.late_initialized = false;
2603         }
2604
2605         amdgpu_ras_fini(adev);
2606
2607         if (amdgpu_sriov_vf(adev))
2608                 if (amdgpu_virt_release_full_gpu(adev, false))
2609                         DRM_ERROR("failed to release exclusive mode on fini\n");
2610
2611         return 0;
2612 }
2613
2614 /**
2615  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2616  *
2617  * @work: work_struct.
2618  */
2619 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2620 {
2621         struct amdgpu_device *adev =
2622                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2623         int r;
2624
2625         r = amdgpu_ib_ring_tests(adev);
2626         if (r)
2627                 DRM_ERROR("ib ring test failed (%d).\n", r);
2628 }
2629
2630 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2631 {
2632         struct amdgpu_device *adev =
2633                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2634
2635         mutex_lock(&adev->gfx.gfx_off_mutex);
2636         if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2637                 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2638                         adev->gfx.gfx_off_state = true;
2639         }
2640         mutex_unlock(&adev->gfx.gfx_off_mutex);
2641 }
2642
2643 /**
2644  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2645  *
2646  * @adev: amdgpu_device pointer
2647  *
2648  * First suspend pass for hardware IPs.  The list of all the hardware
2649  * IPs that make up the asic is walked, clockgating is disabled and the
2650  * suspend callbacks are run for the display (DCE) blocks only.  suspend
2651  * puts the hardware and software state in each IP into a state suitable for suspend.
2652  * Returns 0 on success, negative error code on failure.
2653  */
2654 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2655 {
2656         int i, r;
2657
2658         if (adev->in_poweroff_reboot_com ||
2659             !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
2660                 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2661                 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2662         }
2663
2664         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2665                 if (!adev->ip_blocks[i].status.valid)
2666                         continue;
2667
2668                 /* displays are handled separately */
2669                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2670                         continue;
2671
2672                 /* XXX handle errors */
2673                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2674                 /* XXX handle errors */
2675                 if (r) {
2676                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2677                                   adev->ip_blocks[i].version->funcs->name, r);
2678                         return r;
2679                 }
2680
2681                 adev->ip_blocks[i].status.hw = false;
2682         }
2683
2684         return 0;
2685 }
2686
2687 /**
2688  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2689  *
2690  * @adev: amdgpu_device pointer
2691  *
2692  * Second suspend pass for hardware IPs.  The suspend callbacks are run
2693  * for all blocks except the display (DCE) blocks, which are handled in
2694  * phase 1.  suspend puts the hardware and software state in each IP into
2695  * a state suitable for suspend.
2696  * Returns 0 on success, negative error code on failure.
2697  */
2698 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2699 {
2700         int i, r;
2701
2702         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2703                 if (!adev->ip_blocks[i].status.valid)
2704                         continue;
2705                 /* displays are handled in phase1 */
2706                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2707                         continue;
2708                 /* PSP lost connection when err_event_athub occurs */
2709                 if (amdgpu_ras_intr_triggered() &&
2710                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2711                         adev->ip_blocks[i].status.hw = false;
2712                         continue;
2713                 }
2714
2715                 /* skip unnecessary suspend if we have not initialized them yet */
2716                 if (adev->gmc.xgmi.pending_reset &&
2717                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2718                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2719                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2720                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2721                         adev->ip_blocks[i].status.hw = false;
2722                         continue;
2723                 }
2724                 /* XXX handle errors */
2725                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2726                 /* XXX handle errors */
2727                 if (r) {
2728                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2729                                   adev->ip_blocks[i].version->funcs->name, r);
2730                 }
2731                 adev->ip_blocks[i].status.hw = false;
2732                 /* handle putting the SMC in the appropriate state */
2733                 if (!amdgpu_sriov_vf(adev)) {
2734                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2735                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2736                                 if (r) {
2737                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2738                                                         adev->mp1_state, r);
2739                                         return r;
2740                                 }
2741                         }
2742                 }
2743         }
2744
2745         return 0;
2746 }
2747
2748 /**
2749  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2750  *
2751  * @adev: amdgpu_device pointer
2752  *
2753  * Main suspend function for hardware IPs.  The list of all the hardware
2754  * IPs that make up the asic is walked, clockgating is disabled and the
2755  * suspend callbacks are run.  suspend puts the hardware and software state
2756  * in each IP into a state suitable for suspend.
2757  * Returns 0 on success, negative error code on failure.
2758  */
2759 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2760 {
2761         int r;
2762
2763         if (amdgpu_sriov_vf(adev)) {
2764                 amdgpu_virt_fini_data_exchange(adev);
2765                 amdgpu_virt_request_full_gpu(adev, false);
2766         }
2767
2768         r = amdgpu_device_ip_suspend_phase1(adev);
2769         if (r)
2770                 return r;
2771         r = amdgpu_device_ip_suspend_phase2(adev);
2772
2773         if (amdgpu_sriov_vf(adev))
2774                 amdgpu_virt_release_full_gpu(adev, false);
2775
2776         return r;
2777 }
2778
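/*
 * Illustrative usage sketch (editor's note, not part of the driver): reset and
 * teardown paths use this exported helper to quiesce every IP block, e.g.:
 *
 *	r = amdgpu_device_ip_suspend(adev);
 *	if (r)
 *		dev_err(adev->dev, "suspending IP blocks failed (%d)\n", r);
 */
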
2779 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2780 {
2781         int i, r;
2782
2783         static enum amd_ip_block_type ip_order[] = {
2784                 AMD_IP_BLOCK_TYPE_GMC,
2785                 AMD_IP_BLOCK_TYPE_COMMON,
2786                 AMD_IP_BLOCK_TYPE_PSP,
2787                 AMD_IP_BLOCK_TYPE_IH,
2788         };
2789
2790         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2791                 int j;
2792                 struct amdgpu_ip_block *block;
2793
2794                 block = &adev->ip_blocks[i];
2795                 block->status.hw = false;
2796
2797                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2798
2799                         if (block->version->type != ip_order[j] ||
2800                                 !block->status.valid)
2801                                 continue;
2802
2803                         r = block->version->funcs->hw_init(adev);
2804                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2805                         if (r)
2806                                 return r;
2807                         block->status.hw = true;
2808                 }
2809         }
2810
2811         return 0;
2812 }
2813
2814 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2815 {
2816         int i, r;
2817
2818         static enum amd_ip_block_type ip_order[] = {
2819                 AMD_IP_BLOCK_TYPE_SMC,
2820                 AMD_IP_BLOCK_TYPE_DCE,
2821                 AMD_IP_BLOCK_TYPE_GFX,
2822                 AMD_IP_BLOCK_TYPE_SDMA,
2823                 AMD_IP_BLOCK_TYPE_UVD,
2824                 AMD_IP_BLOCK_TYPE_VCE,
2825                 AMD_IP_BLOCK_TYPE_VCN
2826         };
2827
2828         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2829                 int j;
2830                 struct amdgpu_ip_block *block;
2831
2832                 for (j = 0; j < adev->num_ip_blocks; j++) {
2833                         block = &adev->ip_blocks[j];
2834
2835                         if (block->version->type != ip_order[i] ||
2836                                 !block->status.valid ||
2837                                 block->status.hw)
2838                                 continue;
2839
2840                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2841                                 r = block->version->funcs->resume(adev);
2842                         else
2843                                 r = block->version->funcs->hw_init(adev);
2844
2845                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
2846                         if (r)
2847                                 return r;
2848                         block->status.hw = true;
2849                 }
2850         }
2851
2852         return 0;
2853 }
2854
2855 /**
2856  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2857  *
2858  * @adev: amdgpu_device pointer
2859  *
2860  * First resume function for hardware IPs.  The list of all the hardware
2861  * IPs that make up the asic is walked and the resume callbacks are run for
2862  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2863  * after a suspend and updates the software state as necessary.  This
2864  * function is also used for restoring the GPU after a GPU reset.
2865  * Returns 0 on success, negative error code on failure.
2866  */
2867 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2868 {
2869         int i, r;
2870
2871         for (i = 0; i < adev->num_ip_blocks; i++) {
2872                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2873                         continue;
2874                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2875                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2876                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2877
2878                         r = adev->ip_blocks[i].version->funcs->resume(adev);
2879                         if (r) {
2880                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
2881                                           adev->ip_blocks[i].version->funcs->name, r);
2882                                 return r;
2883                         }
2884                         adev->ip_blocks[i].status.hw = true;
2885                 }
2886         }
2887
2888         return 0;
2889 }
2890
2891 /**
2892  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2893  *
2894  * @adev: amdgpu_device pointer
2895  *
2896  * Second resume function for hardware IPs.  The list of all the hardware
2897  * IPs that make up the asic is walked and the resume callbacks are run for
2898  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2899  * functional state after a suspend and updates the software state as
2900  * necessary.  This function is also used for restoring the GPU after a GPU
2901  * reset.
2902  * Returns 0 on success, negative error code on failure.
2903  */
2904 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2905 {
2906         int i, r;
2907
2908         for (i = 0; i < adev->num_ip_blocks; i++) {
2909                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2910                         continue;
2911                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2912                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2913                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2914                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2915                         continue;
2916                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2917                 if (r) {
2918                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2919                                   adev->ip_blocks[i].version->funcs->name, r);
2920                         return r;
2921                 }
2922                 adev->ip_blocks[i].status.hw = true;
2923         }
2924
2925         return 0;
2926 }
2927
2928 /**
2929  * amdgpu_device_ip_resume - run resume for hardware IPs
2930  *
2931  * @adev: amdgpu_device pointer
2932  *
2933  * Main resume function for hardware IPs.  The hardware IPs
2934  * are split into two resume functions because they are
2935  * also used in recovering from a GPU reset and some additional
2936  * steps need to be taken between them.  In this case (S3/S4) they are
2937  * run sequentially.
2938  * Returns 0 on success, negative error code on failure.
2939  */
2940 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2941 {
2942         int r;
2943
2944         r = amdgpu_device_ip_resume_phase1(adev);
2945         if (r)
2946                 return r;
2947
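        /*
         * Note the ordering here: firmware loading runs between the two
         * resume phases.  Phase 1 above brings up COMMON, GMC and IH, and
         * the blocks resumed in phase 2 below expect their firmware to
         * already be loaded.
         */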
2948         r = amdgpu_device_fw_loading(adev);
2949         if (r)
2950                 return r;
2951
2952         r = amdgpu_device_ip_resume_phase2(adev);
2953
2954         return r;
2955 }
2956
2957 /**
2958  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2959  *
2960  * @adev: amdgpu_device pointer
2961  *
2962  * Query the VBIOS data tables to determine if the board supports SR-IOV.
2963  */
2964 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2965 {
2966         if (amdgpu_sriov_vf(adev)) {
2967                 if (adev->is_atom_fw) {
2968                         if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2969                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2970                 } else {
2971                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2972                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2973                 }
2974
2975                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2976                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2977         }
2978 }
2979
2980 /**
2981  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2982  *
2983  * @asic_type: AMD asic type
2984  *
2985  * Check if there is DC (new modesetting infrastructure) support for an asic.
2986  * Returns true if DC has support, false if not.
2987  */
2988 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2989 {
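        /*
         * amdgpu_dc mirrors the amdgpu.dc module parameter (-1 = auto,
         * the default; 0 = force the legacy display path; 1 = force DC
         * where available), which is why the SI/CIK parts in the first
         * group below only get DC when it was explicitly requested.
         */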
2990         switch (asic_type) {
2991 #if defined(CONFIG_DRM_AMD_DC)
2992 #if defined(CONFIG_DRM_AMD_DC_SI)
2993         case CHIP_TAHITI:
2994         case CHIP_PITCAIRN:
2995         case CHIP_VERDE:
2996         case CHIP_OLAND:
2997 #endif
2998         case CHIP_BONAIRE:
2999         case CHIP_KAVERI:
3000         case CHIP_KABINI:
3001         case CHIP_MULLINS:
3002                 /*
3003                  * We have systems in the wild with these ASICs that require
3004                  * LVDS and VGA support which is not supported with DC.
3005                  *
3006                  * Fallback to the non-DC driver here by default so as not to
3007                  * cause regressions.
3008                  */
3009                 return amdgpu_dc > 0;
3010         case CHIP_HAWAII:
3011         case CHIP_CARRIZO:
3012         case CHIP_STONEY:
3013         case CHIP_POLARIS10:
3014         case CHIP_POLARIS11:
3015         case CHIP_POLARIS12:
3016         case CHIP_VEGAM:
3017         case CHIP_TONGA:
3018         case CHIP_FIJI:
3019         case CHIP_VEGA10:
3020         case CHIP_VEGA12:
3021         case CHIP_VEGA20:
3022 #if defined(CONFIG_DRM_AMD_DC_DCN)
3023         case CHIP_RAVEN:
3024         case CHIP_NAVI10:
3025         case CHIP_NAVI14:
3026         case CHIP_NAVI12:
3027         case CHIP_RENOIR:
3028         case CHIP_SIENNA_CICHLID:
3029         case CHIP_NAVY_FLOUNDER:
3030         case CHIP_DIMGREY_CAVEFISH:
3031         case CHIP_VANGOGH:
3032 #endif
3033                 return amdgpu_dc != 0;
3034 #endif
3035         default:
3036                 if (amdgpu_dc > 0)
3037                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3038                                          "but isn't supported by ASIC, ignoring\n");
3039                 return false;
3040         }
3041 }
3042
3043 /**
3044  * amdgpu_device_has_dc_support - check if dc is supported
3045  *
3046  * @adev: amdgpu_device pointer
3047  *
3048  * Returns true for supported, false for not supported
3049  */
3050 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3051 {
3052         if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
3053                 return false;
3054
3055         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3056 }
3057
3058
3059 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3060 {
3061         struct amdgpu_device *adev =
3062                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3063         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3064
3065         /* It's a bug to not have a hive within this function */
3066         if (WARN_ON(!hive))
3067                 return;
3068
3069         /*
3070          * Use task barrier to synchronize all xgmi reset works across the
3071          * hive. task_barrier_enter and task_barrier_exit will block
3072          * until all the threads running the xgmi reset works reach
3073          * those points. task_barrier_full will do both blocks.
3074          */
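        /*
         * Concretely, for the BACO path below: all nodes rendezvous in
         * task_barrier_enter() before entering BACO, and again in
         * task_barrier_exit() before leaving it, so no node exits BACO
         * until the whole hive has entered.  The non-BACO path needs only
         * a single rendezvous, hence task_barrier_full().
         */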
3075         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3076
3077                 task_barrier_enter(&hive->tb);
3078                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3079
3080                 if (adev->asic_reset_res)
3081                         goto fail;
3082
3083                 task_barrier_exit(&hive->tb);
3084                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3085
3086                 if (adev->asic_reset_res)
3087                         goto fail;
3088
3089                 if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
3090                         adev->mmhub.funcs->reset_ras_error_count(adev);
3091         } else {
3092
3093                 task_barrier_full(&hive->tb);
3094                 adev->asic_reset_res =  amdgpu_asic_reset(adev);
3095         }
3096
3097 fail:
3098         if (adev->asic_reset_res)
3099                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3100                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3101         amdgpu_put_xgmi_hive(hive);
3102 }
3103
3104 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3105 {
3106         char *input = amdgpu_lockup_timeout;
3107         char *timeout_setting = NULL;
3108         int index = 0;
3109         long timeout;
3110         int ret = 0;
3111
3112         /*
3113          * By default the timeout for non-compute jobs is 10000 ms,
3114          * and there is no timeout enforced on compute jobs.
3115          * In SR-IOV or passthrough mode, the timeout for compute
3116          * jobs is 60000 ms by default.
3117          */
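        /*
         * The amdgpu.lockup_timeout parameter parsed below takes up to four
         * comma-separated values in milliseconds, in the order
         * gfx,compute,sdma,video.  For example (illustrative values only):
         *   amdgpu.lockup_timeout=10000,60000,10000,10000
         * A single value applies to all non-compute queues, 0 keeps the
         * default and a negative value disables the timeout.
         */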
3118         adev->gfx_timeout = msecs_to_jiffies(10000);
3119         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3120         if (amdgpu_sriov_vf(adev))
3121                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3122                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3123         else if (amdgpu_passthrough(adev))
3124                 adev->compute_timeout =  msecs_to_jiffies(60000);
3125         else
3126                 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
3127
3128         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3129                 while ((timeout_setting = strsep(&input, ",")) &&
3130                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3131                         ret = kstrtol(timeout_setting, 0, &timeout);
3132                         if (ret)
3133                                 return ret;
3134
3135                         if (timeout == 0) {
3136                                 index++;
3137                                 continue;
3138                         } else if (timeout < 0) {
3139                                 timeout = MAX_SCHEDULE_TIMEOUT;
3140                         } else {
3141                                 timeout = msecs_to_jiffies(timeout);
3142                         }
3143
3144                         switch (index++) {
3145                         case 0:
3146                                 adev->gfx_timeout = timeout;
3147                                 break;
3148                         case 1:
3149                                 adev->compute_timeout = timeout;
3150                                 break;
3151                         case 2:
3152                                 adev->sdma_timeout = timeout;
3153                                 break;
3154                         case 3:
3155                                 adev->video_timeout = timeout;
3156                                 break;
3157                         default:
3158                                 break;
3159                         }
3160                 }
3161                 /*
3162                  * There is only one value specified and
3163                  * it should apply to all non-compute jobs.
3164                  */
3165                 if (index == 1) {
3166                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3167                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3168                                 adev->compute_timeout = adev->gfx_timeout;
3169                 }
3170         }
3171
3172         return ret;
3173 }
3174
3175 static const struct attribute *amdgpu_dev_attributes[] = {
3176         &dev_attr_product_name.attr,
3177         &dev_attr_product_number.attr,
3178         &dev_attr_serial_number.attr,
3179         &dev_attr_pcie_replay_count.attr,
3180         NULL
3181 };
3182
3183
3184 /**
3185  * amdgpu_device_init - initialize the driver
3186  *
3187  * @adev: amdgpu_device pointer
3188  * @flags: driver flags
3189  *
3190  * Initializes the driver info and hw (all asics).
3191  * Returns 0 for success or an error on failure.
3192  * Called at driver startup.
3193  */
3194 int amdgpu_device_init(struct amdgpu_device *adev,
3195                        uint32_t flags)
3196 {
3197         struct drm_device *ddev = adev_to_drm(adev);
3198         struct pci_dev *pdev = adev->pdev;
3199         int r, i;
3200         bool atpx = false;
3201         u32 max_MBps;
3202
3203         adev->shutdown = false;
3204         adev->flags = flags;
3205
3206         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3207                 adev->asic_type = amdgpu_force_asic_type;
3208         else
3209                 adev->asic_type = flags & AMD_ASIC_MASK;
3210
3211         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3212         if (amdgpu_emu_mode == 1)
3213                 adev->usec_timeout *= 10;
3214         adev->gmc.gart_size = 512 * 1024 * 1024;
3215         adev->accel_working = false;
3216         adev->num_rings = 0;
3217         adev->mman.buffer_funcs = NULL;
3218         adev->mman.buffer_funcs_ring = NULL;
3219         adev->vm_manager.vm_pte_funcs = NULL;
3220         adev->vm_manager.vm_pte_num_scheds = 0;
3221         adev->gmc.gmc_funcs = NULL;
3222         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3223         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3224
3225         adev->smc_rreg = &amdgpu_invalid_rreg;
3226         adev->smc_wreg = &amdgpu_invalid_wreg;
3227         adev->pcie_rreg = &amdgpu_invalid_rreg;
3228         adev->pcie_wreg = &amdgpu_invalid_wreg;
3229         adev->pciep_rreg = &amdgpu_invalid_rreg;
3230         adev->pciep_wreg = &amdgpu_invalid_wreg;
3231         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3232         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3233         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3234         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3235         adev->didt_rreg = &amdgpu_invalid_rreg;
3236         adev->didt_wreg = &amdgpu_invalid_wreg;
3237         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3238         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3239         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3240         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3241
3242         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3243                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3244                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3245
3246         /* mutex initialization is all done here so we
3247          * can call these functions again later without locking issues */
3248         mutex_init(&adev->firmware.mutex);
3249         mutex_init(&adev->pm.mutex);
3250         mutex_init(&adev->gfx.gpu_clock_mutex);
3251         mutex_init(&adev->srbm_mutex);
3252         mutex_init(&adev->gfx.pipe_reserve_mutex);
3253         mutex_init(&adev->gfx.gfx_off_mutex);
3254         mutex_init(&adev->grbm_idx_mutex);
3255         mutex_init(&adev->mn_lock);
3256         mutex_init(&adev->virt.vf_errors.lock);
3257         hash_init(adev->mn_hash);
3258         atomic_set(&adev->in_gpu_reset, 0);
3259         init_rwsem(&adev->reset_sem);
3260         mutex_init(&adev->psp.mutex);
3261         mutex_init(&adev->notifier_lock);
3262
3263         r = amdgpu_device_check_arguments(adev);
3264         if (r)
3265                 return r;
3266
3267         spin_lock_init(&adev->mmio_idx_lock);
3268         spin_lock_init(&adev->smc_idx_lock);
3269         spin_lock_init(&adev->pcie_idx_lock);
3270         spin_lock_init(&adev->uvd_ctx_idx_lock);
3271         spin_lock_init(&adev->didt_idx_lock);
3272         spin_lock_init(&adev->gc_cac_idx_lock);
3273         spin_lock_init(&adev->se_cac_idx_lock);
3274         spin_lock_init(&adev->audio_endpt_idx_lock);
3275         spin_lock_init(&adev->mm_stats.lock);
3276
3277         INIT_LIST_HEAD(&adev->shadow_list);
3278         mutex_init(&adev->shadow_list_lock);
3279
3280         INIT_LIST_HEAD(&adev->reset_list);
3281
3282         INIT_DELAYED_WORK(&adev->delayed_init_work,
3283                           amdgpu_device_delayed_init_work_handler);
3284         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3285                           amdgpu_device_delay_enable_gfx_off);
3286
3287         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3288
3289         adev->gfx.gfx_off_req_count = 1;
3290         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3291
3292         atomic_set(&adev->throttling_logging_enabled, 1);
3293         /*
3294          * If throttling continues, logging will be performed every minute
3295          * to avoid log flooding. "-1" is subtracted since the thermal
3296          * throttling interrupt comes every second. Thus, the total logging
3297          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3298          * for throttling interrupt) = 60 seconds.
3299          */
3300         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3301         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3302
3303         /* Registers mapping */
3304         /* TODO: block userspace mapping of io register */
3305         if (adev->asic_type >= CHIP_BONAIRE) {
3306                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3307                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3308         } else {
3309                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3310                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3311         }
3312
3313         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3314         if (adev->rmmio == NULL) {
3315                 return -ENOMEM;
3316         }
3317         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3318         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3319
3320         /* enable PCIE atomic ops */
3321         r = pci_enable_atomic_ops_to_root(adev->pdev,
3322                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3323                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3324         if (r) {
3325                 adev->have_atomics_support = false;
3326                 DRM_INFO("PCIE atomic ops are not supported\n");
3327         } else {
3328                 adev->have_atomics_support = true;
3329         }
3330
3331         amdgpu_device_get_pcie_info(adev);
3332
3333         if (amdgpu_mcbp)
3334                 DRM_INFO("MCBP is enabled\n");
3335
3336         if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3337                 adev->enable_mes = true;
3338
3339         /* detect hw virtualization here */
3340         amdgpu_detect_virtualization(adev);
3341
3342         r = amdgpu_device_get_job_timeout_settings(adev);
3343         if (r) {
3344                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3345                 goto failed_unmap;
3346         }
3347
3348         /* early init functions */
3349         r = amdgpu_device_ip_early_init(adev);
3350         if (r)
3351                 goto failed_unmap;
3352
3353         /* doorbell bar mapping and doorbell index init*/
3354         amdgpu_device_doorbell_init(adev);
3355
3356         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3357         /* this will fail for cards that aren't VGA class devices, just
3358          * ignore it */
3359         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3360                 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3361
3362         if (amdgpu_device_supports_atpx(ddev))
3363                 atpx = true;
3364         if (amdgpu_has_atpx() &&
3365             (amdgpu_is_atpx_hybrid() ||
3366              amdgpu_has_atpx_dgpu_power_cntl()) &&
3367             !pci_is_thunderbolt_attached(adev->pdev))
3368                 vga_switcheroo_register_client(adev->pdev,
3369                                                &amdgpu_switcheroo_ops, atpx);
3370         if (atpx)
3371                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3372
3373         if (amdgpu_emu_mode == 1) {
3374                 /* post the asic on emulation mode */
3375                 emu_soc_asic_init(adev);
3376                 goto fence_driver_init;
3377         }
3378
3379         /* detect if we are running with an SR-IOV vBIOS */
3380         amdgpu_device_detect_sriov_bios(adev);
3381
3382         /* check if we need to reset the asic
3383          *  E.g., driver was not cleanly unloaded previously, etc.
3384          */
3385         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3386                 if (adev->gmc.xgmi.num_physical_nodes) {
3387                         dev_info(adev->dev, "Pending hive reset.\n");
3388                         adev->gmc.xgmi.pending_reset = true;
3389                         /* Only need to init necessary block for SMU to handle the reset */
3390                         for (i = 0; i < adev->num_ip_blocks; i++) {
3391                                 if (!adev->ip_blocks[i].status.valid)
3392                                         continue;
3393                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3394                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3395                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3396                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3397                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3398                                                 adev->ip_blocks[i].version->funcs->name);
3399                                         adev->ip_blocks[i].status.hw = true;
3400                                 }
3401                         }
3402                 } else {
3403                         r = amdgpu_asic_reset(adev);
3404                         if (r) {
3405                                 dev_err(adev->dev, "asic reset on init failed\n");
3406                                 goto failed;
3407                         }
3408                 }
3409         }
3410
3411         pci_enable_pcie_error_reporting(adev->pdev);
3412
3413         /* Post card if necessary */
3414         if (amdgpu_device_need_post(adev)) {
3415                 if (!adev->bios) {
3416                         dev_err(adev->dev, "no vBIOS found\n");
3417                         r = -EINVAL;
3418                         goto failed;
3419                 }
3420                 DRM_INFO("GPU posting now...\n");
3421                 r = amdgpu_device_asic_init(adev);
3422                 if (r) {
3423                         dev_err(adev->dev, "gpu post error!\n");
3424                         goto failed;
3425                 }
3426         }
3427
3428         if (adev->is_atom_fw) {
3429                 /* Initialize clocks */
3430                 r = amdgpu_atomfirmware_get_clock_info(adev);
3431                 if (r) {
3432                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3433                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3434                         goto failed;
3435                 }
3436         } else {
3437                 /* Initialize clocks */
3438                 r = amdgpu_atombios_get_clock_info(adev);
3439                 if (r) {
3440                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3441                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3442                         goto failed;
3443                 }
3444                 /* init i2c buses */
3445                 if (!amdgpu_device_has_dc_support(adev))
3446                         amdgpu_atombios_i2c_init(adev);
3447         }
3448
3449 fence_driver_init:
3450         /* Fence driver */
3451         r = amdgpu_fence_driver_init(adev);
3452         if (r) {
3453                 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3454                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3455                 goto failed;
3456         }
3457
3458         /* init the mode config */
3459         drm_mode_config_init(adev_to_drm(adev));
3460
3461         r = amdgpu_device_ip_init(adev);
3462         if (r) {
3463                 /* failed in exclusive mode due to timeout */
3464                 if (amdgpu_sriov_vf(adev) &&
3465                     !amdgpu_sriov_runtime(adev) &&
3466                     amdgpu_virt_mmio_blocked(adev) &&
3467                     !amdgpu_virt_wait_reset(adev)) {
3468                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3469                         /* Don't send request since VF is inactive. */
3470                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3471                         adev->virt.ops = NULL;
3472                         r = -EAGAIN;
3473                         goto release_ras_con;
3474                 }
3475                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3476                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3477                 goto release_ras_con;
3478         }
3479
3480         dev_info(adev->dev,
3481                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3482                         adev->gfx.config.max_shader_engines,
3483                         adev->gfx.config.max_sh_per_se,
3484                         adev->gfx.config.max_cu_per_sh,
3485                         adev->gfx.cu_info.number);
3486
3487         adev->accel_working = true;
3488
3489         amdgpu_vm_check_compute_bug(adev);
3490
3491         /* Initialize the buffer migration limit. */
3492         if (amdgpu_moverate >= 0)
3493                 max_MBps = amdgpu_moverate;
3494         else
3495                 max_MBps = 8; /* Allow 8 MB/s. */
3496         /* Get a log2 for easy divisions. */
3497         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3498
3499         amdgpu_fbdev_init(adev);
3500
3501         r = amdgpu_pm_sysfs_init(adev);
3502         if (r) {
3503                 adev->pm_sysfs_en = false;
3504                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3505         } else
3506                 adev->pm_sysfs_en = true;
3507
3508         r = amdgpu_ucode_sysfs_init(adev);
3509         if (r) {
3510                 adev->ucode_sysfs_en = false;
3511                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3512         } else
3513                 adev->ucode_sysfs_en = true;
3514
3515         if ((amdgpu_testing & 1)) {
3516                 if (adev->accel_working)
3517                         amdgpu_test_moves(adev);
3518                 else
3519                         DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3520         }
3521         if (amdgpu_benchmarking) {
3522                 if (adev->accel_working)
3523                         amdgpu_benchmark(adev, amdgpu_benchmarking);
3524                 else
3525                         DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3526         }
3527
3528         /*
3529          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3530          * Otherwise the mgpu fan boost feature will be skipped because the
3531          * gpu instance count would be too low.
3532          */
3533         amdgpu_register_gpu_instance(adev);
3534
3535         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3536          * explicit gating rather than handling it automatically.
3537          */
3538         if (!adev->gmc.xgmi.pending_reset) {
3539                 r = amdgpu_device_ip_late_init(adev);
3540                 if (r) {
3541                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3542                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3543                         goto release_ras_con;
3544                 }
3545                 /* must succeed. */
3546                 amdgpu_ras_resume(adev);
3547                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3548                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3549         }
3550
3551         if (amdgpu_sriov_vf(adev))
3552                 flush_delayed_work(&adev->delayed_init_work);
3553
3554         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3555         if (r)
3556                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3557
3558         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3559                 r = amdgpu_pmu_init(adev);
3560         if (r)
3561                 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3562
3563         /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3564         if (amdgpu_device_cache_pci_state(adev->pdev))
3565                 pci_restore_state(pdev);
3566
3567         if (adev->gmc.xgmi.pending_reset)
3568                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3569                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3570
3571         return 0;
3572
3573 release_ras_con:
3574         amdgpu_release_ras_context(adev);
3575
3576 failed:
3577         amdgpu_vf_error_trans_all(adev);
3578         if (atpx)
3579                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3580
3581 failed_unmap:
3582         iounmap(adev->rmmio);
3583         adev->rmmio = NULL;
3584
3585         return r;
3586 }
3587
3588 /**
3589  * amdgpu_device_fini - tear down the driver
3590  *
3591  * @adev: amdgpu_device pointer
3592  *
3593  * Tear down the driver info (all asics).
3594  * Called at driver shutdown.
3595  */
3596 void amdgpu_device_fini(struct amdgpu_device *adev)
3597 {
3598         dev_info(adev->dev, "amdgpu: finishing device.\n");
3599         flush_delayed_work(&adev->delayed_init_work);
3600         ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3601         adev->shutdown = true;
3602
3603         kfree(adev->pci_state);
3604
3605         /* make sure the IB tests have finished before entering exclusive mode
3606          * to avoid preempting the IB tests
3607          */
3608         if (amdgpu_sriov_vf(adev)) {
3609                 amdgpu_virt_request_full_gpu(adev, false);
3610                 amdgpu_virt_fini_data_exchange(adev);
3611         }
3612
3613         /* disable all interrupts */
3614         amdgpu_irq_disable_all(adev);
3615         if (adev->mode_info.mode_config_initialized){
3616                 if (!amdgpu_device_has_dc_support(adev))
3617                         drm_helper_force_disable_all(adev_to_drm(adev));
3618                 else
3619                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3620         }
3621         amdgpu_fence_driver_fini(adev);
3622         if (adev->pm_sysfs_en)
3623                 amdgpu_pm_sysfs_fini(adev);
3624         amdgpu_fbdev_fini(adev);
3625         amdgpu_device_ip_fini(adev);
3626         release_firmware(adev->firmware.gpu_info_fw);
3627         adev->firmware.gpu_info_fw = NULL;
3628         adev->accel_working = false;
3629         /* free i2c buses */
3630         if (!amdgpu_device_has_dc_support(adev))
3631                 amdgpu_i2c_fini(adev);
3632
3633         if (amdgpu_emu_mode != 1)
3634                 amdgpu_atombios_fini(adev);
3635
3636         kfree(adev->bios);
3637         adev->bios = NULL;
3638         if (amdgpu_has_atpx() &&
3639             (amdgpu_is_atpx_hybrid() ||
3640              amdgpu_has_atpx_dgpu_power_cntl()) &&
3641             !pci_is_thunderbolt_attached(adev->pdev))
3642                 vga_switcheroo_unregister_client(adev->pdev);
3643         if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
3644                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3645         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3646                 vga_client_register(adev->pdev, NULL, NULL, NULL);
3647         iounmap(adev->rmmio);
3648         adev->rmmio = NULL;
3649         amdgpu_device_doorbell_fini(adev);
3650
3651         if (adev->ucode_sysfs_en)
3652                 amdgpu_ucode_sysfs_fini(adev);
3653
3654         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3655         if (IS_ENABLED(CONFIG_PERF_EVENTS))
3656                 amdgpu_pmu_fini(adev);
3657         if (adev->mman.discovery_bin)
3658                 amdgpu_discovery_fini(adev);
3659 }
3660
3661
3662 /*
3663  * Suspend & resume.
3664  */
3665 /**
3666  * amdgpu_device_suspend - initiate device suspend
3667  *
3668  * @dev: drm dev pointer
3669  * @fbcon: notify the fbdev of suspend
3670  *
3671  * Puts the hw in the suspend state (all asics).
3672  * Returns 0 for success or an error on failure.
3673  * Called at driver suspend.
3674  */
3675 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3676 {
3677         struct amdgpu_device *adev;
3678         struct drm_crtc *crtc;
3679         struct drm_connector *connector;
3680         struct drm_connector_list_iter iter;
3681         int r;
3682
3683         adev = drm_to_adev(dev);
3684
3685         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3686                 return 0;
3687
3688         adev->in_suspend = true;
3689         drm_kms_helper_poll_disable(dev);
3690
3691         if (fbcon)
3692                 amdgpu_fbdev_set_suspend(adev, 1);
3693
3694         cancel_delayed_work_sync(&adev->delayed_init_work);
3695
3696         if (!amdgpu_device_has_dc_support(adev)) {
3697                 /* turn off display hw */
3698                 drm_modeset_lock_all(dev);
3699                 drm_connector_list_iter_begin(dev, &iter);
3700                 drm_for_each_connector_iter(connector, &iter)
3701                         drm_helper_connector_dpms(connector,
3702                                                   DRM_MODE_DPMS_OFF);
3703                 drm_connector_list_iter_end(&iter);
3704                 drm_modeset_unlock_all(dev);
3705                 /* unpin the front buffers and cursors */
3706                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3707                         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3708                         struct drm_framebuffer *fb = crtc->primary->fb;
3709                         struct amdgpu_bo *robj;
3710
3711                         if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3712                                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3713                                 r = amdgpu_bo_reserve(aobj, true);
3714                                 if (r == 0) {
3715                                         amdgpu_bo_unpin(aobj);
3716                                         amdgpu_bo_unreserve(aobj);
3717                                 }
3718                         }
3719
3720                         if (fb == NULL || fb->obj[0] == NULL) {
3721                                 continue;
3722                         }
3723                         robj = gem_to_amdgpu_bo(fb->obj[0]);
3724                         /* don't unpin kernel fb objects */
3725                         if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3726                                 r = amdgpu_bo_reserve(robj, true);
3727                                 if (r == 0) {
3728                                         amdgpu_bo_unpin(robj);
3729                                         amdgpu_bo_unreserve(robj);
3730                                 }
3731                         }
3732                 }
3733         }
3734
3735         amdgpu_ras_suspend(adev);
3736
3737         r = amdgpu_device_ip_suspend_phase1(adev);
3738
3739         amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3740
3741         /* evict vram memory */
3742         amdgpu_bo_evict_vram(adev);
3743
3744         amdgpu_fence_driver_suspend(adev);
3745
3746         if (adev->in_poweroff_reboot_com ||
3747             !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
3748                 r = amdgpu_device_ip_suspend_phase2(adev);
3749         else
3750                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
3751         /* evict remaining vram memory
3752          * This second call to evict vram is to evict the gart page table
3753          * using the CPU.
3754          */
3755         amdgpu_bo_evict_vram(adev);
3756
3757         return 0;
3758 }
3759
3760 /**
3761  * amdgpu_device_resume - initiate device resume
3762  *
3763  * @dev: drm dev pointer
3764  * @fbcon: notify the fbdev of resume
3765  *
3766  * Bring the hw back to operating state (all asics).
3767  * Returns 0 for success or an error on failure.
3768  * Called at driver resume.
3769  */
3770 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3771 {
3772         struct drm_connector *connector;
3773         struct drm_connector_list_iter iter;
3774         struct amdgpu_device *adev = drm_to_adev(dev);
3775         struct drm_crtc *crtc;
3776         int r = 0;
3777
3778         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3779                 return 0;
3780
3781         if (amdgpu_acpi_is_s0ix_supported(adev))
3782                 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3783
3784         /* post card */
3785         if (amdgpu_device_need_post(adev)) {
3786                 r = amdgpu_device_asic_init(adev);
3787                 if (r)
3788                         dev_err(adev->dev, "amdgpu asic init failed\n");
3789         }
3790
3791         r = amdgpu_device_ip_resume(adev);
3792         if (r) {
3793                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3794                 return r;
3795         }
3796         amdgpu_fence_driver_resume(adev);
3797
3798
3799         r = amdgpu_device_ip_late_init(adev);
3800         if (r)
3801                 return r;
3802
3803         queue_delayed_work(system_wq, &adev->delayed_init_work,
3804                            msecs_to_jiffies(AMDGPU_RESUME_MS));
3805
3806         if (!amdgpu_device_has_dc_support(adev)) {
3807                 /* pin cursors */
3808                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3809                         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3810
3811                         if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3812                                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3813                                 r = amdgpu_bo_reserve(aobj, true);
3814                                 if (r == 0) {
3815                                         r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3816                                         if (r != 0)
3817                                                 dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
3818                                         amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3819                                         amdgpu_bo_unreserve(aobj);
3820                                 }
3821                         }
3822                 }
3823         }
3824         r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
3825         if (r)
3826                 return r;
3827
3828         /* Make sure IB tests flushed */
3829         flush_delayed_work(&adev->delayed_init_work);
3830
3831         /* blat the mode back in */
3832         if (fbcon) {
3833                 if (!amdgpu_device_has_dc_support(adev)) {
3834                         /* pre DCE11 */
3835                         drm_helper_resume_force_mode(dev);
3836
3837                         /* turn on display hw */
3838                         drm_modeset_lock_all(dev);
3839
3840                         drm_connector_list_iter_begin(dev, &iter);
3841                         drm_for_each_connector_iter(connector, &iter)
3842                                 drm_helper_connector_dpms(connector,
3843                                                           DRM_MODE_DPMS_ON);
3844                         drm_connector_list_iter_end(&iter);
3845
3846                         drm_modeset_unlock_all(dev);
3847                 }
3848                 amdgpu_fbdev_set_suspend(adev, 0);
3849         }
3850
3851         drm_kms_helper_poll_enable(dev);
3852
3853         amdgpu_ras_resume(adev);
3854
3855         /*
3856          * Most of the connector probing functions try to acquire runtime pm
3857          * refs to ensure that the GPU is powered on when connector polling is
3858          * performed. Since we're calling this from a runtime PM callback,
3859          * trying to acquire rpm refs will cause us to deadlock.
3860          *
3861          * Since we're guaranteed to be holding the rpm lock, it's safe to
3862          * temporarily disable the rpm helpers so this doesn't deadlock us.
3863          */
3864 #ifdef CONFIG_PM
3865         dev->dev->power.disable_depth++;
3866 #endif
3867         if (!amdgpu_device_has_dc_support(adev))
3868                 drm_helper_hpd_irq_event(dev);
3869         else
3870                 drm_kms_helper_hotplug_event(dev);
3871 #ifdef CONFIG_PM
3872         dev->dev->power.disable_depth--;
3873 #endif
3874         adev->in_suspend = false;
3875
3876         return 0;
3877 }
3878
3879 /**
3880  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3881  *
3882  * @adev: amdgpu_device pointer
3883  *
3884  * The list of all the hardware IPs that make up the asic is walked and
3885  * the check_soft_reset callbacks are run.  check_soft_reset determines
3886  * if the asic is still hung or not.
3887  * Returns true if any of the IPs are still in a hung state, false if not.
3888  */
3889 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3890 {
3891         int i;
3892         bool asic_hang = false;
3893
3894         if (amdgpu_sriov_vf(adev))
3895                 return true;
3896
3897         if (amdgpu_asic_need_full_reset(adev))
3898                 return true;
3899
3900         for (i = 0; i < adev->num_ip_blocks; i++) {
3901                 if (!adev->ip_blocks[i].status.valid)
3902                         continue;
3903                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3904                         adev->ip_blocks[i].status.hang =
3905                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3906                 if (adev->ip_blocks[i].status.hang) {
3907                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3908                         asic_hang = true;
3909                 }
3910         }
3911         return asic_hang;
3912 }
3913
3914 /**
3915  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3916  *
3917  * @adev: amdgpu_device pointer
3918  *
3919  * The list of all the hardware IPs that make up the asic is walked and the
3920  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
3921  * handles any IP specific hardware or software state changes that are
3922  * necessary for a soft reset to succeed.
3923  * Returns 0 on success, negative error code on failure.
3924  */
3925 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3926 {
3927         int i, r = 0;
3928
3929         for (i = 0; i < adev->num_ip_blocks; i++) {
3930                 if (!adev->ip_blocks[i].status.valid)
3931                         continue;
3932                 if (adev->ip_blocks[i].status.hang &&
3933                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3934                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3935                         if (r)
3936                                 return r;
3937                 }
3938         }
3939
3940         return 0;
3941 }
3942
3943 /**
3944  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3945  *
3946  * @adev: amdgpu_device pointer
3947  *
3948  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
3949  * reset is necessary to recover.
3950  * Returns true if a full asic reset is required, false if not.
3951  */
3952 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3953 {
3954         int i;
3955
3956         if (amdgpu_asic_need_full_reset(adev))
3957                 return true;
3958
3959         for (i = 0; i < adev->num_ip_blocks; i++) {
3960                 if (!adev->ip_blocks[i].status.valid)
3961                         continue;
3962                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3963                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3964                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3965                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3966                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3967                         if (adev->ip_blocks[i].status.hang) {
3968                                 dev_info(adev->dev, "Some blocks need a full reset!\n");
3969                                 return true;
3970                         }
3971                 }
3972         }
3973         return false;
3974 }
3975
3976 /**
3977  * amdgpu_device_ip_soft_reset - do a soft reset
3978  *
3979  * @adev: amdgpu_device pointer
3980  *
3981  * The list of all the hardware IPs that make up the asic is walked and the
3982  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
3983  * IP specific hardware or software state changes that are necessary to soft
3984  * reset the IP.
3985  * Returns 0 on success, negative error code on failure.
3986  */
3987 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3988 {
3989         int i, r = 0;
3990
3991         for (i = 0; i < adev->num_ip_blocks; i++) {
3992                 if (!adev->ip_blocks[i].status.valid)
3993                         continue;
3994                 if (adev->ip_blocks[i].status.hang &&
3995                     adev->ip_blocks[i].version->funcs->soft_reset) {
3996                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3997                         if (r)
3998                                 return r;
3999                 }
4000         }
4001
4002         return 0;
4003 }
4004
4005 /**
4006  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4007  *
4008  * @adev: amdgpu_device pointer
4009  *
4010  * The list of all the hardware IPs that make up the asic is walked and the
4011  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4012  * handles any IP specific hardware or software state changes that are
4013  * necessary after the IP has been soft reset.
4014  * Returns 0 on success, negative error code on failure.
4015  */
4016 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4017 {
4018         int i, r = 0;
4019
4020         for (i = 0; i < adev->num_ip_blocks; i++) {
4021                 if (!adev->ip_blocks[i].status.valid)
4022                         continue;
4023                 if (adev->ip_blocks[i].status.hang &&
4024                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4025                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4026                 if (r)
4027                         return r;
4028         }
4029
4030         return 0;
4031 }
4032
4033 /**
4034  * amdgpu_device_recover_vram - Recover some VRAM contents
4035  *
4036  * @adev: amdgpu_device pointer
4037  *
4038  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4039  * restore things like GPUVM page tables after a GPU reset where
4040  * the contents of VRAM might be lost.
4041  *
4042  * Returns:
4043  * 0 on success, negative error code on failure.
4044  */
4045 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4046 {
4047         struct dma_fence *fence = NULL, *next = NULL;
4048         struct amdgpu_bo *shadow;
4049         long r = 1, tmo;
4050
4051         if (amdgpu_sriov_runtime(adev))
4052                 tmo = msecs_to_jiffies(8000);
4053         else
4054                 tmo = msecs_to_jiffies(100);
4055
4056         dev_info(adev->dev, "recover vram bo from shadow start\n");
4057         mutex_lock(&adev->shadow_list_lock);
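        /*
         * The restores are pipelined: each iteration kicks off the copy for
         * the current shadow BO and then waits on the fence of the previous
         * one, so the copy engine keeps working while the list is walked.
         */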
4058         list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
4059
4060                 /* No need to recover an evicted BO */
4061                 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
4062                     shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
4063                     shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
4064                         continue;
4065
4066                 r = amdgpu_bo_restore_shadow(shadow, &next);
4067                 if (r)
4068                         break;
4069
4070                 if (fence) {
4071                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4072                         dma_fence_put(fence);
4073                         fence = next;
4074                         if (tmo == 0) {
4075                                 r = -ETIMEDOUT;
4076                                 break;
4077                         } else if (tmo < 0) {
4078                                 r = tmo;
4079                                 break;
4080                         }
4081                 } else {
4082                         fence = next;
4083                 }
4084         }
4085         mutex_unlock(&adev->shadow_list_lock);
4086
4087         if (fence)
4088                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4089         dma_fence_put(fence);
4090
4091         if (r < 0 || tmo <= 0) {
4092                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4093                 return -EIO;
4094         }
4095
4096         dev_info(adev->dev, "recover vram bo from shadow done\n");
4097         return 0;
4098 }
4099
4100
4101 /**
4102  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4103  *
4104  * @adev: amdgpu_device pointer
4105  * @from_hypervisor: request from hypervisor
4106  *
4107  * Do a VF FLR and reinitialize the ASIC.
4108  * Returns 0 on success, negative error code on failure.
4109  */
4110 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4111                                      bool from_hypervisor)
4112 {
4113         int r;
4114
4115         if (from_hypervisor)
4116                 r = amdgpu_virt_request_full_gpu(adev, true);
4117         else
4118                 r = amdgpu_virt_reset_gpu(adev);
4119         if (r)
4120                 return r;
4121
4122         amdgpu_amdkfd_pre_reset(adev);
4123
4124         /* Resume IP prior to SMC */
4125         r = amdgpu_device_ip_reinit_early_sriov(adev);
4126         if (r)
4127                 goto error;
4128
4129         amdgpu_virt_init_data_exchange(adev);
4130         /* we need to recover the gart prior to running SMC/CP/SDMA resume */
4131         amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4132
4133         r = amdgpu_device_fw_loading(adev);
4134         if (r)
4135                 return r;
4136
4137         /* now we are okay to resume SMC/CP/SDMA */
4138         r = amdgpu_device_ip_reinit_late_sriov(adev);
4139         if (r)
4140                 goto error;
4141
4142         amdgpu_irq_gpu_reset_resume_helper(adev);
4143         r = amdgpu_ib_ring_tests(adev);
4144         amdgpu_amdkfd_post_reset(adev);
4145
4146 error:
4147         amdgpu_virt_release_full_gpu(adev, true);
4148         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4149                 amdgpu_inc_vram_lost(adev);
4150                 r = amdgpu_device_recover_vram(adev);
4151         }
4152
4153         return r;
4154 }
4155
4156 /**
4157  * amdgpu_device_has_job_running - check if there is any job in the pending list
4158  *
4159  * @adev: amdgpu_device pointer
4160  *
4161  * Check if there is any job in the pending list of any ring.
4162  */
4163 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4164 {
4165         int i;
4166         struct drm_sched_job *job;
4167
4168         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4169                 struct amdgpu_ring *ring = adev->rings[i];
4170
4171                 if (!ring || !ring->sched.thread)
4172                         continue;
4173
4174                 spin_lock(&ring->sched.job_list_lock);
4175                 job = list_first_entry_or_null(&ring->sched.pending_list,
4176                                                struct drm_sched_job, list);
4177                 spin_unlock(&ring->sched.job_list_lock);
4178                 if (job)
4179                         return true;
4180         }
4181         return false;
4182 }
4183
4184 /**
4185  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4186  *
4187  * @adev: amdgpu_device pointer
4188  *
4189  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4190  * a hung GPU.
4191  */
4192 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4193 {
4194         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4195                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4196                 return false;
4197         }
4198
4199         if (amdgpu_gpu_recovery == 0)
4200                 goto disabled;
4201
4202         if (amdgpu_sriov_vf(adev))
4203                 return true;
4204
4205         if (amdgpu_gpu_recovery == -1) {
4206                 switch (adev->asic_type) {
4207                 case CHIP_BONAIRE:
4208                 case CHIP_HAWAII:
4209                 case CHIP_TOPAZ:
4210                 case CHIP_TONGA:
4211                 case CHIP_FIJI:
4212                 case CHIP_POLARIS10:
4213                 case CHIP_POLARIS11:
4214                 case CHIP_POLARIS12:
4215                 case CHIP_VEGAM:
4216                 case CHIP_VEGA20:
4217                 case CHIP_VEGA10:
4218                 case CHIP_VEGA12:
4219                 case CHIP_RAVEN:
4220                 case CHIP_ARCTURUS:
4221                 case CHIP_RENOIR:
4222                 case CHIP_NAVI10:
4223                 case CHIP_NAVI14:
4224                 case CHIP_NAVI12:
4225                 case CHIP_SIENNA_CICHLID:
4226                 case CHIP_NAVY_FLOUNDER:
4227                 case CHIP_DIMGREY_CAVEFISH:
4228                         break;
4229                 default:
4230                         goto disabled;
4231                 }
4232         }
4233
4234         return true;
4235
4236 disabled:
4237         dev_info(adev->dev, "GPU recovery disabled.\n");
4238         return false;
4239 }
4240
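/**
 * amdgpu_device_mode1_reset - perform a full ASIC (mode1) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering, caches the PCI config space and triggers a mode1
 * reset through the SMU when supported, falling back to the PSP otherwise.
 * The PCI state is then restored and the memory size register is polled
 * until the ASIC comes back.
 * Returns 0 on success, error code on failure.
 */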
4241 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4242 {
4243         u32 i;
4244         int ret = 0;
4245
4246         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4247
4248         dev_info(adev->dev, "GPU mode1 reset\n");
4249
4250         /* disable BM */
4251         pci_clear_master(adev->pdev);
4252
4253         amdgpu_device_cache_pci_state(adev->pdev);
4254
4255         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4256                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4257                 ret = amdgpu_dpm_mode1_reset(adev);
4258         } else {
4259                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4260                 ret = psp_gpu_reset(adev);
4261         }
4262
4263         if (ret)
4264                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4265
4266         amdgpu_device_load_pci_state(adev->pdev);
4267
4268         /* wait for asic to come out of reset */
4269         for (i = 0; i < adev->usec_timeout; i++) {
4270                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4271
4272                 if (memsize != 0xffffffff)
4273                         break;
4274                 udelay(1);
4275         }
4276
4277         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4278         return ret;
4279 }
4280
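/**
 * amdgpu_device_pre_asic_reset - prepare a device for reset
 *
 * @adev: amdgpu_device pointer
 * @job: the job that triggered the reset, if any
 * @need_full_reset_arg: in/out, whether a full ASIC reset is required
 *
 * Force-completes the hardware fences on all rings and increases the karma
 * of the offending job.  On bare metal a soft reset of the hung IP blocks
 * is attempted first; if it fails, or a full reset is needed anyway, the IP
 * blocks are suspended in preparation for the full reset.
 * Returns 0 on success, negative error code on failure.
 */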
4281 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4282                                   struct amdgpu_job *job,
4283                                   bool *need_full_reset_arg)
4284 {
4285         int i, r = 0;
4286         bool need_full_reset  = *need_full_reset_arg;
4287
4288         /* no need to dump if device is not in good state during probe period */
4289         if (!adev->gmc.xgmi.pending_reset)
4290                 amdgpu_debugfs_wait_dump(adev);
4291
4292         if (amdgpu_sriov_vf(adev)) {
4293                 /* stop the data exchange thread */
4294                 amdgpu_virt_fini_data_exchange(adev);
4295         }
4296
4297         /* block all schedulers and reset given job's ring */
4298         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4299                 struct amdgpu_ring *ring = adev->rings[i];
4300
4301                 if (!ring || !ring->sched.thread)
4302                         continue;
4303
4304                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4305                 amdgpu_fence_driver_force_completion(ring);
4306         }
4307
4308         if (job)
4309                 drm_sched_increase_karma(&job->base);
4310
4311         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4312         if (!amdgpu_sriov_vf(adev)) {
4313
4314                 if (!need_full_reset)
4315                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4316
4317                 if (!need_full_reset) {
4318                         amdgpu_device_ip_pre_soft_reset(adev);
4319                         r = amdgpu_device_ip_soft_reset(adev);
4320                         amdgpu_device_ip_post_soft_reset(adev);
4321                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4322                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4323                                 need_full_reset = true;
4324                         }
4325                 }
4326
4327                 if (need_full_reset)
4328                         r = amdgpu_device_ip_suspend(adev);
4329
4330                 *need_full_reset_arg = need_full_reset;
4331         }
4332
4333         return r;
4334 }
4335
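     /**
      * amdgpu_do_asic_reset - perform the actual ASIC reset(s) and re-init
      *
      * @hive: XGMI hive the device belongs to, or NULL
      * @device_list_handle: list of devices to reset
      * @need_full_reset_arg: in/out flag indicating whether a full reset is needed
      * @skip_hw_reset: skip the hardware reset itself and only re-initialize
      *
      * Resets every device on the list (in parallel for XGMI hives), re-posts
      * the cards, resumes the IP blocks, recovers VRAM contents if they were
      * lost and runs the IB ring tests.
      * Returns 0 for success or an error on failure.
      */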
4336 int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
4337                           struct list_head *device_list_handle,
4338                           bool *need_full_reset_arg,
4339                           bool skip_hw_reset)
4340 {
4341         struct amdgpu_device *tmp_adev = NULL;
4342         bool need_full_reset = *need_full_reset_arg, vram_lost = false;
4343         int r = 0;
4344
4345         /*
4346          * ASIC reset has to be done on all XGMI hive nodes ASAP
4347          * to allow proper link negotiation in FW (within 1 sec)
4348          */
4349         if (!skip_hw_reset && need_full_reset) {
4350                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4351                         /* For XGMI run all resets in parallel to speed up the process */
4352                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4353                                 tmp_adev->gmc.xgmi.pending_reset = false;
4354                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4355                                         r = -EALREADY;
4356                         } else
4357                                 r = amdgpu_asic_reset(tmp_adev);
4358
4359                         if (r) {
4360                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4361                                          r, adev_to_drm(tmp_adev)->unique);
4362                                 break;
4363                         }
4364                 }
4365
4366                 /* For XGMI wait for all resets to complete before proceeding */
4367                 if (!r) {
4368                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4369                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4370                                         flush_work(&tmp_adev->xgmi_reset_work);
4371                                         r = tmp_adev->asic_reset_res;
4372                                         if (r)
4373                                                 break;
4374                                 }
4375                         }
4376                 }
4377         }
4378
4379         if (!r && amdgpu_ras_intr_triggered()) {
4380                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4381                         if (tmp_adev->mmhub.funcs &&
4382                             tmp_adev->mmhub.funcs->reset_ras_error_count)
4383                                 tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
4384                 }
4385
4386                 amdgpu_ras_intr_cleared();
4387         }
4388
4389         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4390                 if (need_full_reset) {
4391                         /* post card */
4392                         r = amdgpu_device_asic_init(tmp_adev);
4393                         if (r) {
4394                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4395                         } else {
4396                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4397                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4398                                 if (r)
4399                                         goto out;
4400
4401                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4402                                 if (vram_lost) {
4403                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4404                                         amdgpu_inc_vram_lost(tmp_adev);
4405                                 }
4406
4407                                 r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4408                                 if (r)
4409                                         goto out;
4410
4411                                 r = amdgpu_device_fw_loading(tmp_adev);
4412                                 if (r)
4413                                         return r;
4414
4415                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4416                                 if (r)
4417                                         goto out;
4418
4419                                 if (vram_lost)
4420                                         amdgpu_device_fill_reset_magic(tmp_adev);
4421
4422                                 /*
4423                                  * Add this ASIC as tracked, as the reset has already
4424                                  * completed successfully.
4425                                  */
4426                                 amdgpu_register_gpu_instance(tmp_adev);
4427
4428                                 if (!hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4429                                         amdgpu_xgmi_add_device(tmp_adev);
4430
4431                                 r = amdgpu_device_ip_late_init(tmp_adev);
4432                                 if (r)
4433                                         goto out;
4434
4435                                 amdgpu_fbdev_set_suspend(tmp_adev, 0);
4436
4437                                 /*
4438                                  * The GPU enters a bad state once the number of
4439                                  * faulty pages detected by ECC reaches the
4440                                  * threshold, and RAS recovery is scheduled next.
4441                                  * So add a check here to break out of recovery if
4442                                  * the bad page threshold has indeed been exceeded,
4443                                  * and remind the user to either retire this GPU or
4444                                  * set a bigger bad_page_threshold value the next
4445                                  * time the driver is probed.
4446                                  */
4447                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4448                                         /* must succeed. */
4449                                         amdgpu_ras_resume(tmp_adev);
4450                                 } else {
4451                                         r = -EINVAL;
4452                                         goto out;
4453                                 }
4454
4455                                 /* Update PSP FW topology after reset */
4456                                 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4457                                         r = amdgpu_xgmi_update_topology(hive, tmp_adev);
4458                         }
4459                 }
4460
4461 out:
4462                 if (!r) {
4463                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4464                         r = amdgpu_ib_ring_tests(tmp_adev);
4465                         if (r) {
4466                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4467                                 r = amdgpu_device_ip_suspend(tmp_adev);
4468                                 need_full_reset = true;
4469                                 r = -EAGAIN;
4470                                 goto end;
4471                         }
4472                 }
4473
4474                 if (!r)
4475                         r = amdgpu_device_recover_vram(tmp_adev);
4476                 else
4477                         tmp_adev->asic_reset_res = r;
4478         }
4479
4480 end:
4481         *need_full_reset_arg = need_full_reset;
4482         return r;
4483 }
4484
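     /*
      * Take the per-device reset lock. Returns false if a reset is already in
      * progress on this device, otherwise takes adev->reset_sem and sets the
      * MP1 state expected by the chosen reset method.
      */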
4485 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4486                                 struct amdgpu_hive_info *hive)
4487 {
4488         if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4489                 return false;
4490
4491         if (hive) {
4492                 down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4493         } else {
4494                 down_write(&adev->reset_sem);
4495         }
4496
4497         switch (amdgpu_asic_reset_method(adev)) {
4498         case AMD_RESET_METHOD_MODE1:
4499                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4500                 break;
4501         case AMD_RESET_METHOD_MODE2:
4502                 adev->mp1_state = PP_MP1_STATE_RESET;
4503                 break;
4504         default:
4505                 adev->mp1_state = PP_MP1_STATE_NONE;
4506                 break;
4507         }
4508
4509         return true;
4510 }
4511
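     /*
      * Counterpart of amdgpu_device_lock_adev: flushes any pending VF errors,
      * resets the MP1 state, clears the in_gpu_reset flag and releases
      * adev->reset_sem.
      */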
4512 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4513 {
4514         amdgpu_vf_error_trans_all(adev);
4515         adev->mp1_state = PP_MP1_STATE_NONE;
4516         atomic_set(&adev->in_gpu_reset, 0);
4517         up_write(&adev->reset_sem);
4518 }
4519
4520 /*
4521  * Lock a list of amdgpu devices in a hive safely. If the device is not
4522  * part of a hive with multiple nodes, this is equivalent to amdgpu_device_lock_adev.
4523  *
4524  * Unlock won't require a roll back.
4525  */
4526 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4527 {
4528         struct amdgpu_device *tmp_adev = NULL;
4529
4530         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4531                 if (!hive) {
4532                         dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4533                         return -ENODEV;
4534                 }
4535                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4536                         if (!amdgpu_device_lock_adev(tmp_adev, hive))
4537                                 goto roll_back;
4538                 }
4539         } else if (!amdgpu_device_lock_adev(adev, hive))
4540                 return -EAGAIN;
4541
4542         return 0;
4543 roll_back:
4544         if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4545                 /*
4546                  * If the locking iteration breaks in the middle of a hive,
4547                  * it may mean there is a race issue, or that a hive device
4548                  * locked up independently. We may or may not be in trouble,
4549                  * so try to roll back the locks already taken and give out
4550                  * a warning.
4551                  */
4552                 dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4553                 list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4554                         amdgpu_device_unlock_adev(tmp_adev);
4555                 }
4556         }
4557         return -EAGAIN;
4558 }
4559
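     /*
      * Re-enable runtime PM of the audio device (function 1 on the same bus,
      * typically the HDMI/DP audio function of the GPU) and resume it, undoing
      * amdgpu_device_suspend_display_audio().
      */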
4560 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4561 {
4562         struct pci_dev *p = NULL;
4563
4564         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4565                         adev->pdev->bus->number, 1);
4566         if (p) {
4567                 pm_runtime_enable(&(p->dev));
4568                 pm_runtime_resume(&(p->dev));
4569         }
4570 }
4571
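     /*
      * Put the audio device (function 1 on the same bus) into runtime suspend
      * before a BACO or mode1 reset and keep it there by disabling its runtime
      * PM, so the reset does not pull the shared power domain out from under
      * the audio driver.
      * Returns 0 on success or a negative error code.
      */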
4572 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4573 {
4574         enum amd_reset_method reset_method;
4575         struct pci_dev *p = NULL;
4576         u64 expires;
4577
4578         /*
4579          * For now, only BACO and mode1 reset are confirmed to
4580          * suffer the audio issue if the device is not properly suspended.
4581          */
4582         reset_method = amdgpu_asic_reset_method(adev);
4583         if ((reset_method != AMD_RESET_METHOD_BACO) &&
4584              (reset_method != AMD_RESET_METHOD_MODE1))
4585                 return -EINVAL;
4586
4587         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4588                         adev->pdev->bus->number, 1);
4589         if (!p)
4590                 return -ENODEV;
4591
4592         expires = pm_runtime_autosuspend_expiration(&(p->dev));
4593         if (!expires)
4594                 /*
4595                  * If we cannot get the audio device autosuspend delay,
4596                  * a fixed 4s interval will be used. Since 3s is the audio
4597                  * controller's default autosuspend delay setting, the 4s
4598                  * used here is guaranteed to cover that.
4599                  */
4600                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4601
4602         while (!pm_runtime_status_suspended(&(p->dev))) {
4603                 if (!pm_runtime_suspend(&(p->dev)))
4604                         break;
4605
4606                 if (expires < ktime_get_mono_fast_ns()) {
4607                         dev_warn(adev->dev, "failed to suspend display audio\n");
4608                         /* TODO: abort the succeeding gpu reset? */
4609                         return -ETIMEDOUT;
4610                 }
4611         }
4612
4613         pm_runtime_disable(&(p->dev));
4614
4615         return 0;
4616 }
4617
4618 /**
4619  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4620  *
4621  * @adev: amdgpu_device pointer
4622  * @job: the job that triggered the hang
4623  *
4624  * Attempt to reset the GPU if it has hung (all ASICs).
4625  * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
4626  * Returns 0 for success or an error on failure.
4627  */
4628
4629 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4630                               struct amdgpu_job *job)
4631 {
4632         struct list_head device_list, *device_list_handle =  NULL;
4633         bool need_full_reset = false;
4634         bool job_signaled = false;
4635         struct amdgpu_hive_info *hive = NULL;
4636         struct amdgpu_device *tmp_adev = NULL;
4637         int i, r = 0;
4638         bool need_emergency_restart = false;
4639         bool audio_suspended = false;
4640
4641         /*
4642          * Special case: RAS triggered and full reset isn't supported
4643          */
4644         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4645
4646         /*
4647          * Flush RAM to disk so that after reboot
4648          * the user can read the log and see why the system rebooted.
4649          */
4650         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4651                 DRM_WARN("Emergency reboot.");
4652
4653                 ksys_sync_helper();
4654                 emergency_restart();
4655         }
4656
4657         dev_info(adev->dev, "GPU %s begin!\n",
4658                 need_emergency_restart ? "jobs stop":"reset");
4659
4660         /*
4661          * Here we trylock to avoid a chain of resets executing from
4662          * either a trigger by jobs on different adevs in an XGMI hive or jobs on
4663          * different schedulers for the same device while this TO handler is running.
4664          * We always reset all schedulers for a device and all devices of an XGMI
4665          * hive, so that should take care of them too.
4666          */
4667         hive = amdgpu_get_xgmi_hive(adev);
4668         if (hive) {
4669                 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4670                         DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4671                                 job ? job->base.id : -1, hive->hive_id);
4672                         amdgpu_put_xgmi_hive(hive);
4673                         if (job)
4674                                 drm_sched_increase_karma(&job->base);
4675                         return 0;
4676                 }
4677                 mutex_lock(&hive->hive_lock);
4678         }
4679
4680         /*
4681          * Lock the device before we try to operate on the linked list.
4682          * If we didn't get the device lock, don't touch the linked list since
4683          * others may be iterating over it.
4684          */
4685         r = amdgpu_device_lock_hive_adev(adev, hive);
4686         if (r) {
4687                 dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4688                                         job ? job->base.id : -1);
4689
4690                 /* even though we skipped this reset, we still need to set the job as guilty */
4691                 if (job)
4692                         drm_sched_increase_karma(&job->base);
4693                 goto skip_recovery;
4694         }
4695
4696         /*
4697          * Build list of devices to reset.
4698          * In case we are in XGMI hive mode, resort the device list
4699          * to put adev in the 1st position.
4700          */
4701         INIT_LIST_HEAD(&device_list);
4702         if (adev->gmc.xgmi.num_physical_nodes > 1) {
4703                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
4704                         list_add_tail(&tmp_adev->reset_list, &device_list);
4705                 if (!list_is_first(&adev->reset_list, &device_list))
4706                         list_rotate_to_front(&adev->reset_list, &device_list);
4707                 device_list_handle = &device_list;
4708         } else {
4709                 list_add_tail(&adev->reset_list, &device_list);
4710                 device_list_handle = &device_list;
4711         }
4712
4713         /* block all schedulers and reset given job's ring */
4714         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4715                 /*
4716                  * Try to put the audio codec into the suspend state
4717                  * before the GPU reset starts.
4718                  *
4719                  * The power domain of the graphics device is shared
4720                  * with the AZ power domain. Without this, we may
4721                  * change the audio hardware from behind the audio
4722                  * driver's back, which will trigger some audio codec
4723                  * errors.
4724                  */
4725                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
4726                         audio_suspended = true;
4727
4728                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
4729
4730                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4731
4732                 if (!amdgpu_sriov_vf(tmp_adev))
4733                         amdgpu_amdkfd_pre_reset(tmp_adev);
4734
4735                 /*
4736                  * Mark these ASICs to be reset as untracked first,
4737                  * and add them back after the reset has completed.
4738                  */
4739                 amdgpu_unregister_gpu_instance(tmp_adev);
4740
4741                 amdgpu_fbdev_set_suspend(tmp_adev, 1);
4742
4743                 /* disable ras on ALL IPs */
4744                 if (!need_emergency_restart &&
4745                       amdgpu_device_ip_need_full_reset(tmp_adev))
4746                         amdgpu_ras_suspend(tmp_adev);
4747
4748                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4749                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4750
4751                         if (!ring || !ring->sched.thread)
4752                                 continue;
4753
4754                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4755
4756                         if (need_emergency_restart)
4757                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4758                 }
4759                 atomic_inc(&tmp_adev->gpu_reset_counter);
4760         }
4761
4762         if (need_emergency_restart)
4763                 goto skip_sched_resume;
4764
4765         /*
4766          * Must check guilty signal here since after this point all old
4767          * HW fences are force signaled.
4768          *
4769          * job->base holds a reference to parent fence
4770          */
4771         if (job && job->base.s_fence->parent &&
4772             dma_fence_is_signaled(job->base.s_fence->parent)) {
4773                 job_signaled = true;
4774                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4775                 goto skip_hw_reset;
4776         }
4777
4778 retry:  /* Pre-ASIC reset for the rest of the adevs from the XGMI hive. */
4779         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4780                 r = amdgpu_device_pre_asic_reset(tmp_adev,
4781                                                  (tmp_adev == adev) ? job : NULL,
4782                                                  &need_full_reset);
4783                 /* TODO: Should we stop? */
4784                 if (r) {
4785                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
4786                                   r, adev_to_drm(tmp_adev)->unique);
4787                         tmp_adev->asic_reset_res = r;
4788                 }
4789         }
4790
4791         /* Actual ASIC resets if needed. */
4792         /* TODO Implement XGMI hive reset logic for SRIOV */
4793         if (amdgpu_sriov_vf(adev)) {
4794                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
4795                 if (r)
4796                         adev->asic_reset_res = r;
4797         } else {
4798                 r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
4799                 if (r && r == -EAGAIN)
4800                         goto retry;
4801         }
4802
4803 skip_hw_reset:
4804
4805         /* Post ASIC reset for all devs. */
4806         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4807
4808                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4809                         struct amdgpu_ring *ring = tmp_adev->rings[i];
4810
4811                         if (!ring || !ring->sched.thread)
4812                                 continue;
4813
4814                         /* No point in resubmitting jobs if we didn't do a HW reset */
4815                         if (!tmp_adev->asic_reset_res && !job_signaled)
4816                                 drm_sched_resubmit_jobs(&ring->sched);
4817
4818                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4819                 }
4820
4821                 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4822                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
4823                 }
4824
4825                 tmp_adev->asic_reset_res = 0;
4826
4827                 if (r) {
4828                         /* bad news, how to tell it to userspace ? */
4829                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
4830                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4831                 } else {
4832                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
4833                 }
4834         }
4835
4836 skip_sched_resume:
4837         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4838                 /* unlock kfd: SRIOV would do it separately */
4839                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
4840                         amdgpu_amdkfd_post_reset(tmp_adev);
4841
4842                 /* kfd_post_reset will do nothing if the kfd device is not initialized,
4843                  * so bring up kfd here if it was not initialized before.
4844                  */
4845                 if (!adev->kfd.init_complete)
4846                         amdgpu_amdkfd_device_init(adev);
4847
4848                 if (audio_suspended)
4849                         amdgpu_device_resume_display_audio(tmp_adev);
4850                 amdgpu_device_unlock_adev(tmp_adev);
4851         }
4852
4853 skip_recovery:
4854         if (hive) {
4855                 atomic_set(&hive->in_reset, 0);
4856                 mutex_unlock(&hive->hive_lock);
4857                 amdgpu_put_xgmi_hive(hive);
4858         }
4859
4860         if (r && r != -EAGAIN)
4861                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
4862         return r;
4863 }
4864
4865 /**
4866  * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
4867  *
4868  * @adev: amdgpu_device pointer
4869  *
4870  * Fetches and stores in the driver the PCIe capabilities (gen speed
4871  * and lanes) of the slot the device is in. Handles APUs and
4872  * virtualized environments where PCIe config space may not be available.
4873  */
4874 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
4875 {
4876         struct pci_dev *pdev;
4877         enum pci_bus_speed speed_cap, platform_speed_cap;
4878         enum pcie_link_width platform_link_width;
4879
4880         if (amdgpu_pcie_gen_cap)
4881                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
4882
4883         if (amdgpu_pcie_lane_cap)
4884                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
4885
4886         /* covers APUs as well */
4887         if (pci_is_root_bus(adev->pdev->bus)) {
4888                 if (adev->pm.pcie_gen_mask == 0)
4889                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4890                 if (adev->pm.pcie_mlw_mask == 0)
4891                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4892                 return;
4893         }
4894
4895         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4896                 return;
4897
4898         pcie_bandwidth_available(adev->pdev, NULL,
4899                                  &platform_speed_cap, &platform_link_width);
4900
4901         if (adev->pm.pcie_gen_mask == 0) {
4902                 /* asic caps */
4903                 pdev = adev->pdev;
4904                 speed_cap = pcie_get_speed_cap(pdev);
4905                 if (speed_cap == PCI_SPEED_UNKNOWN) {
4906                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4907                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4908                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4909                 } else {
4910                         if (speed_cap == PCIE_SPEED_32_0GT)
4911                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4912                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4913                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4914                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
4915                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
4916                         else if (speed_cap == PCIE_SPEED_16_0GT)
4917                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4918                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4919                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4920                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4921                         else if (speed_cap == PCIE_SPEED_8_0GT)
4922                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4923                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4924                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4925                         else if (speed_cap == PCIE_SPEED_5_0GT)
4926                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4927                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4928                         else
4929                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4930                 }
4931                 /* platform caps */
4932                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4933                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4934                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4935                 } else {
4936                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
4937                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4938                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4939                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4940                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
4941                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
4942                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
4943                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4944                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4945                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4946                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4947                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4948                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4949                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4950                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4951                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4952                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4953                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4954                         else
4955                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4956
4957                 }
4958         }
4959         if (adev->pm.pcie_mlw_mask == 0) {
4960                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4961                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4962                 } else {
4963                         switch (platform_link_width) {
4964                         case PCIE_LNK_X32:
4965                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4966                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4967                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4968                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4969                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4970                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4971                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4972                                 break;
4973                         case PCIE_LNK_X16:
4974                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4975                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4976                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4977                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4978                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4979                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4980                                 break;
4981                         case PCIE_LNK_X12:
4982                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4983                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4984                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4985                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4986                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4987                                 break;
4988                         case PCIE_LNK_X8:
4989                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4990                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4991                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4992                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4993                                 break;
4994                         case PCIE_LNK_X4:
4995                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4996                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4997                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4998                                 break;
4999                         case PCIE_LNK_X2:
5000                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5001                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5002                                 break;
5003                         case PCIE_LNK_X1:
5004                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5005                                 break;
5006                         default:
5007                                 break;
5008                         }
5009                 }
5010         }
5011 }
5012
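     /**
      * amdgpu_device_baco_enter - enter the BACO (Bus Active, Chip Off) state
      *
      * @dev: drm_device pointer
      *
      * Disables the doorbell interrupt when RAS is supported and asks the DPM
      * code to put the ASIC into BACO.
      * Returns 0 for success or an error on failure.
      */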
5013 int amdgpu_device_baco_enter(struct drm_device *dev)
5014 {
5015         struct amdgpu_device *adev = drm_to_adev(dev);
5016         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5017
5018         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5019                 return -ENOTSUPP;
5020
5021         if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
5022                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5023
5024         return amdgpu_dpm_baco_enter(adev);
5025 }
5026
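     /**
      * amdgpu_device_baco_exit - exit the BACO state
      *
      * @dev: drm_device pointer
      *
      * Asks the DPM code to bring the ASIC out of BACO and re-enables the
      * doorbell interrupt when RAS is supported.
      * Returns 0 for success or an error on failure.
      */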
5027 int amdgpu_device_baco_exit(struct drm_device *dev)
5028 {
5029         struct amdgpu_device *adev = drm_to_adev(dev);
5030         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5031         int ret = 0;
5032
5033         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5034                 return -ENOTSUPP;
5035
5036         ret = amdgpu_dpm_baco_exit(adev);
5037         if (ret)
5038                 return ret;
5039
5040         if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
5041                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5042
5043         return 0;
5044 }
5045
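     /* Cancel the pending timeout (TDR) work on every scheduler ring of the device. */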
5046 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5047 {
5048         int i;
5049
5050         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5051                 struct amdgpu_ring *ring = adev->rings[i];
5052
5053                 if (!ring || !ring->sched.thread)
5054                         continue;
5055
5056                 cancel_delayed_work_sync(&ring->sched.work_tdr);
5057         }
5058 }
5059
5060 /**
5061  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5062  * @pdev: PCI device struct
5063  * @state: PCI channel state
5064  *
5065  * Description: Called when a PCI error is detected.
5066  *
5067  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5068  */
5069 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5070 {
5071         struct drm_device *dev = pci_get_drvdata(pdev);
5072         struct amdgpu_device *adev = drm_to_adev(dev);
5073         int i;
5074
5075         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5076
5077         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5078                 DRM_WARN("No support for XGMI hive yet...");
5079                 return PCI_ERS_RESULT_DISCONNECT;
5080         }
5081
5082         switch (state) {
5083         case pci_channel_io_normal:
5084                 return PCI_ERS_RESULT_CAN_RECOVER;
5085         /* Fatal error, prepare for slot reset */
5086         case pci_channel_io_frozen:
5087                 /*
5088                  * Cancel and wait for all TDRs in progress if we fail to
5089                  * set adev->in_gpu_reset in amdgpu_device_lock_adev.
5090                  *
5091                  * Locking adev->reset_sem will prevent any external access
5092                  * to GPU during PCI error recovery
5093                  */
5094                 while (!amdgpu_device_lock_adev(adev, NULL))
5095                         amdgpu_cancel_all_tdr(adev);
5096
5097                 /*
5098                  * Block any work scheduling as we do for regular GPU reset
5099                  * for the duration of the recovery
5100                  */
5101                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5102                         struct amdgpu_ring *ring = adev->rings[i];
5103
5104                         if (!ring || !ring->sched.thread)
5105                                 continue;
5106
5107                         drm_sched_stop(&ring->sched, NULL);
5108                 }
5109                 atomic_inc(&adev->gpu_reset_counter);
5110                 return PCI_ERS_RESULT_NEED_RESET;
5111         case pci_channel_io_perm_failure:
5112                 /* Permanent error, prepare for device removal */
5113                 return PCI_ERS_RESULT_DISCONNECT;
5114         }
5115
5116         return PCI_ERS_RESULT_NEED_RESET;
5117 }
5118
5119 /**
5120  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5121  * @pdev: pointer to PCI device
5122  */
5123 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5124 {
5125
5126         DRM_INFO("PCI error: mmio enabled callback!!\n");
5127
5128         /* TODO - dump whatever for debugging purposes */
5129
5130         /* This is called only if amdgpu_pci_error_detected returns
5131          * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
5132          * work, so there is no need to reset the slot.
5133          */
5134
5135         return PCI_ERS_RESULT_RECOVERED;
5136 }
5137
5138 /**
5139  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5140  * @pdev: PCI device struct
5141  *
5142  * Description: This routine is called by the pci error recovery
5143  * code after the PCI slot has been reset, just before we
5144  * should resume normal operations.
5145  */
5146 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5147 {
5148         struct drm_device *dev = pci_get_drvdata(pdev);
5149         struct amdgpu_device *adev = drm_to_adev(dev);
5150         int r, i;
5151         bool need_full_reset = true;
5152         u32 memsize;
5153         struct list_head device_list;
5154
5155         DRM_INFO("PCI error: slot reset callback!!\n");
5156
5157         INIT_LIST_HEAD(&device_list);
5158         list_add_tail(&adev->reset_list, &device_list);
5159
5160         /* wait for asic to come out of reset */
5161         msleep(500);
5162
5163         /* Restore PCI config space */
5164         amdgpu_device_load_pci_state(pdev);
5165
5166         /* confirm the ASIC came out of reset */
5167         for (i = 0; i < adev->usec_timeout; i++) {
5168                 memsize = amdgpu_asic_get_config_memsize(adev);
5169
5170                 if (memsize != 0xffffffff)
5171                         break;
5172                 udelay(1);
5173         }
5174         if (memsize == 0xffffffff) {
5175                 r = -ETIME;
5176                 goto out;
5177         }
5178
5179         adev->in_pci_err_recovery = true;
5180         r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
5181         adev->in_pci_err_recovery = false;
5182         if (r)
5183                 goto out;
5184
5185         r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
5186
5187 out:
5188         if (!r) {
5189                 if (amdgpu_device_cache_pci_state(adev->pdev))
5190                         pci_restore_state(adev->pdev);
5191
5192                 DRM_INFO("PCIe error recovery succeeded\n");
5193         } else {
5194                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5195                 amdgpu_device_unlock_adev(adev);
5196         }
5197
5198         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5199 }
5200
5201 /**
5202  * amdgpu_pci_resume() - resume normal ops after PCI reset
5203  * @pdev: pointer to PCI device
5204  *
5205  * Called when the error recovery driver tells us that it's
5206  * OK to resume normal operation.
5207  */
5208 void amdgpu_pci_resume(struct pci_dev *pdev)
5209 {
5210         struct drm_device *dev = pci_get_drvdata(pdev);
5211         struct amdgpu_device *adev = drm_to_adev(dev);
5212         int i;
5213
5214
5215         DRM_INFO("PCI error: resume callback!!\n");
5216
5217         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5218                 struct amdgpu_ring *ring = adev->rings[i];
5219
5220                 if (!ring || !ring->sched.thread)
5221                         continue;
5222
5223
5224                 drm_sched_resubmit_jobs(&ring->sched);
5225                 drm_sched_start(&ring->sched, true);
5226         }
5227
5228         amdgpu_device_unlock_adev(adev);
5229 }
5230
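     /**
      * amdgpu_device_cache_pci_state - save the PCI config space of the device
      *
      * @pdev: PCI device struct
      *
      * Saves the current PCI configuration space and keeps a copy of it in
      * adev->pci_state so that it can be restored after a reset.
      * Returns true on success, false otherwise.
      */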
5231 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5232 {
5233         struct drm_device *dev = pci_get_drvdata(pdev);
5234         struct amdgpu_device *adev = drm_to_adev(dev);
5235         int r;
5236
5237         r = pci_save_state(pdev);
5238         if (!r) {
5239                 kfree(adev->pci_state);
5240
5241                 adev->pci_state = pci_store_saved_state(pdev);
5242
5243                 if (!adev->pci_state) {
5244                         DRM_ERROR("Failed to store PCI saved state");
5245                         return false;
5246                 }
5247         } else {
5248                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5249                 return false;
5250         }
5251
5252         return true;
5253 }
5254
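     /**
      * amdgpu_device_load_pci_state - restore the cached PCI config space
      *
      * @pdev: PCI device struct
      *
      * Loads the PCI configuration space previously cached by
      * amdgpu_device_cache_pci_state() and restores it to the device.
      * Returns true on success, false otherwise.
      */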
5255 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5256 {
5257         struct drm_device *dev = pci_get_drvdata(pdev);
5258         struct amdgpu_device *adev = drm_to_adev(dev);
5259         int r;
5260
5261         if (!adev->pci_state)
5262                 return false;
5263
5264         r = pci_load_saved_state(pdev, adev->pci_state);
5265
5266         if (!r) {
5267                 pci_restore_state(pdev);
5268         } else {
5269                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5270                 return false;
5271         }
5272
5273         return true;
5274 }
5275
5276