1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_fb_helper.h>
41 #include <drm/drm_probe_helper.h>
42 #include <drm/amdgpu_drm.h>
43 #include <linux/vgaarb.h>
44 #include <linux/vga_switcheroo.h>
45 #include <linux/efi.h>
46 #include "amdgpu.h"
47 #include "amdgpu_trace.h"
48 #include "amdgpu_i2c.h"
49 #include "atom.h"
50 #include "amdgpu_atombios.h"
51 #include "amdgpu_atomfirmware.h"
52 #include "amd_pcie.h"
53 #ifdef CONFIG_DRM_AMDGPU_SI
54 #include "si.h"
55 #endif
56 #ifdef CONFIG_DRM_AMDGPU_CIK
57 #include "cik.h"
58 #endif
59 #include "vi.h"
60 #include "soc15.h"
61 #include "nv.h"
62 #include "bif/bif_4_1_d.h"
63 #include <linux/firmware.h>
64 #include "amdgpu_vf_error.h"
65
66 #include "amdgpu_amdkfd.h"
67 #include "amdgpu_pm.h"
68
69 #include "amdgpu_xgmi.h"
70 #include "amdgpu_ras.h"
71 #include "amdgpu_pmu.h"
72 #include "amdgpu_fru_eeprom.h"
73 #include "amdgpu_reset.h"
74
75 #include <linux/suspend.h>
76 #include <drm/task_barrier.h>
77 #include <linux/pm_runtime.h>
78
79 #include <drm/drm_drv.h>
80
81 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
88
89 #define AMDGPU_RESUME_MS                2000
90 #define AMDGPU_MAX_RETRY_LIMIT          2
91 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
92
93 const char *amdgpu_asic_name[] = {
94         "TAHITI",
95         "PITCAIRN",
96         "VERDE",
97         "OLAND",
98         "HAINAN",
99         "BONAIRE",
100         "KAVERI",
101         "KABINI",
102         "HAWAII",
103         "MULLINS",
104         "TOPAZ",
105         "TONGA",
106         "FIJI",
107         "CARRIZO",
108         "STONEY",
109         "POLARIS10",
110         "POLARIS11",
111         "POLARIS12",
112         "VEGAM",
113         "VEGA10",
114         "VEGA12",
115         "VEGA20",
116         "RAVEN",
117         "ARCTURUS",
118         "RENOIR",
119         "ALDEBARAN",
120         "NAVI10",
121         "CYAN_SKILLFISH",
122         "NAVI14",
123         "NAVI12",
124         "SIENNA_CICHLID",
125         "NAVY_FLOUNDER",
126         "VANGOGH",
127         "DIMGREY_CAVEFISH",
128         "BEIGE_GOBY",
129         "YELLOW_CARP",
130         "IP DISCOVERY",
131         "LAST",
132 };
133
134 /**
135  * DOC: pcie_replay_count
136  *
137  * The amdgpu driver provides a sysfs API for reporting the total number
138  * of PCIe replays (NAKs).
139  * The file pcie_replay_count is used for this and returns the total
140  * number of replays as a sum of the NAKs generated and NAKs received.
141  */
142
143 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
144                 struct device_attribute *attr, char *buf)
145 {
146         struct drm_device *ddev = dev_get_drvdata(dev);
147         struct amdgpu_device *adev = drm_to_adev(ddev);
148         uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
149
150         return sysfs_emit(buf, "%llu\n", cnt);
151 }
152
153 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
154                 amdgpu_device_get_pcie_replay_count, NULL);
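/*
 * Illustrative sketch (not part of the driver): pcie_replay_count, like the
 * product_name, product_number and serial_number attributes below, shows up
 * in the device's sysfs directory. A minimal userspace reader might look
 * like this; the card0 index is an assumption and varies per system.
 */
#if 0	/* userspace example only, never built with the kernel */
#include <stdio.h>

int main(void)
{
	unsigned long long replays = 0;
	FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");

	if (f) {
		if (fscanf(f, "%llu", &replays) == 1)
			printf("PCIe replay count: %llu\n", replays);
		fclose(f);
	}
	return 0;
}
#endif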
155
156 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
157
158 /**
159  * DOC: product_name
160  *
161  * The amdgpu driver provides a sysfs API for reporting the product name
162  * for the device.
163  * The file product_name is used for this and returns the product name
164  * as returned from the FRU.
165  * NOTE: This is only available for certain server cards
166  */
167
168 static ssize_t amdgpu_device_get_product_name(struct device *dev,
169                 struct device_attribute *attr, char *buf)
170 {
171         struct drm_device *ddev = dev_get_drvdata(dev);
172         struct amdgpu_device *adev = drm_to_adev(ddev);
173
174         return sysfs_emit(buf, "%s\n", adev->product_name);
175 }
176
177 static DEVICE_ATTR(product_name, S_IRUGO,
178                 amdgpu_device_get_product_name, NULL);
179
180 /**
181  * DOC: product_number
182  *
183  * The amdgpu driver provides a sysfs API for reporting the part number
184  * for the device.
185  * The file product_number is used for this and returns the part number
186  * as returned from the FRU.
187  * NOTE: This is only available for certain server cards
188  */
189
190 static ssize_t amdgpu_device_get_product_number(struct device *dev,
191                 struct device_attribute *attr, char *buf)
192 {
193         struct drm_device *ddev = dev_get_drvdata(dev);
194         struct amdgpu_device *adev = drm_to_adev(ddev);
195
196         return sysfs_emit(buf, "%s\n", adev->product_number);
197 }
198
199 static DEVICE_ATTR(product_number, S_IRUGO,
200                 amdgpu_device_get_product_number, NULL);
201
202 /**
203  * DOC: serial_number
204  *
205  * The amdgpu driver provides a sysfs API for reporting the serial number
206  * for the device.
207  * The file serial_number is used for this and returns the serial number
208  * as returned from the FRU.
209  * NOTE: This is only available for certain server cards
210  */
211
212 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
213                 struct device_attribute *attr, char *buf)
214 {
215         struct drm_device *ddev = dev_get_drvdata(dev);
216         struct amdgpu_device *adev = drm_to_adev(ddev);
217
218         return sysfs_emit(buf, "%s\n", adev->serial);
219 }
220
221 static DEVICE_ATTR(serial_number, S_IRUGO,
222                 amdgpu_device_get_serial_number, NULL);
223
224 /**
225  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
226  *
227  * @dev: drm_device pointer
228  *
229  * Returns true if the device is a dGPU with ATPX power control,
230  * otherwise return false.
231  */
232 bool amdgpu_device_supports_px(struct drm_device *dev)
233 {
234         struct amdgpu_device *adev = drm_to_adev(dev);
235
236         if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
237                 return true;
238         return false;
239 }
240
241 /**
242  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
243  *
244  * @dev: drm_device pointer
245  *
246  * Returns true if the device is a dGPU with ACPI power control,
247  * otherwise return false.
248  */
249 bool amdgpu_device_supports_boco(struct drm_device *dev)
250 {
251         struct amdgpu_device *adev = drm_to_adev(dev);
252
253         if (adev->has_pr3 ||
254             ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
255                 return true;
256         return false;
257 }
258
259 /**
260  * amdgpu_device_supports_baco - Does the device support BACO
261  *
262  * @dev: drm_device pointer
263  *
264  * Returns true if the device supports BACO,
265  * otherwise return false.
266  */
267 bool amdgpu_device_supports_baco(struct drm_device *dev)
268 {
269         struct amdgpu_device *adev = drm_to_adev(dev);
270
271         return amdgpu_asic_supports_baco(adev);
272 }
273
274 /**
275  * amdgpu_device_supports_smart_shift - Is the device a dGPU with
276  * smart shift support
277  *
278  * @dev: drm_device pointer
279  *
280  * Returns true if the device is a dGPU with Smart Shift support,
281  * otherwise returns false.
282  */
283 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
284 {
285         return (amdgpu_device_supports_boco(dev) &&
286                 amdgpu_acpi_is_power_shift_control_supported());
287 }
288
289 /*
290  * VRAM access helper functions
291  */
292
293 /**
294  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
295  *
296  * @adev: amdgpu_device pointer
297  * @pos: offset of the buffer in vram
298  * @buf: virtual address of the buffer in system memory
299  * @size: read/write size, sizeof(@buf) must be >= @size
300  * @write: true - write to vram, otherwise - read from vram
301  */
302 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
303                              void *buf, size_t size, bool write)
304 {
305         unsigned long flags;
306         uint32_t hi = ~0, tmp = 0;
307         uint32_t *data = buf;
308         uint64_t last;
309         int idx;
310
311         if (!drm_dev_enter(adev_to_drm(adev), &idx))
312                 return;
313
314         BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
315
316         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
317         for (last = pos + size; pos < last; pos += 4) {
318                 tmp = pos >> 31;
319
320                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
321                 if (tmp != hi) {
322                         WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
323                         hi = tmp;
324                 }
325                 if (write)
326                         WREG32_NO_KIQ(mmMM_DATA, *data++);
327                 else
328                         *data++ = RREG32_NO_KIQ(mmMM_DATA);
329         }
330
331         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
332         drm_dev_exit(idx);
333 }
334
335 /**
336  * amdgpu_device_aper_access - access vram by vram aperture
337  *
338  * @adev: amdgpu_device pointer
339  * @pos: offset of the buffer in vram
340  * @buf: virtual address of the buffer in system memory
341  * @size: read/write size, sizeof(@buf) must be >= @size
342  * @write: true - write to vram, otherwise - read from vram
343  *
344  * Returns the number of bytes that have been transferred.
345  */
346 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
347                                  void *buf, size_t size, bool write)
348 {
349 #ifdef CONFIG_64BIT
350         void __iomem *addr;
351         size_t count = 0;
352         uint64_t last;
353
354         if (!adev->mman.aper_base_kaddr)
355                 return 0;
356
357         last = min(pos + size, adev->gmc.visible_vram_size);
358         if (last > pos) {
359                 addr = adev->mman.aper_base_kaddr + pos;
360                 count = last - pos;
361
362                 if (write) {
363                         memcpy_toio(addr, buf, count);
364                         mb();
365                         amdgpu_device_flush_hdp(adev, NULL);
366                 } else {
367                         amdgpu_device_invalidate_hdp(adev, NULL);
368                         mb();
369                         memcpy_fromio(buf, addr, count);
370                 }
371
372         }
373
374         return count;
375 #else
376         return 0;
377 #endif
378 }
379
380 /**
381  * amdgpu_device_vram_access - read/write a buffer in vram
382  *
383  * @adev: amdgpu_device pointer
384  * @pos: offset of the buffer in vram
385  * @buf: virtual address of the buffer in system memory
386  * @size: read/write size, sizeof(@buf) must be >= @size
387  * @write: true - write to vram, otherwise - read from vram
388  */
389 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
390                                void *buf, size_t size, bool write)
391 {
392         size_t count;
393
394         /* try using the vram aperture to access vram first */
395         count = amdgpu_device_aper_access(adev, pos, buf, size, write);
396         size -= count;
397         if (size) {
398                 /* use MM to access the rest of vram */
399                 pos += count;
400                 buf += count;
401                 amdgpu_device_mm_access(adev, pos, buf, size, write);
402         }
403 }
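/*
 * Illustrative sketch (not part of the driver): a hypothetical caller that
 * writes a dword to a VRAM offset and reads it back through the helper
 * above. amdgpu_device_vram_access() decides internally whether the CPU
 * visible aperture or the MM_INDEX/MM_DATA fallback is used.
 */
#if 0	/* example only */
static void amdgpu_device_vram_access_example(struct amdgpu_device *adev)
{
	uint32_t pattern = 0xdeadbeef;
	uint32_t readback = 0;

	/* write one dword at (hypothetical) VRAM offset 0x1000, then read it back */
	amdgpu_device_vram_access(adev, 0x1000, &pattern, sizeof(pattern), true);
	amdgpu_device_vram_access(adev, 0x1000, &readback, sizeof(readback), false);

	WARN_ON(readback != pattern);
}
#endif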
404
405 /*
406  * register access helper functions.
407  */
408
409 /* Check if hw access should be skipped because of hotplug or device error */
410 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
411 {
412         if (adev->no_hw_access)
413                 return true;
414
415 #ifdef CONFIG_LOCKDEP
416         /*
417          * This is a bit complicated to understand, so worth a comment. What we assert
418          * here is that the GPU reset is not running on another thread in parallel.
419          *
420          * For this we trylock the read side of the reset semaphore, if that succeeds
421          * we know that the reset is not running in parallel.
422          *
423          * If the trylock fails we assert that we are either already holding the read
424          * side of the lock or are the reset thread itself and hold the write side of
425          * the lock.
426          */
427         if (in_task()) {
428                 if (down_read_trylock(&adev->reset_domain->sem))
429                         up_read(&adev->reset_domain->sem);
430                 else
431                         lockdep_assert_held(&adev->reset_domain->sem);
432         }
433 #endif
434         return false;
435 }
436
437 /**
438  * amdgpu_device_rreg - read a memory mapped IO or indirect register
439  *
440  * @adev: amdgpu_device pointer
441  * @reg: dword aligned register offset
442  * @acc_flags: access flags which require special behavior
443  *
444  * Returns the 32 bit value from the offset specified.
445  */
446 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
447                             uint32_t reg, uint32_t acc_flags)
448 {
449         uint32_t ret;
450
451         if (amdgpu_device_skip_hw_access(adev))
452                 return 0;
453
454         if ((reg * 4) < adev->rmmio_size) {
455                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
456                     amdgpu_sriov_runtime(adev) &&
457                     down_read_trylock(&adev->reset_domain->sem)) {
458                         ret = amdgpu_kiq_rreg(adev, reg);
459                         up_read(&adev->reset_domain->sem);
460                 } else {
461                         ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
462                 }
463         } else {
464                 ret = adev->pcie_rreg(adev, reg * 4);
465         }
466
467         trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
468
469         return ret;
470 }
471
472 /*
473  * MMIO register byte read helper function
474  * @offset: byte offset from MMIO start
475  *
476 */
477
478 /**
479  * amdgpu_mm_rreg8 - read a memory mapped IO register
480  *
481  * @adev: amdgpu_device pointer
482  * @offset: byte aligned register offset
483  *
484  * Returns the 8 bit value from the offset specified.
485  */
486 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
487 {
488         if (amdgpu_device_skip_hw_access(adev))
489                 return 0;
490
491         if (offset < adev->rmmio_size)
492                 return (readb(adev->rmmio + offset));
493         BUG();
494 }
495
496 /*
497  * MMIO register byte write helper function
498  * @offset: byte offset from MMIO start
499  * @value: the value to be written to the register
500  *
501 */
502 /**
503  * amdgpu_mm_wreg8 - write a memory mapped IO register
504  *
505  * @adev: amdgpu_device pointer
506  * @offset: byte aligned register offset
507  * @value: 8 bit value to write
508  *
509  * Writes the value specified to the offset specified.
510  */
511 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
512 {
513         if (amdgpu_device_skip_hw_access(adev))
514                 return;
515
516         if (offset < adev->rmmio_size)
517                 writeb(value, adev->rmmio + offset);
518         else
519                 BUG();
520 }
521
522 /**
523  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
524  *
525  * @adev: amdgpu_device pointer
526  * @reg: dword aligned register offset
527  * @v: 32 bit value to write to the register
528  * @acc_flags: access flags which require special behavior
529  *
530  * Writes the value specified to the offset specified.
531  */
532 void amdgpu_device_wreg(struct amdgpu_device *adev,
533                         uint32_t reg, uint32_t v,
534                         uint32_t acc_flags)
535 {
536         if (amdgpu_device_skip_hw_access(adev))
537                 return;
538
539         if ((reg * 4) < adev->rmmio_size) {
540                 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
541                     amdgpu_sriov_runtime(adev) &&
542                     down_read_trylock(&adev->reset_domain->sem)) {
543                         amdgpu_kiq_wreg(adev, reg, v);
544                         up_read(&adev->reset_domain->sem);
545                 } else {
546                         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
547                 }
548         } else {
549                 adev->pcie_wreg(adev, reg * 4, v);
550         }
551
552         trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
553 }
554
555 /**
556  * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
557  *
558  * @adev: amdgpu_device pointer
559  * @reg: mmio/rlc register
560  * @v: value to write
561  *
562  * This function is invoked only for debugfs register access.
563  */
564 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
565                              uint32_t reg, uint32_t v)
566 {
567         if (amdgpu_device_skip_hw_access(adev))
568                 return;
569
570         if (amdgpu_sriov_fullaccess(adev) &&
571             adev->gfx.rlc.funcs &&
572             adev->gfx.rlc.funcs->is_rlcg_access_range) {
573                 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
574                         return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
575         } else if ((reg * 4) >= adev->rmmio_size) {
576                 adev->pcie_wreg(adev, reg * 4, v);
577         } else {
578                 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
579         }
580 }
581
582 /**
583  * amdgpu_mm_rdoorbell - read a doorbell dword
584  *
585  * @adev: amdgpu_device pointer
586  * @index: doorbell index
587  *
588  * Returns the value in the doorbell aperture at the
589  * requested doorbell index (CIK).
590  */
591 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
592 {
593         if (amdgpu_device_skip_hw_access(adev))
594                 return 0;
595
596         if (index < adev->doorbell.num_doorbells) {
597                 return readl(adev->doorbell.ptr + index);
598         } else {
599                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
600                 return 0;
601         }
602 }
603
604 /**
605  * amdgpu_mm_wdoorbell - write a doorbell dword
606  *
607  * @adev: amdgpu_device pointer
608  * @index: doorbell index
609  * @v: value to write
610  *
611  * Writes @v to the doorbell aperture at the
612  * requested doorbell index (CIK).
613  */
614 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
615 {
616         if (amdgpu_device_skip_hw_access(adev))
617                 return;
618
619         if (index < adev->doorbell.num_doorbells) {
620                 writel(v, adev->doorbell.ptr + index);
621         } else {
622                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
623         }
624 }
625
626 /**
627  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
628  *
629  * @adev: amdgpu_device pointer
630  * @index: doorbell index
631  *
632  * Returns the value in the doorbell aperture at the
633  * requested doorbell index (VEGA10+).
634  */
635 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
636 {
637         if (amdgpu_device_skip_hw_access(adev))
638                 return 0;
639
640         if (index < adev->doorbell.num_doorbells) {
641                 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
642         } else {
643                 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
644                 return 0;
645         }
646 }
647
648 /**
649  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
650  *
651  * @adev: amdgpu_device pointer
652  * @index: doorbell index
653  * @v: value to write
654  *
655  * Writes @v to the doorbell aperture at the
656  * requested doorbell index (VEGA10+).
657  */
658 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
659 {
660         if (amdgpu_device_skip_hw_access(adev))
661                 return;
662
663         if (index < adev->doorbell.num_doorbells) {
664                 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
665         } else {
666                 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
667         }
668 }
669
670 /**
671  * amdgpu_device_indirect_rreg - read an indirect register
672  *
673  * @adev: amdgpu_device pointer
674  * @pcie_index: mmio register offset
675  * @pcie_data: mmio register offset
676  * @reg_addr: indirect register address to read from
677  *
678  * Returns the value of indirect register @reg_addr
679  */
680 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
681                                 u32 pcie_index, u32 pcie_data,
682                                 u32 reg_addr)
683 {
684         unsigned long flags;
685         u32 r;
686         void __iomem *pcie_index_offset;
687         void __iomem *pcie_data_offset;
688
689         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
690         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
691         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
692
693         writel(reg_addr, pcie_index_offset);
694         readl(pcie_index_offset);
695         r = readl(pcie_data_offset);
696         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
697
698         return r;
699 }
700
701 /**
702  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
703  *
704  * @adev: amdgpu_device pointer
705  * @pcie_index: mmio register offset
706  * @pcie_data: mmio register offset
707  * @reg_addr: indirect register address to read from
708  *
709  * Returns the value of indirect register @reg_addr
710  */
711 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
712                                   u32 pcie_index, u32 pcie_data,
713                                   u32 reg_addr)
714 {
715         unsigned long flags;
716         u64 r;
717         void __iomem *pcie_index_offset;
718         void __iomem *pcie_data_offset;
719
720         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
721         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
722         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
723
724         /* read low 32 bits */
725         writel(reg_addr, pcie_index_offset);
726         readl(pcie_index_offset);
727         r = readl(pcie_data_offset);
728         /* read high 32 bits */
729         writel(reg_addr + 4, pcie_index_offset);
730         readl(pcie_index_offset);
731         r |= ((u64)readl(pcie_data_offset) << 32);
732         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
733
734         return r;
735 }
736
737 /**
738  * amdgpu_device_indirect_wreg - write an indirect register address
739  *
740  * @adev: amdgpu_device pointer
741  * @pcie_index: mmio register offset
742  * @pcie_data: mmio register offset
743  * @reg_addr: indirect register offset
744  * @reg_data: indirect register data
745  *
746  */
747 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
748                                  u32 pcie_index, u32 pcie_data,
749                                  u32 reg_addr, u32 reg_data)
750 {
751         unsigned long flags;
752         void __iomem *pcie_index_offset;
753         void __iomem *pcie_data_offset;
754
755         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
756         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
757         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
758
759         writel(reg_addr, pcie_index_offset);
760         readl(pcie_index_offset);
761         writel(reg_data, pcie_data_offset);
762         readl(pcie_data_offset);
763         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
764 }
765
766 /**
767  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
768  *
769  * @adev: amdgpu_device pointer
770  * @pcie_index: mmio register offset
771  * @pcie_data: mmio register offset
772  * @reg_addr: indirect register offset
773  * @reg_data: indirect register data
774  *
775  */
776 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
777                                    u32 pcie_index, u32 pcie_data,
778                                    u32 reg_addr, u64 reg_data)
779 {
780         unsigned long flags;
781         void __iomem *pcie_index_offset;
782         void __iomem *pcie_data_offset;
783
784         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
785         pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
786         pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
787
788         /* write low 32 bits */
789         writel(reg_addr, pcie_index_offset);
790         readl(pcie_index_offset);
791         writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
792         readl(pcie_data_offset);
793         /* write high 32 bits */
794         writel(reg_addr + 4, pcie_index_offset);
795         readl(pcie_index_offset);
796         writel((u32)(reg_data >> 32), pcie_data_offset);
797         readl(pcie_data_offset);
798         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
799 }
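/*
 * Illustrative sketch (not part of the driver): ASIC code typically wraps the
 * indirect helpers above with its own PCIE index/data register offsets and
 * plugs the wrappers into adev->pcie_rreg/adev->pcie_wreg. The offsets below
 * are hypothetical placeholders, not real register definitions.
 */
#if 0	/* example only */
#define EXAMPLE_PCIE_INDEX2	0x000e	/* hypothetical index register offset */
#define EXAMPLE_PCIE_DATA2	0x000f	/* hypothetical data register offset */

static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	return amdgpu_device_indirect_rreg(adev, EXAMPLE_PCIE_INDEX2,
					   EXAMPLE_PCIE_DATA2, reg);
}

static void example_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	amdgpu_device_indirect_wreg(adev, EXAMPLE_PCIE_INDEX2,
				    EXAMPLE_PCIE_DATA2, reg, v);
}
#endif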
800
801 /**
802  * amdgpu_invalid_rreg - dummy reg read function
803  *
804  * @adev: amdgpu_device pointer
805  * @reg: offset of register
806  *
807  * Dummy register read function.  Used for register blocks
808  * that certain asics don't have (all asics).
809  * Returns the value in the register.
810  */
811 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
812 {
813         DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
814         BUG();
815         return 0;
816 }
817
818 /**
819  * amdgpu_invalid_wreg - dummy reg write function
820  *
821  * @adev: amdgpu_device pointer
822  * @reg: offset of register
823  * @v: value to write to the register
824  *
825  * Dummy register write function.  Used for register blocks
826  * that certain asics don't have (all asics).
827  */
828 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
829 {
830         DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
831                   reg, v);
832         BUG();
833 }
834
835 /**
836  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
837  *
838  * @adev: amdgpu_device pointer
839  * @reg: offset of register
840  *
841  * Dummy register read function.  Used for register blocks
842  * that certain asics don't have (all asics).
843  * Returns the value in the register.
844  */
845 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
846 {
847         DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
848         BUG();
849         return 0;
850 }
851
852 /**
853  * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
854  *
855  * @adev: amdgpu_device pointer
856  * @reg: offset of register
857  * @v: value to write to the register
858  *
859  * Dummy register write function.  Used for register blocks
860  * that certain asics don't have (all asics).
861  */
862 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
863 {
864         DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
865                   reg, v);
866         BUG();
867 }
868
869 /**
870  * amdgpu_block_invalid_rreg - dummy reg read function
871  *
872  * @adev: amdgpu_device pointer
873  * @block: offset of instance
874  * @reg: offset of register
875  *
876  * Dummy register read function.  Used for register blocks
877  * that certain asics don't have (all asics).
878  * Returns the value in the register.
879  */
880 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
881                                           uint32_t block, uint32_t reg)
882 {
883         DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
884                   reg, block);
885         BUG();
886         return 0;
887 }
888
889 /**
890  * amdgpu_block_invalid_wreg - dummy reg write function
891  *
892  * @adev: amdgpu_device pointer
893  * @block: offset of instance
894  * @reg: offset of register
895  * @v: value to write to the register
896  *
897  * Dummy register write function.  Used for register blocks
898  * that certain asics don't have (all asics).
899  */
900 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
901                                       uint32_t block,
902                                       uint32_t reg, uint32_t v)
903 {
904         DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
905                   reg, block, v);
906         BUG();
907 }
908
909 /**
910  * amdgpu_device_asic_init - Wrapper for atom asic_init
911  *
912  * @adev: amdgpu_device pointer
913  *
914  * Does any asic specific work and then calls atom asic init.
915  */
916 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
917 {
918         amdgpu_asic_pre_asic_init(adev);
919
920         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
921                 return amdgpu_atomfirmware_asic_init(adev, true);
922         else
923                 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
924 }
925
926 /**
927  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
928  *
929  * @adev: amdgpu_device pointer
930  *
931  * Allocates a scratch page of VRAM for use by various things in the
932  * driver.
933  */
934 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
935 {
936         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
937                                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
938                                        &adev->vram_scratch.robj,
939                                        &adev->vram_scratch.gpu_addr,
940                                        (void **)&adev->vram_scratch.ptr);
941 }
942
943 /**
944  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
945  *
946  * @adev: amdgpu_device pointer
947  *
948  * Frees the VRAM scratch page.
949  */
950 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
951 {
952         amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
953 }
954
955 /**
956  * amdgpu_device_program_register_sequence - program an array of registers.
957  *
958  * @adev: amdgpu_device pointer
959  * @registers: pointer to the register array
960  * @array_size: size of the register array
961  *
962  * Programs an array of registers with AND and OR masks.
963  * This is a helper for setting golden registers.
964  */
965 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
966                                              const u32 *registers,
967                                              const u32 array_size)
968 {
969         u32 tmp, reg, and_mask, or_mask;
970         int i;
971
972         if (array_size % 3)
973                 return;
974
975         for (i = 0; i < array_size; i += 3) {
976                 reg = registers[i + 0];
977                 and_mask = registers[i + 1];
978                 or_mask = registers[i + 2];
979
980                 if (and_mask == 0xffffffff) {
981                         tmp = or_mask;
982                 } else {
983                         tmp = RREG32(reg);
984                         tmp &= ~and_mask;
985                         if (adev->family >= AMDGPU_FAMILY_AI)
986                                 tmp |= (or_mask & and_mask);
987                         else
988                                 tmp |= or_mask;
989                 }
990                 WREG32(reg, tmp);
991         }
992 }
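/*
 * Illustrative sketch (not part of the driver): golden register lists are
 * flat arrays of (register, AND mask, OR mask) triples, which is why
 * array_size must be a multiple of 3. The offsets and masks below are
 * hypothetical placeholders, not real golden settings.
 */
#if 0	/* example only */
static const u32 example_golden_settings[] = {
	/* reg,     and_mask,   or_mask */
	0x1234, 0xffffffff, 0x00000001,	/* and_mask of ~0 means write or_mask as-is */
	0x5678, 0x0000ff00, 0x00001200,	/* otherwise read-modify-write the masked field */
};

static void example_program_golden_regs(struct amdgpu_device *adev)
{
	amdgpu_device_program_register_sequence(adev,
						example_golden_settings,
						ARRAY_SIZE(example_golden_settings));
}
#endif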
993
994 /**
995  * amdgpu_device_pci_config_reset - reset the GPU
996  *
997  * @adev: amdgpu_device pointer
998  *
999  * Resets the GPU using the pci config reset sequence.
1000  * Only applicable to asics prior to vega10.
1001  */
1002 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1003 {
1004         pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1005 }
1006
1007 /**
1008  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1009  *
1010  * @adev: amdgpu_device pointer
1011  *
1012  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1013  */
1014 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1015 {
1016         return pci_reset_function(adev->pdev);
1017 }
1018
1019 /*
1020  * GPU doorbell aperture helpers function.
1021  */
1022 /**
1023  * amdgpu_device_doorbell_init - Init doorbell driver information.
1024  *
1025  * @adev: amdgpu_device pointer
1026  *
1027  * Init doorbell driver information (CIK)
1028  * Returns 0 on success, error on failure.
1029  */
1030 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1031 {
1032
1033         /* No doorbell on SI hardware generation */
1034         if (adev->asic_type < CHIP_BONAIRE) {
1035                 adev->doorbell.base = 0;
1036                 adev->doorbell.size = 0;
1037                 adev->doorbell.num_doorbells = 0;
1038                 adev->doorbell.ptr = NULL;
1039                 return 0;
1040         }
1041
1042         if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1043                 return -EINVAL;
1044
1045         amdgpu_asic_init_doorbell_index(adev);
1046
1047         /* doorbell bar mapping */
1048         adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1049         adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1050
1051         if (adev->enable_mes) {
1052                 adev->doorbell.num_doorbells =
1053                         adev->doorbell.size / sizeof(u32);
1054         } else {
1055                 adev->doorbell.num_doorbells =
1056                         min_t(u32, adev->doorbell.size / sizeof(u32),
1057                               adev->doorbell_index.max_assignment+1);
1058                 if (adev->doorbell.num_doorbells == 0)
1059                         return -EINVAL;
1060
1061                 /* For Vega, reserve and map two pages on doorbell BAR since SDMA
1062                  * paging queue doorbells use the second page. The
1063                  * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1064                  * doorbells are in the first page. So with paging queue enabled,
1065                  * the max num_doorbells should be increased by 1 page (0x400 in dwords)
1066                  */
1067                 if (adev->asic_type >= CHIP_VEGA10)
1068                         adev->doorbell.num_doorbells += 0x400;
1069         }
1070
1071         adev->doorbell.ptr = ioremap(adev->doorbell.base,
1072                                      adev->doorbell.num_doorbells *
1073                                      sizeof(u32));
1074         if (adev->doorbell.ptr == NULL)
1075                 return -ENOMEM;
1076
1077         return 0;
1078 }
1079
1080 /**
1081  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1082  *
1083  * @adev: amdgpu_device pointer
1084  *
1085  * Tear down doorbell driver information (CIK)
1086  */
1087 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1088 {
1089         iounmap(adev->doorbell.ptr);
1090         adev->doorbell.ptr = NULL;
1091 }
1092
1093
1094
1095 /*
1096  * amdgpu_device_wb_*()
1097  * Writeback is the method by which the GPU updates special pages in memory
1098  * with the status of certain GPU events (fences, ring pointers, etc.).
1099  */
1100
1101 /**
1102  * amdgpu_device_wb_fini - Disable Writeback and free memory
1103  *
1104  * @adev: amdgpu_device pointer
1105  *
1106  * Disables Writeback and frees the Writeback memory (all asics).
1107  * Used at driver shutdown.
1108  */
1109 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1110 {
1111         if (adev->wb.wb_obj) {
1112                 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1113                                       &adev->wb.gpu_addr,
1114                                       (void **)&adev->wb.wb);
1115                 adev->wb.wb_obj = NULL;
1116         }
1117 }
1118
1119 /**
1120  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1121  *
1122  * @adev: amdgpu_device pointer
1123  *
1124  * Initializes writeback and allocates writeback memory (all asics).
1125  * Used at driver startup.
1126  * Returns 0 on success or a negative error code on failure.
1127  */
1128 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1129 {
1130         int r;
1131
1132         if (adev->wb.wb_obj == NULL) {
1133                 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1134                 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1135                                             PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1136                                             &adev->wb.wb_obj, &adev->wb.gpu_addr,
1137                                             (void **)&adev->wb.wb);
1138                 if (r) {
1139                         dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1140                         return r;
1141                 }
1142
1143                 adev->wb.num_wb = AMDGPU_MAX_WB;
1144                 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1145
1146                 /* clear wb memory */
1147                 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1148         }
1149
1150         return 0;
1151 }
1152
1153 /**
1154  * amdgpu_device_wb_get - Allocate a wb entry
1155  *
1156  * @adev: amdgpu_device pointer
1157  * @wb: wb index
1158  *
1159  * Allocate a wb slot for use by the driver (all asics).
1160  * Returns 0 on success or -EINVAL on failure.
1161  */
1162 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1163 {
1164         unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1165
1166         if (offset < adev->wb.num_wb) {
1167                 __set_bit(offset, adev->wb.used);
1168                 *wb = offset << 3; /* convert to dw offset */
1169                 return 0;
1170         } else {
1171                 return -EINVAL;
1172         }
1173 }
1174
1175 /**
1176  * amdgpu_device_wb_free - Free a wb entry
1177  *
1178  * @adev: amdgpu_device pointer
1179  * @wb: wb index
1180  *
1181  * Free a wb slot allocated for use by the driver (all asics)
1182  */
1183 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1184 {
1185         wb >>= 3;
1186         if (wb < adev->wb.num_wb)
1187                 __clear_bit(wb, adev->wb.used);
1188 }
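/*
 * Illustrative sketch (not part of the driver): a hypothetical user of the
 * writeback helpers above allocates a slot, lets the GPU update the matching
 * dword in adev->wb.wb[], and frees the slot when done.
 */
#if 0	/* example only */
static int example_use_wb_slot(struct amdgpu_device *adev)
{
	u32 wb_index;
	int r;

	r = amdgpu_device_wb_get(adev, &wb_index);
	if (r)
		return r;

	/* the CPU visible copy of the slot lives at adev->wb.wb[wb_index],
	 * the GPU address is adev->wb.gpu_addr + wb_index * 4
	 */
	adev->wb.wb[wb_index] = 0;

	/* ... point a ring or fence at this slot so the GPU writes its status here ... */

	amdgpu_device_wb_free(adev, wb_index);
	return 0;
}
#endif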
1189
1190 /**
1191  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1192  *
1193  * @adev: amdgpu_device pointer
1194  *
1195  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1196  * to fail, but if any of the BARs is not accessible after the resize we abort
1197  * driver loading by returning -ENODEV.
1198  */
1199 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1200 {
1201         int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1202         struct pci_bus *root;
1203         struct resource *res;
1204         unsigned i;
1205         u16 cmd;
1206         int r;
1207
1208         /* Bypass for VF */
1209         if (amdgpu_sriov_vf(adev))
1210                 return 0;
1211
1212         /* skip if the bios has already enabled large BAR */
1213         if (adev->gmc.real_vram_size &&
1214             (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1215                 return 0;
1216
1217         /* Check if the root BUS has 64bit memory resources */
1218         root = adev->pdev->bus;
1219         while (root->parent)
1220                 root = root->parent;
1221
1222         pci_bus_for_each_resource(root, res, i) {
1223                 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1224                     res->start > 0x100000000ull)
1225                         break;
1226         }
1227
1228         /* Trying to resize is pointless without a root hub window above 4GB */
1229         if (!res)
1230                 return 0;
1231
1232         /* Limit the BAR size to what is available */
1233         rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1234                         rbar_size);
1235
1236         /* Disable memory decoding while we change the BAR addresses and size */
1237         pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1238         pci_write_config_word(adev->pdev, PCI_COMMAND,
1239                               cmd & ~PCI_COMMAND_MEMORY);
1240
1241         /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1242         amdgpu_device_doorbell_fini(adev);
1243         if (adev->asic_type >= CHIP_BONAIRE)
1244                 pci_release_resource(adev->pdev, 2);
1245
1246         pci_release_resource(adev->pdev, 0);
1247
1248         r = pci_resize_resource(adev->pdev, 0, rbar_size);
1249         if (r == -ENOSPC)
1250                 DRM_INFO("Not enough PCI address space for a large BAR.");
1251         else if (r && r != -ENOTSUPP)
1252                 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1253
1254         pci_assign_unassigned_bus_resources(adev->pdev->bus);
1255
1256         /* When the doorbell or fb BAR isn't available we have no chance of
1257          * using the device.
1258          */
1259         r = amdgpu_device_doorbell_init(adev);
1260         if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1261                 return -ENODEV;
1262
1263         pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1264
1265         return 0;
1266 }
1267
1268 /*
1269  * GPU helpers function.
1270  */
1271 /**
1272  * amdgpu_device_need_post - check if the hw need post or not
1273  *
1274  * @adev: amdgpu_device pointer
1275  *
1276  * Check if the asic has been initialized (all asics) at driver startup
1277  * or if post is needed because a hw reset was performed.
1278  * Returns true if post is needed or false if not.
1279  */
1280 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1281 {
1282         uint32_t reg;
1283
1284         if (amdgpu_sriov_vf(adev))
1285                 return false;
1286
1287         if (amdgpu_passthrough(adev)) {
1288                 /* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
1289                  * reboot some old SMC firmware still needs the driver to do vPost, otherwise
1290                  * the GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so
1291                  * we force vPost for SMC versions below 22.15.
1292                  */
1293                 if (adev->asic_type == CHIP_FIJI) {
1294                         int err;
1295                         uint32_t fw_ver;
1296                         err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1297                         /* force vPost if an error occurred */
1298                         if (err)
1299                                 return true;
1300
1301                         fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1302                         if (fw_ver < 0x00160e00)
1303                                 return true;
1304                 }
1305         }
1306
1307         /* Don't post if we need to reset whole hive on init */
1308         if (adev->gmc.xgmi.pending_reset)
1309                 return false;
1310
1311         if (adev->has_hw_reset) {
1312                 adev->has_hw_reset = false;
1313                 return true;
1314         }
1315
1316         /* bios scratch used on CIK+ */
1317         if (adev->asic_type >= CHIP_BONAIRE)
1318                 return amdgpu_atombios_scratch_need_asic_init(adev);
1319
1320         /* check MEM_SIZE for older asics */
1321         reg = amdgpu_asic_get_config_memsize(adev);
1322
1323         if ((reg != 0) && (reg != 0xffffffff))
1324                 return false;
1325
1326         return true;
1327 }
1328
1329 /**
1330  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1331  *
1332  * @adev: amdgpu_device pointer
1333  *
1334  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1335  * be set for this device.
1336  *
1337  * Returns true if it should be used or false if not.
1338  */
1339 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1340 {
1341         switch (amdgpu_aspm) {
1342         case -1:
1343                 break;
1344         case 0:
1345                 return false;
1346         case 1:
1347                 return true;
1348         default:
1349                 return false;
1350         }
1351         return pcie_aspm_enabled(adev->pdev);
1352 }
1353
1354 /* if we get transitioned to only one device, take VGA back */
1355 /**
1356  * amdgpu_device_vga_set_decode - enable/disable vga decode
1357  *
1358  * @pdev: PCI device pointer
1359  * @state: enable/disable vga decode
1360  *
1361  * Enable/disable vga decode (all asics).
1362  * Returns VGA resource flags.
1363  */
1364 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1365                 bool state)
1366 {
1367         struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1368         amdgpu_asic_set_vga_state(adev, state);
1369         if (state)
1370                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1371                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1372         else
1373                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1374 }
1375
1376 /**
1377  * amdgpu_device_check_block_size - validate the vm block size
1378  *
1379  * @adev: amdgpu_device pointer
1380  *
1381  * Validates the vm block size specified via module parameter.
1382  * The vm block size defines number of bits in page table versus page directory,
1383  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1384  * page table and the remaining bits are in the page directory.
1385  */
1386 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1387 {
1388         /* defines number of bits in page table versus page directory,
1389          * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1390          * page table and the remaining bits are in the page directory */
1391         if (amdgpu_vm_block_size == -1)
1392                 return;
1393
1394         if (amdgpu_vm_block_size < 9) {
1395                 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1396                          amdgpu_vm_block_size);
1397                 amdgpu_vm_block_size = -1;
1398         }
1399 }
1400
1401 /**
1402  * amdgpu_device_check_vm_size - validate the vm size
1403  *
1404  * @adev: amdgpu_device pointer
1405  *
1406  * Validates the vm size in GB specified via module parameter.
1407  * The VM size is the size of the GPU virtual memory space in GB.
1408  */
1409 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1410 {
1411         /* no need to check the default value */
1412         if (amdgpu_vm_size == -1)
1413                 return;
1414
1415         if (amdgpu_vm_size < 1) {
1416                 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1417                          amdgpu_vm_size);
1418                 amdgpu_vm_size = -1;
1419         }
1420 }
1421
1422 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1423 {
1424         struct sysinfo si;
1425         bool is_os_64 = (sizeof(void *) == 8);
1426         uint64_t total_memory;
1427         uint64_t dram_size_seven_GB = 0x1B8000000;
1428         uint64_t dram_size_three_GB = 0xB8000000;
1429
1430         if (amdgpu_smu_memory_pool_size == 0)
1431                 return;
1432
1433         if (!is_os_64) {
1434                 DRM_WARN("Not 64-bit OS, feature not supported\n");
1435                 goto def_value;
1436         }
1437         si_meminfo(&si);
1438         total_memory = (uint64_t)si.totalram * si.mem_unit;
1439
1440         if ((amdgpu_smu_memory_pool_size == 1) ||
1441                 (amdgpu_smu_memory_pool_size == 2)) {
1442                 if (total_memory < dram_size_three_GB)
1443                         goto def_value1;
1444         } else if ((amdgpu_smu_memory_pool_size == 4) ||
1445                 (amdgpu_smu_memory_pool_size == 8)) {
1446                 if (total_memory < dram_size_seven_GB)
1447                         goto def_value1;
1448         } else {
1449                 DRM_WARN("Smu memory pool size not supported\n");
1450                 goto def_value;
1451         }
1452         adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1453
1454         return;
1455
1456 def_value1:
1457         DRM_WARN("Not enough system memory\n");
1458 def_value:
1459         adev->pm.smu_prv_buffer_size = 0;
1460 }
1461
1462 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1463 {
1464         if (!(adev->flags & AMD_IS_APU) ||
1465             adev->asic_type < CHIP_RAVEN)
1466                 return 0;
1467
1468         switch (adev->asic_type) {
1469         case CHIP_RAVEN:
1470                 if (adev->pdev->device == 0x15dd)
1471                         adev->apu_flags |= AMD_APU_IS_RAVEN;
1472                 if (adev->pdev->device == 0x15d8)
1473                         adev->apu_flags |= AMD_APU_IS_PICASSO;
1474                 break;
1475         case CHIP_RENOIR:
1476                 if ((adev->pdev->device == 0x1636) ||
1477                     (adev->pdev->device == 0x164c))
1478                         adev->apu_flags |= AMD_APU_IS_RENOIR;
1479                 else
1480                         adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1481                 break;
1482         case CHIP_VANGOGH:
1483                 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1484                 break;
1485         case CHIP_YELLOW_CARP:
1486                 break;
1487         case CHIP_CYAN_SKILLFISH:
1488                 if ((adev->pdev->device == 0x13FE) ||
1489                     (adev->pdev->device == 0x143F))
1490                         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1491                 break;
1492         default:
1493                 break;
1494         }
1495
1496         return 0;
1497 }
1498
1499 /**
1500  * amdgpu_device_check_arguments - validate module params
1501  *
1502  * @adev: amdgpu_device pointer
1503  *
1504  * Validates certain module parameters and updates
1505  * the associated values used by the driver (all asics).
1506  */
1507 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1508 {
1509         if (amdgpu_sched_jobs < 4) {
1510                 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1511                          amdgpu_sched_jobs);
1512                 amdgpu_sched_jobs = 4;
1513         } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1514                 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1515                          amdgpu_sched_jobs);
1516                 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1517         }
1518
1519         if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1520                 /* gart size must be greater or equal to 32M */
1521                 dev_warn(adev->dev, "gart size (%d) too small\n",
1522                          amdgpu_gart_size);
1523                 amdgpu_gart_size = -1;
1524         }
1525
1526         if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1527                 /* gtt size must be greater or equal to 32M */
1528                 dev_warn(adev->dev, "gtt size (%d) too small\n",
1529                                  amdgpu_gtt_size);
1530                 amdgpu_gtt_size = -1;
1531         }
1532
1533         /* valid range is between 4 and 9 inclusive */
1534         if (amdgpu_vm_fragment_size != -1 &&
1535             (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1536                 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1537                 amdgpu_vm_fragment_size = -1;
1538         }
1539
1540         if (amdgpu_sched_hw_submission < 2) {
1541                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1542                          amdgpu_sched_hw_submission);
1543                 amdgpu_sched_hw_submission = 2;
1544         } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1545                 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1546                          amdgpu_sched_hw_submission);
1547                 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1548         }
1549
1550         if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1551                 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1552                 amdgpu_reset_method = -1;
1553         }
1554
1555         amdgpu_device_check_smu_prv_buffer_size(adev);
1556
1557         amdgpu_device_check_vm_size(adev);
1558
1559         amdgpu_device_check_block_size(adev);
1560
1561         adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1562
1563         return 0;
1564 }
1565
1566 /**
1567  * amdgpu_switcheroo_set_state - set switcheroo state
1568  *
1569  * @pdev: pci dev pointer
1570  * @state: vga_switcheroo state
1571  *
1572  * Callback for the switcheroo driver.  Suspends or resumes
1573  * the asics before or after it is powered up using ACPI methods.
1574  */
1575 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1576                                         enum vga_switcheroo_state state)
1577 {
1578         struct drm_device *dev = pci_get_drvdata(pdev);
1579         int r;
1580
1581         if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1582                 return;
1583
1584         if (state == VGA_SWITCHEROO_ON) {
1585                 pr_info("switched on\n");
1586                 /* don't suspend or resume card normally */
1587                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1588
1589                 pci_set_power_state(pdev, PCI_D0);
1590                 amdgpu_device_load_pci_state(pdev);
1591                 r = pci_enable_device(pdev);
1592                 if (r)
1593                         DRM_WARN("pci_enable_device failed (%d)\n", r);
1594                 amdgpu_device_resume(dev, true);
1595
1596                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1597         } else {
1598                 pr_info("switched off\n");
1599                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1600                 amdgpu_device_suspend(dev, true);
1601                 amdgpu_device_cache_pci_state(pdev);
1602                 /* Shut down the device */
1603                 pci_disable_device(pdev);
1604                 pci_set_power_state(pdev, PCI_D3cold);
1605                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1606         }
1607 }
1608
1609 /**
1610  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1611  *
1612  * @pdev: pci dev pointer
1613  *
1614  * Callback for the switcheroo driver.  Check if the switcheroo
1615  * state can be changed.
1616  * Returns true if the state can be changed, false if not.
1617  */
1618 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1619 {
1620         struct drm_device *dev = pci_get_drvdata(pdev);
1621
1622         /*
1623          * FIXME: open_count is protected by drm_global_mutex but that would lead to
1624          * locking inversion with the driver load path. And the access here is
1625          * completely racy anyway. So don't bother with locking for now.
1626          */
1627         return atomic_read(&dev->open_count) == 0;
1628 }
1629
1630 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1631         .set_gpu_state = amdgpu_switcheroo_set_state,
1632         .reprobe = NULL,
1633         .can_switch = amdgpu_switcheroo_can_switch,
1634 };
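
/*
 * Editor's sketch: these ops are handed to the vga_switcheroo framework during
 * device init, roughly as below (the actual call site lives in
 * amdgpu_device_init(); "px" indicates whether the board supports PX power
 * control):
 *
 *   bool px = amdgpu_device_supports_px(adev_to_drm(adev));
 *
 *   vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px);
 */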
1635
1636 /**
1637  * amdgpu_device_ip_set_clockgating_state - set the CG state
1638  *
1639  * @dev: amdgpu_device pointer
1640  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1641  * @state: clockgating state (gate or ungate)
1642  *
1643  * Sets the requested clockgating state for all instances of
1644  * the hardware IP specified.
1645  * Returns the error code from the last instance.
1646  */
1647 int amdgpu_device_ip_set_clockgating_state(void *dev,
1648                                            enum amd_ip_block_type block_type,
1649                                            enum amd_clockgating_state state)
1650 {
1651         struct amdgpu_device *adev = dev;
1652         int i, r = 0;
1653
1654         for (i = 0; i < adev->num_ip_blocks; i++) {
1655                 if (!adev->ip_blocks[i].status.valid)
1656                         continue;
1657                 if (adev->ip_blocks[i].version->type != block_type)
1658                         continue;
1659                 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1660                         continue;
1661                 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1662                         (void *)adev, state);
1663                 if (r)
1664                         DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1665                                   adev->ip_blocks[i].version->funcs->name, r);
1666         }
1667         return r;
1668 }
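
/*
 * Editor's sketch: callers typically gate or ungate every instance of one IP
 * type in a single call, e.g. (illustrative, not an exhaustive list of call
 * sites):
 *
 *   amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                          AMD_CG_STATE_GATE);
 */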
1669
1670 /**
1671  * amdgpu_device_ip_set_powergating_state - set the PG state
1672  *
1673  * @dev: amdgpu_device pointer
1674  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1675  * @state: powergating state (gate or ungate)
1676  *
1677  * Sets the requested powergating state for all instances of
1678  * the hardware IP specified.
1679  * Returns the error code from the last instance.
1680  */
1681 int amdgpu_device_ip_set_powergating_state(void *dev,
1682                                            enum amd_ip_block_type block_type,
1683                                            enum amd_powergating_state state)
1684 {
1685         struct amdgpu_device *adev = dev;
1686         int i, r = 0;
1687
1688         for (i = 0; i < adev->num_ip_blocks; i++) {
1689                 if (!adev->ip_blocks[i].status.valid)
1690                         continue;
1691                 if (adev->ip_blocks[i].version->type != block_type)
1692                         continue;
1693                 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1694                         continue;
1695                 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1696                         (void *)adev, state);
1697                 if (r)
1698                         DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1699                                   adev->ip_blocks[i].version->funcs->name, r);
1700         }
1701         return r;
1702 }
1703
1704 /**
1705  * amdgpu_device_ip_get_clockgating_state - get the CG state
1706  *
1707  * @adev: amdgpu_device pointer
1708  * @flags: clockgating feature flags
1709  *
1710  * Walks the list of IPs on the device and updates the clockgating
1711  * flags for each IP.
1712  * Updates @flags with the feature flags for each hardware IP where
1713  * clockgating is enabled.
1714  */
1715 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1716                                             u64 *flags)
1717 {
1718         int i;
1719
1720         for (i = 0; i < adev->num_ip_blocks; i++) {
1721                 if (!adev->ip_blocks[i].status.valid)
1722                         continue;
1723                 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1724                         adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1725         }
1726 }
1727
1728 /**
1729  * amdgpu_device_ip_wait_for_idle - wait for idle
1730  *
1731  * @adev: amdgpu_device pointer
1732  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1733  *
1734  * Waits for the requested hardware IP to be idle.
1735  * Returns 0 for success or a negative error code on failure.
1736  */
1737 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1738                                    enum amd_ip_block_type block_type)
1739 {
1740         int i, r;
1741
1742         for (i = 0; i < adev->num_ip_blocks; i++) {
1743                 if (!adev->ip_blocks[i].status.valid)
1744                         continue;
1745                 if (adev->ip_blocks[i].version->type == block_type) {
1746                         r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1747                         if (r)
1748                                 return r;
1749                         break;
1750                 }
1751         }
1752         return 0;
1753
1754 }
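
/*
 * Editor's sketch: a typical pattern is to quiesce one IP before reprogramming
 * it, e.g. (illustrative):
 *
 *   r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
 *   if (r)
 *           return r;
 */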
1755
1756 /**
1757  * amdgpu_device_ip_is_idle - is the hardware IP idle
1758  *
1759  * @adev: amdgpu_device pointer
1760  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1761  *
1762  * Check if the hardware IP is idle or not.
1763  * Returns true if the IP is idle, false if not.
1764  */
1765 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1766                               enum amd_ip_block_type block_type)
1767 {
1768         int i;
1769
1770         for (i = 0; i < adev->num_ip_blocks; i++) {
1771                 if (!adev->ip_blocks[i].status.valid)
1772                         continue;
1773                 if (adev->ip_blocks[i].version->type == block_type)
1774                         return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1775         }
1776         return true;
1777
1778 }
1779
1780 /**
1781  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1782  *
1783  * @adev: amdgpu_device pointer
1784  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1785  *
1786  * Returns a pointer to the hardware IP block structure
1787  * if it exists for the asic, otherwise NULL.
1788  */
1789 struct amdgpu_ip_block *
1790 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1791                               enum amd_ip_block_type type)
1792 {
1793         int i;
1794
1795         for (i = 0; i < adev->num_ip_blocks; i++)
1796                 if (adev->ip_blocks[i].version->type == type)
1797                         return &adev->ip_blocks[i];
1798
1799         return NULL;
1800 }
1801
1802 /**
1803  * amdgpu_device_ip_block_version_cmp - compare an IP block's version
1804  *
1805  * @adev: amdgpu_device pointer
1806  * @type: enum amd_ip_block_type
1807  * @major: major version
1808  * @minor: minor version
1809  *
1810  * Returns 0 if the IP block version is equal to or greater than the
1811  * requested version, or 1 if it is smaller or the ip_block doesn't exist.
1812  */
1813 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1814                                        enum amd_ip_block_type type,
1815                                        u32 major, u32 minor)
1816 {
1817         struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1818
1819         if (ip_block && ((ip_block->version->major > major) ||
1820                         ((ip_block->version->major == major) &&
1821                         (ip_block->version->minor >= minor))))
1822                 return 0;
1823
1824         return 1;
1825 }
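
/*
 * Editor's sketch: the comparison is handy for gating features on a minimum
 * IP version, e.g. "VCE 3.0 or newer" (illustrative):
 *
 *   bool vce3_or_newer =
 *           !amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_VCE,
 *                                               3, 0);
 */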
1826
1827 /**
1828  * amdgpu_device_ip_block_add - add an IP block to the device
1829  *
1830  * @adev: amdgpu_device pointer
1831  * @ip_block_version: pointer to the IP to add
1832  *
1833  * Adds the IP block driver information to the collection of IPs
1834  * on the asic.
1835  */
1836 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1837                                const struct amdgpu_ip_block_version *ip_block_version)
1838 {
1839         if (!ip_block_version)
1840                 return -EINVAL;
1841
1842         switch (ip_block_version->type) {
1843         case AMD_IP_BLOCK_TYPE_VCN:
1844                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1845                         return 0;
1846                 break;
1847         case AMD_IP_BLOCK_TYPE_JPEG:
1848                 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1849                         return 0;
1850                 break;
1851         default:
1852                 break;
1853         }
1854
1855         DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1856                   ip_block_version->funcs->name);
1857
1858         adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1859
1860         return 0;
1861 }
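
/*
 * Editor's sketch: each SOC's <soc>_set_ip_blocks() registers its IP blocks in
 * dependency order with this helper. The block version symbols below are
 * placeholders, not actual amdgpu symbols:
 *
 *   r = amdgpu_device_ip_block_add(adev, &example_common_ip_block);
 *   if (r)
 *           return r;
 *   r = amdgpu_device_ip_block_add(adev, &example_gmc_ip_block);
 *   if (r)
 *           return r;
 */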
1862
1863 /**
1864  * amdgpu_device_enable_virtual_display - enable virtual display feature
1865  *
1866  * @adev: amdgpu_device pointer
1867  *
1868  * Enables the virtual display feature if the user has enabled it via
1869  * the module parameter virtual_display.  This feature provides virtual
1870  * display hardware on headless boards or in virtualized environments.
1871  * This function parses and validates the configuration string specified by
1872  * the user and configures the virtual display configuration (number of
1873  * virtual connectors, crtcs, etc.) specified.
1874  */
1875 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1876 {
1877         adev->enable_virtual_display = false;
1878
1879         if (amdgpu_virtual_display) {
1880                 const char *pci_address_name = pci_name(adev->pdev);
1881                 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1882
1883                 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1884                 pciaddstr_tmp = pciaddstr;
1885                 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1886                         pciaddname = strsep(&pciaddname_tmp, ",");
1887                         if (!strcmp("all", pciaddname)
1888                             || !strcmp(pci_address_name, pciaddname)) {
1889                                 long num_crtc;
1890                                 int res = -1;
1891
1892                                 adev->enable_virtual_display = true;
1893
1894                                 if (pciaddname_tmp)
1895                                         res = kstrtol(pciaddname_tmp, 10,
1896                                                       &num_crtc);
1897
1898                                 if (!res) {
1899                                         if (num_crtc < 1)
1900                                                 num_crtc = 1;
1901                                         if (num_crtc > 6)
1902                                                 num_crtc = 6;
1903                                         adev->mode_info.num_crtc = num_crtc;
1904                                 } else {
1905                                         adev->mode_info.num_crtc = 1;
1906                                 }
1907                                 break;
1908                         }
1909                 }
1910
1911                 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1912                          amdgpu_virtual_display, pci_address_name,
1913                          adev->enable_virtual_display, adev->mode_info.num_crtc);
1914
1915                 kfree(pciaddstr);
1916         }
1917 }
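
/*
 * Editor's note: the string parsed above is a semicolon-separated list of
 * "<pci address>,<crtc count>" entries, or "all" to match every device; the
 * addresses below are illustrative:
 *
 *   amdgpu.virtual_display=0000:03:00.0,2
 *   amdgpu.virtual_display=all,4
 */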
1918
1919 /**
1920  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1921  *
1922  * @adev: amdgpu_device pointer
1923  *
1924  * Parses the asic configuration parameters specified in the gpu info
1925  * firmware and makes them available to the driver for use in configuring
1926  * the asic.
1927  * Returns 0 on success, -EINVAL on failure.
1928  */
1929 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1930 {
1931         const char *chip_name;
1932         char fw_name[40];
1933         int err;
1934         const struct gpu_info_firmware_header_v1_0 *hdr;
1935
1936         adev->firmware.gpu_info_fw = NULL;
1937
1938         if (adev->mman.discovery_bin) {
1939                 /*
1940                  * FIXME: The bounding box is still needed by Navi12, so
1941                  * temporarily read it from gpu_info firmware. Should be dropped
1942                  * when DAL no longer needs it.
1943                  */
1944                 if (adev->asic_type != CHIP_NAVI12)
1945                         return 0;
1946         }
1947
1948         switch (adev->asic_type) {
1949         default:
1950                 return 0;
1951         case CHIP_VEGA10:
1952                 chip_name = "vega10";
1953                 break;
1954         case CHIP_VEGA12:
1955                 chip_name = "vega12";
1956                 break;
1957         case CHIP_RAVEN:
1958                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1959                         chip_name = "raven2";
1960                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1961                         chip_name = "picasso";
1962                 else
1963                         chip_name = "raven";
1964                 break;
1965         case CHIP_ARCTURUS:
1966                 chip_name = "arcturus";
1967                 break;
1968         case CHIP_NAVI12:
1969                 chip_name = "navi12";
1970                 break;
1971         }
1972
1973         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1974         err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1975         if (err) {
1976                 dev_err(adev->dev,
1977                         "Failed to load gpu_info firmware \"%s\"\n",
1978                         fw_name);
1979                 goto out;
1980         }
1981         err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1982         if (err) {
1983                 dev_err(adev->dev,
1984                         "Failed to validate gpu_info firmware \"%s\"\n",
1985                         fw_name);
1986                 goto out;
1987         }
1988
1989         hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1990         amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1991
1992         switch (hdr->version_major) {
1993         case 1:
1994         {
1995                 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1996                         (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1997                                                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1998
1999                 /*
2000                  * Should be dropped when DAL no longer needs it.
2001                  */
2002                 if (adev->asic_type == CHIP_NAVI12)
2003                         goto parse_soc_bounding_box;
2004
2005                 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2006                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2007                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2008                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2009                 adev->gfx.config.max_texture_channel_caches =
2010                         le32_to_cpu(gpu_info_fw->gc_num_tccs);
2011                 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2012                 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2013                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2014                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2015                 adev->gfx.config.double_offchip_lds_buf =
2016                         le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2017                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2018                 adev->gfx.cu_info.max_waves_per_simd =
2019                         le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2020                 adev->gfx.cu_info.max_scratch_slots_per_cu =
2021                         le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2022                 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2023                 if (hdr->version_minor >= 1) {
2024                         const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2025                                 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2026                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2027                         adev->gfx.config.num_sc_per_sh =
2028                                 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2029                         adev->gfx.config.num_packer_per_sc =
2030                                 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2031                 }
2032
2033 parse_soc_bounding_box:
2034                 /*
2035                  * soc bounding box info is not integrated into the discovery table,
2036                  * so when it is needed we still have to parse it from gpu_info firmware.
2037                  */
2038                 if (hdr->version_minor == 2) {
2039                         const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2040                                 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2041                                                                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2042                         adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2043                 }
2044                 break;
2045         }
2046         default:
2047                 dev_err(adev->dev,
2048                         "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2049                 err = -EINVAL;
2050                 goto out;
2051         }
2052 out:
2053         return err;
2054 }
2055
2056 /**
2057  * amdgpu_device_ip_early_init - run early init for hardware IPs
2058  *
2059  * @adev: amdgpu_device pointer
2060  *
2061  * Early initialization pass for hardware IPs.  The hardware IPs that make
2062  * up each asic are discovered and each IP's early_init callback is run.  This
2063  * is the first stage in initializing the asic.
2064  * Returns 0 on success, negative error code on failure.
2065  */
2066 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2067 {
2068         struct drm_device *dev = adev_to_drm(adev);
2069         struct pci_dev *parent;
2070         int i, r;
2071
2072         amdgpu_device_enable_virtual_display(adev);
2073
2074         if (amdgpu_sriov_vf(adev)) {
2075                 r = amdgpu_virt_request_full_gpu(adev, true);
2076                 if (r)
2077                         return r;
2078         }
2079
2080         switch (adev->asic_type) {
2081 #ifdef CONFIG_DRM_AMDGPU_SI
2082         case CHIP_VERDE:
2083         case CHIP_TAHITI:
2084         case CHIP_PITCAIRN:
2085         case CHIP_OLAND:
2086         case CHIP_HAINAN:
2087                 adev->family = AMDGPU_FAMILY_SI;
2088                 r = si_set_ip_blocks(adev);
2089                 if (r)
2090                         return r;
2091                 break;
2092 #endif
2093 #ifdef CONFIG_DRM_AMDGPU_CIK
2094         case CHIP_BONAIRE:
2095         case CHIP_HAWAII:
2096         case CHIP_KAVERI:
2097         case CHIP_KABINI:
2098         case CHIP_MULLINS:
2099                 if (adev->flags & AMD_IS_APU)
2100                         adev->family = AMDGPU_FAMILY_KV;
2101                 else
2102                         adev->family = AMDGPU_FAMILY_CI;
2103
2104                 r = cik_set_ip_blocks(adev);
2105                 if (r)
2106                         return r;
2107                 break;
2108 #endif
2109         case CHIP_TOPAZ:
2110         case CHIP_TONGA:
2111         case CHIP_FIJI:
2112         case CHIP_POLARIS10:
2113         case CHIP_POLARIS11:
2114         case CHIP_POLARIS12:
2115         case CHIP_VEGAM:
2116         case CHIP_CARRIZO:
2117         case CHIP_STONEY:
2118                 if (adev->flags & AMD_IS_APU)
2119                         adev->family = AMDGPU_FAMILY_CZ;
2120                 else
2121                         adev->family = AMDGPU_FAMILY_VI;
2122
2123                 r = vi_set_ip_blocks(adev);
2124                 if (r)
2125                         return r;
2126                 break;
2127         default:
2128                 r = amdgpu_discovery_set_ip_blocks(adev);
2129                 if (r)
2130                         return r;
2131                 break;
2132         }
2133
2134         if (amdgpu_has_atpx() &&
2135             (amdgpu_is_atpx_hybrid() ||
2136              amdgpu_has_atpx_dgpu_power_cntl()) &&
2137             ((adev->flags & AMD_IS_APU) == 0) &&
2138             !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2139                 adev->flags |= AMD_IS_PX;
2140
2141         if (!(adev->flags & AMD_IS_APU)) {
2142                 parent = pci_upstream_bridge(adev->pdev);
2143                 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2144         }
2145
2146         amdgpu_amdkfd_device_probe(adev);
2147
2148         adev->pm.pp_feature = amdgpu_pp_feature_mask;
2149         if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2150                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2151         if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2152                 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2153
2154         for (i = 0; i < adev->num_ip_blocks; i++) {
2155                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2156                         DRM_ERROR("disabled ip block: %d <%s>\n",
2157                                   i, adev->ip_blocks[i].version->funcs->name);
2158                         adev->ip_blocks[i].status.valid = false;
2159                 } else {
2160                         if (adev->ip_blocks[i].version->funcs->early_init) {
2161                                 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2162                                 if (r == -ENOENT) {
2163                                         adev->ip_blocks[i].status.valid = false;
2164                                 } else if (r) {
2165                                         DRM_ERROR("early_init of IP block <%s> failed %d\n",
2166                                                   adev->ip_blocks[i].version->funcs->name, r);
2167                                         return r;
2168                                 } else {
2169                                         adev->ip_blocks[i].status.valid = true;
2170                                 }
2171                         } else {
2172                                 adev->ip_blocks[i].status.valid = true;
2173                         }
2174                 }
2175                 /* get the vbios after the asic_funcs are set up */
2176                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2177                         r = amdgpu_device_parse_gpu_info_fw(adev);
2178                         if (r)
2179                                 return r;
2180
2181                         /* Read BIOS */
2182                         if (!amdgpu_get_bios(adev))
2183                                 return -EINVAL;
2184
2185                         r = amdgpu_atombios_init(adev);
2186                         if (r) {
2187                                 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2188                                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2189                                 return r;
2190                         }
2191
2192                         /* get pf2vf msg info at its earliest time */
2193                         if (amdgpu_sriov_vf(adev))
2194                                 amdgpu_virt_init_data_exchange(adev);
2195
2196                 }
2197         }
2198
2199         adev->cg_flags &= amdgpu_cg_mask;
2200         adev->pg_flags &= amdgpu_pg_mask;
2201
2202         return 0;
2203 }
2204
2205 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2206 {
2207         int i, r;
2208
2209         for (i = 0; i < adev->num_ip_blocks; i++) {
2210                 if (!adev->ip_blocks[i].status.sw)
2211                         continue;
2212                 if (adev->ip_blocks[i].status.hw)
2213                         continue;
2214                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2215                     (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2216                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2217                         r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2218                         if (r) {
2219                                 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2220                                           adev->ip_blocks[i].version->funcs->name, r);
2221                                 return r;
2222                         }
2223                         adev->ip_blocks[i].status.hw = true;
2224                 }
2225         }
2226
2227         return 0;
2228 }
2229
2230 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2231 {
2232         int i, r;
2233
2234         for (i = 0; i < adev->num_ip_blocks; i++) {
2235                 if (!adev->ip_blocks[i].status.sw)
2236                         continue;
2237                 if (adev->ip_blocks[i].status.hw)
2238                         continue;
2239                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2240                 if (r) {
2241                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2242                                   adev->ip_blocks[i].version->funcs->name, r);
2243                         return r;
2244                 }
2245                 adev->ip_blocks[i].status.hw = true;
2246         }
2247
2248         return 0;
2249 }
2250
2251 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2252 {
2253         int r = 0;
2254         int i;
2255         uint32_t smu_version;
2256
2257         if (adev->asic_type >= CHIP_VEGA10) {
2258                 for (i = 0; i < adev->num_ip_blocks; i++) {
2259                         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2260                                 continue;
2261
2262                         if (!adev->ip_blocks[i].status.sw)
2263                                 continue;
2264
2265                         /* no need to do the fw loading again if already done */
2266                         if (adev->ip_blocks[i].status.hw)
2267                                 break;
2268
2269                         if (amdgpu_in_reset(adev) || adev->in_suspend) {
2270                                 r = adev->ip_blocks[i].version->funcs->resume(adev);
2271                                 if (r) {
2272                                         DRM_ERROR("resume of IP block <%s> failed %d\n",
2273                                                           adev->ip_blocks[i].version->funcs->name, r);
2274                                         return r;
2275                                 }
2276                         } else {
2277                                 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2278                                 if (r) {
2279                                         DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2280                                                           adev->ip_blocks[i].version->funcs->name, r);
2281                                         return r;
2282                                 }
2283                         }
2284
2285                         adev->ip_blocks[i].status.hw = true;
2286                         break;
2287                 }
2288         }
2289
2290         if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2291                 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2292
2293         return r;
2294 }
2295
2296 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2297 {
2298         long timeout;
2299         int r, i;
2300
2301         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2302                 struct amdgpu_ring *ring = adev->rings[i];
2303
2304                 /* No need to set up the GPU scheduler for rings that don't need it */
2305                 if (!ring || ring->no_scheduler)
2306                         continue;
2307
2308                 switch (ring->funcs->type) {
2309                 case AMDGPU_RING_TYPE_GFX:
2310                         timeout = adev->gfx_timeout;
2311                         break;
2312                 case AMDGPU_RING_TYPE_COMPUTE:
2313                         timeout = adev->compute_timeout;
2314                         break;
2315                 case AMDGPU_RING_TYPE_SDMA:
2316                         timeout = adev->sdma_timeout;
2317                         break;
2318                 default:
2319                         timeout = adev->video_timeout;
2320                         break;
2321                 }
2322
2323                 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2324                                    ring->num_hw_submission, amdgpu_job_hang_limit,
2325                                    timeout, adev->reset_domain->wq,
2326                                    ring->sched_score, ring->name,
2327                                    adev->dev);
2328                 if (r) {
2329                         DRM_ERROR("Failed to create scheduler on ring %s.\n",
2330                                   ring->name);
2331                         return r;
2332                 }
2333         }
2334
2335         return 0;
2336 }
2337
2338
2339 /**
2340  * amdgpu_device_ip_init - run init for hardware IPs
2341  *
2342  * @adev: amdgpu_device pointer
2343  *
2344  * Main initialization pass for hardware IPs.  The list of all the hardware
2345  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2346  * are run.  sw_init initializes the software state associated with each IP
2347  * and hw_init initializes the hardware associated with each IP.
2348  * Returns 0 on success, negative error code on failure.
2349  */
2350 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2351 {
2352         int i, r;
2353
2354         r = amdgpu_ras_init(adev);
2355         if (r)
2356                 return r;
2357
2358         for (i = 0; i < adev->num_ip_blocks; i++) {
2359                 if (!adev->ip_blocks[i].status.valid)
2360                         continue;
2361                 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2362                 if (r) {
2363                         DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2364                                   adev->ip_blocks[i].version->funcs->name, r);
2365                         goto init_failed;
2366                 }
2367                 adev->ip_blocks[i].status.sw = true;
2368
2369                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2370                         /* need to do common hw init early so everything is set up for gmc */
2371                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2372                         if (r) {
2373                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2374                                 goto init_failed;
2375                         }
2376                         adev->ip_blocks[i].status.hw = true;
2377                 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2378                         /* need to do gmc hw init early so we can allocate gpu mem */
2379                         /* Try to reserve bad pages early */
2380                         if (amdgpu_sriov_vf(adev))
2381                                 amdgpu_virt_exchange_data(adev);
2382
2383                         r = amdgpu_device_vram_scratch_init(adev);
2384                         if (r) {
2385                                 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2386                                 goto init_failed;
2387                         }
2388                         r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2389                         if (r) {
2390                                 DRM_ERROR("hw_init %d failed %d\n", i, r);
2391                                 goto init_failed;
2392                         }
2393                         r = amdgpu_device_wb_init(adev);
2394                         if (r) {
2395                                 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2396                                 goto init_failed;
2397                         }
2398                         adev->ip_blocks[i].status.hw = true;
2399
2400                         /* right after GMC hw init, we create CSA */
2401                         if (amdgpu_mcbp) {
2402                                 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2403                                                                 AMDGPU_GEM_DOMAIN_VRAM,
2404                                                                 AMDGPU_CSA_SIZE);
2405                                 if (r) {
2406                                         DRM_ERROR("allocate CSA failed %d\n", r);
2407                                         goto init_failed;
2408                                 }
2409                         }
2410                 }
2411         }
2412
2413         if (amdgpu_sriov_vf(adev))
2414                 amdgpu_virt_init_data_exchange(adev);
2415
2416         r = amdgpu_ib_pool_init(adev);
2417         if (r) {
2418                 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2419                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2420                 goto init_failed;
2421         }
2422
2423         r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2424         if (r)
2425                 goto init_failed;
2426
2427         r = amdgpu_device_ip_hw_init_phase1(adev);
2428         if (r)
2429                 goto init_failed;
2430
2431         r = amdgpu_device_fw_loading(adev);
2432         if (r)
2433                 goto init_failed;
2434
2435         r = amdgpu_device_ip_hw_init_phase2(adev);
2436         if (r)
2437                 goto init_failed;
2438
2439         /*
2440          * Retired pages will be loaded from eeprom and reserved here;
2441          * this must be called after amdgpu_device_ip_hw_init_phase2 since
2442          * for some ASICs the RAS EEPROM code relies on the SMU being fully
2443          * functional for I2C communication, which is only true at this point.
2444          *
2445          * amdgpu_ras_recovery_init may fail, but the upper layers only care
2446          * about a failure caused by a bad gpu state and stop the amdgpu init
2447          * process accordingly. For other failures, it still releases all
2448          * the resources and prints an error message rather than returning a
2449          * negative value to the upper level.
2450          *
2451          * Note: theoretically, this should be called before all vram allocations
2452          * to protect retired pages from abuse.
2453          */
2454         r = amdgpu_ras_recovery_init(adev);
2455         if (r)
2456                 goto init_failed;
2457
2458         /*
2459          * In the XGMI case, grab an extra reference on the reset domain for this device
2460          */
2461         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2462                 if (amdgpu_xgmi_add_device(adev) == 0) {
2463                         if (!amdgpu_sriov_vf(adev)) {
2464                                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2465
2466                                 if (!hive->reset_domain ||
2467                                     !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2468                                         r = -ENOENT;
2469                                         amdgpu_put_xgmi_hive(hive);
2470                                         goto init_failed;
2471                                 }
2472
2473                                 /* Drop the early temporary reset domain we created for device */
2474                                 amdgpu_reset_put_reset_domain(adev->reset_domain);
2475                                 adev->reset_domain = hive->reset_domain;
2476                                 amdgpu_put_xgmi_hive(hive);
2477                         }
2478                 }
2479         }
2480
2481         r = amdgpu_device_init_schedulers(adev);
2482         if (r)
2483                 goto init_failed;
2484
2485         /* Don't init kfd if the whole hive needs to be reset during init */
2486         if (!adev->gmc.xgmi.pending_reset)
2487                 amdgpu_amdkfd_device_init(adev);
2488
2489         amdgpu_fru_get_product_info(adev);
2490
2491 init_failed:
2492         if (amdgpu_sriov_vf(adev))
2493                 amdgpu_virt_release_full_gpu(adev, true);
2494
2495         return r;
2496 }
2497
2498 /**
2499  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2500  *
2501  * @adev: amdgpu_device pointer
2502  *
2503  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2504  * this function before a GPU reset.  If the value is retained after a
2505  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2506  */
2507 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2508 {
2509         memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2510 }
2511
2512 /**
2513  * amdgpu_device_check_vram_lost - check if vram is valid
2514  *
2515  * @adev: amdgpu_device pointer
2516  *
2517  * Checks the reset magic value written to the gart pointer in VRAM.
2518  * The driver calls this after a GPU reset to see if the contents of
2519  * VRAM have been lost or not.
2520  * Returns true if vram is lost, false if not.
2521  */
2522 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2523 {
2524         if (memcmp(adev->gart.ptr, adev->reset_magic,
2525                         AMDGPU_RESET_MAGIC_NUM))
2526                 return true;
2527
2528         if (!amdgpu_in_reset(adev))
2529                 return false;
2530
2531         /*
2532          * For all ASICs with baco/mode1 reset, the VRAM is
2533          * always assumed to be lost.
2534          */
2535         switch (amdgpu_asic_reset_method(adev)) {
2536         case AMD_RESET_METHOD_BACO:
2537         case AMD_RESET_METHOD_MODE1:
2538                 return true;
2539         default:
2540                 return false;
2541         }
2542 }
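
/*
 * Editor's sketch: conceptually, the reset path pairs the two helpers above,
 * roughly:
 *
 *   amdgpu_device_fill_reset_magic(adev);      // before a GPU reset
 *   ... perform the ASIC reset ...
 *   if (amdgpu_device_check_vram_lost(adev))
 *           ... re-post the vbios and restore VRAM contents ...
 */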
2543
2544 /**
2545  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2546  *
2547  * @adev: amdgpu_device pointer
2548  * @state: clockgating state (gate or ungate)
2549  *
2550  * The list of all the hardware IPs that make up the asic is walked and the
2551  * set_clockgating_state callbacks are run.
2552  * The late initialization pass enables clockgating for hardware IPs, while
2553  * the fini or suspend pass disables clockgating for hardware IPs.
2554  * Returns 0 on success, negative error code on failure.
2555  */
2556
2557 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2558                                enum amd_clockgating_state state)
2559 {
2560         int i, j, r;
2561
2562         if (amdgpu_emu_mode == 1)
2563                 return 0;
2564
2565         for (j = 0; j < adev->num_ip_blocks; j++) {
2566                 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2567                 if (!adev->ip_blocks[i].status.late_initialized)
2568                         continue;
2569                 /* skip CG for GFX on S0ix */
2570                 if (adev->in_s0ix &&
2571                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2572                         continue;
2573                 /* skip CG for VCE/UVD, it's handled specially */
2574                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2575                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2576                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2577                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2578                     adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2579                         /* enable clockgating to save power */
2580                         r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2581                                                                                      state);
2582                         if (r) {
2583                                 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2584                                           adev->ip_blocks[i].version->funcs->name, r);
2585                                 return r;
2586                         }
2587                 }
2588         }
2589
2590         return 0;
2591 }
2592
2593 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2594                                enum amd_powergating_state state)
2595 {
2596         int i, j, r;
2597
2598         if (amdgpu_emu_mode == 1)
2599                 return 0;
2600
2601         for (j = 0; j < adev->num_ip_blocks; j++) {
2602                 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2603                 if (!adev->ip_blocks[i].status.late_initialized)
2604                         continue;
2605                 /* skip PG for GFX on S0ix */
2606                 if (adev->in_s0ix &&
2607                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2608                         continue;
2609                 /* skip PG for VCE/UVD, it's handled specially */
2610                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2611                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2612                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2613                     adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2614                     adev->ip_blocks[i].version->funcs->set_powergating_state) {
2615                         /* enable powergating to save power */
2616                         r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2617                                                                                         state);
2618                         if (r) {
2619                                 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2620                                           adev->ip_blocks[i].version->funcs->name, r);
2621                                 return r;
2622                         }
2623                 }
2624         }
2625         return 0;
2626 }
2627
2628 static int amdgpu_device_enable_mgpu_fan_boost(void)
2629 {
2630         struct amdgpu_gpu_instance *gpu_ins;
2631         struct amdgpu_device *adev;
2632         int i, ret = 0;
2633
2634         mutex_lock(&mgpu_info.mutex);
2635
2636         /*
2637          * MGPU fan boost feature should be enabled
2638          * only when there are two or more dGPUs in
2639          * the system
2640          */
2641         if (mgpu_info.num_dgpu < 2)
2642                 goto out;
2643
2644         for (i = 0; i < mgpu_info.num_dgpu; i++) {
2645                 gpu_ins = &(mgpu_info.gpu_ins[i]);
2646                 adev = gpu_ins->adev;
2647                 if (!(adev->flags & AMD_IS_APU) &&
2648                     !gpu_ins->mgpu_fan_enabled) {
2649                         ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2650                         if (ret)
2651                                 break;
2652
2653                         gpu_ins->mgpu_fan_enabled = 1;
2654                 }
2655         }
2656
2657 out:
2658         mutex_unlock(&mgpu_info.mutex);
2659
2660         return ret;
2661 }
2662
2663 /**
2664  * amdgpu_device_ip_late_init - run late init for hardware IPs
2665  *
2666  * @adev: amdgpu_device pointer
2667  *
2668  * Late initialization pass for hardware IPs.  The list of all the hardware
2669  * IPs that make up the asic is walked and the late_init callbacks are run.
2670  * late_init covers any special initialization that an IP requires
2671  * after all of the IPs have been initialized or something that needs to happen
2672  * late in the init process.
2673  * Returns 0 on success, negative error code on failure.
2674  */
2675 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2676 {
2677         struct amdgpu_gpu_instance *gpu_instance;
2678         int i = 0, r;
2679
2680         for (i = 0; i < adev->num_ip_blocks; i++) {
2681                 if (!adev->ip_blocks[i].status.hw)
2682                         continue;
2683                 if (adev->ip_blocks[i].version->funcs->late_init) {
2684                         r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2685                         if (r) {
2686                                 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2687                                           adev->ip_blocks[i].version->funcs->name, r);
2688                                 return r;
2689                         }
2690                 }
2691                 adev->ip_blocks[i].status.late_initialized = true;
2692         }
2693
2694         r = amdgpu_ras_late_init(adev);
2695         if (r) {
2696                 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2697                 return r;
2698         }
2699
2700         amdgpu_ras_set_error_query_ready(adev, true);
2701
2702         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2703         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2704
2705         amdgpu_device_fill_reset_magic(adev);
2706
2707         r = amdgpu_device_enable_mgpu_fan_boost();
2708         if (r)
2709                 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2710
2711         /* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
2712         if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2713                                adev->asic_type == CHIP_ALDEBARAN))
2714                 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2715
2716         if (adev->gmc.xgmi.num_physical_nodes > 1) {
2717                 mutex_lock(&mgpu_info.mutex);
2718
2719                 /*
2720                  * Reset the device p-state to low, as the device boots with it high.
2721                  *
2722                  * This should be performed only after all devices from the same
2723                  * hive get initialized.
2724                  *
2725                  * However, the number of devices in the hive is not known in
2726                  * advance; it is counted one by one as devices are initialized.
2727                  *
2728                  * So we wait until all XGMI-interlinked devices have been initialized.
2729                  * This may add some delay, as those devices may come from
2730                  * different hives. But that should be OK.
2731                  */
2732                 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2733                         for (i = 0; i < mgpu_info.num_gpu; i++) {
2734                                 gpu_instance = &(mgpu_info.gpu_ins[i]);
2735                                 if (gpu_instance->adev->flags & AMD_IS_APU)
2736                                         continue;
2737
2738                                 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2739                                                 AMDGPU_XGMI_PSTATE_MIN);
2740                                 if (r) {
2741                                         DRM_ERROR("pstate setting failed (%d).\n", r);
2742                                         break;
2743                                 }
2744                         }
2745                 }
2746
2747                 mutex_unlock(&mgpu_info.mutex);
2748         }
2749
2750         return 0;
2751 }
2752
2753 /**
2754  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2755  *
2756  * @adev: amdgpu_device pointer
2757  *
2758  * For ASICs that need to disable the SMC first
2759  */
2760 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2761 {
2762         int i, r;
2763
2764         if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2765                 return;
2766
2767         for (i = 0; i < adev->num_ip_blocks; i++) {
2768                 if (!adev->ip_blocks[i].status.hw)
2769                         continue;
2770                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2771                         r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2772                         /* XXX handle errors */
2773                         if (r) {
2774                                 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2775                                           adev->ip_blocks[i].version->funcs->name, r);
2776                         }
2777                         adev->ip_blocks[i].status.hw = false;
2778                         break;
2779                 }
2780         }
2781 }
2782
2783 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2784 {
2785         int i, r;
2786
2787         for (i = 0; i < adev->num_ip_blocks; i++) {
2788                 if (!adev->ip_blocks[i].version->funcs->early_fini)
2789                         continue;
2790
2791                 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2792                 if (r) {
2793                         DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2794                                   adev->ip_blocks[i].version->funcs->name, r);
2795                 }
2796         }
2797
2798         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2799         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2800
2801         amdgpu_amdkfd_suspend(adev, false);
2802
2803         /* Workaround for ASICs that need to disable the SMC first */
2804         amdgpu_device_smu_fini_early(adev);
2805
2806         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2807                 if (!adev->ip_blocks[i].status.hw)
2808                         continue;
2809
2810                 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2811                 /* XXX handle errors */
2812                 if (r) {
2813                         DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2814                                   adev->ip_blocks[i].version->funcs->name, r);
2815                 }
2816
2817                 adev->ip_blocks[i].status.hw = false;
2818         }
2819
2820         if (amdgpu_sriov_vf(adev)) {
2821                 if (amdgpu_virt_release_full_gpu(adev, false))
2822                         DRM_ERROR("failed to release exclusive mode on fini\n");
2823         }
2824
2825         return 0;
2826 }
2827
2828 /**
2829  * amdgpu_device_ip_fini - run fini for hardware IPs
2830  *
2831  * @adev: amdgpu_device pointer
2832  *
2833  * Main teardown pass for hardware IPs.  The list of all the hardware
2834  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2835  * are run.  hw_fini tears down the hardware associated with each IP
2836  * and sw_fini tears down any software state associated with each IP.
2837  * Returns 0 on success, negative error code on failure.
2838  */
2839 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2840 {
2841         int i, r;
2842
2843         if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2844                 amdgpu_virt_release_ras_err_handler_data(adev);
2845
2846         if (adev->gmc.xgmi.num_physical_nodes > 1)
2847                 amdgpu_xgmi_remove_device(adev);
2848
2849         amdgpu_amdkfd_device_fini_sw(adev);
2850
2851         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2852                 if (!adev->ip_blocks[i].status.sw)
2853                         continue;
2854
2855                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2856                         amdgpu_ucode_free_bo(adev);
2857                         amdgpu_free_static_csa(&adev->virt.csa_obj);
2858                         amdgpu_device_wb_fini(adev);
2859                         amdgpu_device_vram_scratch_fini(adev);
2860                         amdgpu_ib_pool_fini(adev);
2861                 }
2862
2863                 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2864                 /* XXX handle errors */
2865                 if (r) {
2866                         DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2867                                   adev->ip_blocks[i].version->funcs->name, r);
2868                 }
2869                 adev->ip_blocks[i].status.sw = false;
2870                 adev->ip_blocks[i].status.valid = false;
2871         }
2872
2873         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2874                 if (!adev->ip_blocks[i].status.late_initialized)
2875                         continue;
2876                 if (adev->ip_blocks[i].version->funcs->late_fini)
2877                         adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2878                 adev->ip_blocks[i].status.late_initialized = false;
2879         }
2880
2881         amdgpu_ras_fini(adev);
2882
2883         return 0;
2884 }
2885
2886 /**
2887  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2888  *
2889  * @work: work_struct.
2890  */
2891 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2892 {
2893         struct amdgpu_device *adev =
2894                 container_of(work, struct amdgpu_device, delayed_init_work.work);
2895         int r;
2896
2897         r = amdgpu_ib_ring_tests(adev);
2898         if (r)
2899                 DRM_ERROR("ib ring test failed (%d).\n", r);
2900 }
2901
2902 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2903 {
2904         struct amdgpu_device *adev =
2905                 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2906
2907         WARN_ON_ONCE(adev->gfx.gfx_off_state);
2908         WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2909
2910         if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2911                 adev->gfx.gfx_off_state = true;
2912 }
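
/*
 * Editor's sketch: this delayed work is queued from the gfxoff control path
 * (amdgpu_gfx_off_ctrl()) once the last "disable gfxoff" request is dropped,
 * roughly as below; the 100 ms delay is illustrative:
 *
 *   schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
 *                         msecs_to_jiffies(100));
 */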
2913
2914 /**
2915  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2916  *
2917  * @adev: amdgpu_device pointer
2918  *
2919  * Main suspend function for hardware IPs.  The list of all the hardware
2920  * IPs that make up the asic is walked, clockgating is disabled and the
2921  * suspend callbacks are run.  suspend puts the hardware and software state
2922  * in each IP into a state suitable for suspend.
2923  * Returns 0 on success, negative error code on failure.
2924  */
2925 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2926 {
2927         int i, r;
2928
2929         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2930         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2931
2932         /*
2933           * Per the PMFW team's suggestion, the driver needs to disable the
2934           * gfxoff and df cstate features for the gpu reset (e.g. Mode1Reset)
2935          * scenario. Add the missing df cstate disablement here.
2936          */
2937         if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2938                 dev_warn(adev->dev, "Failed to disallow df cstate");
2939
2940         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2941                 if (!adev->ip_blocks[i].status.valid)
2942                         continue;
2943
2944                 /* displays are handled separately */
2945                 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2946                         continue;
2947
2948                 /* XXX handle errors */
2949                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2950                 /* XXX handle errors */
2951                 if (r) {
2952                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
2953                                   adev->ip_blocks[i].version->funcs->name, r);
2954                         return r;
2955                 }
2956
2957                 adev->ip_blocks[i].status.hw = false;
2958         }
2959
2960         return 0;
2961 }
2962
2963 /**
2964  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2965  *
2966  * @adev: amdgpu_device pointer
2967  *
2968  * Main suspend function for hardware IPs.  The list of all the hardware
2969  * IPs that make up the asic is walked, clockgating is disabled and the
2970  * suspend callbacks are run.  suspend puts the hardware and software state
2971  * in each IP into a state suitable for suspend.
2972  * Returns 0 on success, negative error code on failure.
2973  */
2974 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2975 {
2976         int i, r;
2977
2978         if (adev->in_s0ix)
2979                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2980
2981         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2982                 if (!adev->ip_blocks[i].status.valid)
2983                         continue;
2984                 /* displays are handled in phase1 */
2985                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2986                         continue;
2987                 /* PSP lost connection when err_event_athub occurs */
2988                 if (amdgpu_ras_intr_triggered() &&
2989                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2990                         adev->ip_blocks[i].status.hw = false;
2991                         continue;
2992                 }
2993
2994                 /* skip unnecessary suspend if we have not initialized them yet */
2995                 if (adev->gmc.xgmi.pending_reset &&
2996                     !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2997                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2998                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2999                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3000                         adev->ip_blocks[i].status.hw = false;
3001                         continue;
3002                 }
3003
3004                 /* skip suspend of gfx and psp for S0ix
3005                  * gfx is in gfxoff state, so on resume it will exit gfxoff just
3006                  * like at runtime. PSP is also part of the always on hardware
3007                  * so no need to suspend it.
3008                  */
3009                 if (adev->in_s0ix &&
3010                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3011                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3012                         continue;
3013
3014                 /* XXX handle errors */
3015                 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3016                 /* XXX handle errors */
3017                 if (r) {
3018                         DRM_ERROR("suspend of IP block <%s> failed %d\n",
3019                                   adev->ip_blocks[i].version->funcs->name, r);
3020                 }
3021                 adev->ip_blocks[i].status.hw = false;
3022                 /* handle putting the SMC in the appropriate state */
3023                 if (!amdgpu_sriov_vf(adev)) {
3024                         if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3025                                 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3026                                 if (r) {
3027                                         DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3028                                                         adev->mp1_state, r);
3029                                         return r;
3030                                 }
3031                         }
3032                 }
3033         }
3034
3035         return 0;
3036 }
3037
3038 /**
3039  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3040  *
3041  * @adev: amdgpu_device pointer
3042  *
3043  * Main suspend function for hardware IPs.  The list of all the hardware
3044  * IPs that make up the asic is walked, clockgating is disabled and the
3045  * suspend callbacks are run.  suspend puts the hardware and software state
3046  * in each IP into a state suitable for suspend.
3047  * Returns 0 on success, negative error code on failure.
3048  */
3049 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3050 {
3051         int r;
3052
3053         if (amdgpu_sriov_vf(adev)) {
3054                 amdgpu_virt_fini_data_exchange(adev);
3055                 amdgpu_virt_request_full_gpu(adev, false);
3056         }
3057
3058         r = amdgpu_device_ip_suspend_phase1(adev);
3059         if (r)
3060                 return r;
3061         r = amdgpu_device_ip_suspend_phase2(adev);
3062
3063         if (amdgpu_sriov_vf(adev))
3064                 amdgpu_virt_release_full_gpu(adev, false);
3065
3066         return r;
3067 }
3068
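/**
 * amdgpu_device_ip_reinit_early_sriov - re-init early hardware IPs after VF reset
 *
 * @adev: amdgpu_device pointer
 *
 * Re-runs the hw_init callbacks of the COMMON, GMC, PSP and IH blocks in
 * that fixed order so that the basic hardware state is restored before the
 * remaining IPs are re-initialized in amdgpu_device_ip_reinit_late_sriov().
 * Returns 0 on success, negative error code on failure.
 */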
3069 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3070 {
3071         int i, r;
3072
3073         static enum amd_ip_block_type ip_order[] = {
3074                 AMD_IP_BLOCK_TYPE_COMMON,
3075                 AMD_IP_BLOCK_TYPE_GMC,
3076                 AMD_IP_BLOCK_TYPE_PSP,
3077                 AMD_IP_BLOCK_TYPE_IH,
3078         };
3079
3080         for (i = 0; i < adev->num_ip_blocks; i++) {
3081                 int j;
3082                 struct amdgpu_ip_block *block;
3083
3084                 block = &adev->ip_blocks[i];
3085                 block->status.hw = false;
3086
3087                 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3088
3089                         if (block->version->type != ip_order[j] ||
3090                                 !block->status.valid)
3091                                 continue;
3092
3093                         r = block->version->funcs->hw_init(adev);
3094                         DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3095                         if (r)
3096                                 return r;
3097                         block->status.hw = true;
3098                 }
3099         }
3100
3101         return 0;
3102 }
3103
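/**
 * amdgpu_device_ip_reinit_late_sriov - re-init remaining hardware IPs after VF reset
 *
 * @adev: amdgpu_device pointer
 *
 * Re-initializes the SMC, DCE, GFX, SDMA, UVD, VCE and VCN blocks in that
 * fixed order (resume for SMC, hw_init for the others), completing the
 * SR-IOV re-initialization started in amdgpu_device_ip_reinit_early_sriov().
 * Returns 0 on success, negative error code on failure.
 */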
3104 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3105 {
3106         int i, r;
3107
3108         static enum amd_ip_block_type ip_order[] = {
3109                 AMD_IP_BLOCK_TYPE_SMC,
3110                 AMD_IP_BLOCK_TYPE_DCE,
3111                 AMD_IP_BLOCK_TYPE_GFX,
3112                 AMD_IP_BLOCK_TYPE_SDMA,
3113                 AMD_IP_BLOCK_TYPE_UVD,
3114                 AMD_IP_BLOCK_TYPE_VCE,
3115                 AMD_IP_BLOCK_TYPE_VCN
3116         };
3117
3118         for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3119                 int j;
3120                 struct amdgpu_ip_block *block;
3121
3122                 for (j = 0; j < adev->num_ip_blocks; j++) {
3123                         block = &adev->ip_blocks[j];
3124
3125                         if (block->version->type != ip_order[i] ||
3126                                 !block->status.valid ||
3127                                 block->status.hw)
3128                                 continue;
3129
3130                         if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3131                                 r = block->version->funcs->resume(adev);
3132                         else
3133                                 r = block->version->funcs->hw_init(adev);
3134
3135                         DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3136                         if (r)
3137                                 return r;
3138                         block->status.hw = true;
3139                 }
3140         }
3141
3142         return 0;
3143 }
3144
3145 /**
3146  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3147  *
3148  * @adev: amdgpu_device pointer
3149  *
3150  * First resume function for hardware IPs.  The list of all the hardware
3151  * IPs that make up the asic is walked and the resume callbacks are run for
3152  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3153  * COMMON, GMC, IH, and (for SR-IOV) PSP.  resume puts the hardware into a functional state
3154  * function is also used for restoring the GPU after a GPU reset.
3155  * Returns 0 on success, negative error code on failure.
3156  */
3157 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3158 {
3159         int i, r;
3160
3161         for (i = 0; i < adev->num_ip_blocks; i++) {
3162                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3163                         continue;
3164                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3165                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3166                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3167                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3168
3169                         r = adev->ip_blocks[i].version->funcs->resume(adev);
3170                         if (r) {
3171                                 DRM_ERROR("resume of IP block <%s> failed %d\n",
3172                                           adev->ip_blocks[i].version->funcs->name, r);
3173                                 return r;
3174                         }
3175                         adev->ip_blocks[i].status.hw = true;
3176                 }
3177         }
3178
3179         return 0;
3180 }
3181
3182 /**
3183  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3184  *
3185  * @adev: amdgpu_device pointer
3186  *
3187  * Second resume function for hardware IPs.  The list of all the hardware
3188  * IPs that make up the asic is walked and the resume callbacks are run for
3189  * all blocks except COMMON, GMC, IH and PSP.  resume puts the hardware into a
3190  * functional state after a suspend and updates the software state as
3191  * necessary.  This function is also used for restoring the GPU after a GPU
3192  * reset.
3193  * Returns 0 on success, negative error code on failure.
3194  */
3195 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3196 {
3197         int i, r;
3198
3199         for (i = 0; i < adev->num_ip_blocks; i++) {
3200                 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3201                         continue;
3202                 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3203                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3204                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3205                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3206                         continue;
3207                 r = adev->ip_blocks[i].version->funcs->resume(adev);
3208                 if (r) {
3209                         DRM_ERROR("resume of IP block <%s> failed %d\n",
3210                                   adev->ip_blocks[i].version->funcs->name, r);
3211                         return r;
3212                 }
3213                 adev->ip_blocks[i].status.hw = true;
3214
3215                 if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3216                         /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
3217                          * amdgpu_device_resume() after IP resume.
3218                          */
3219                         amdgpu_gfx_off_ctrl(adev, false);
3220                         DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
3221                 }
3222
3223         }
3224
3225         return 0;
3226 }
3227
3228 /**
3229  * amdgpu_device_ip_resume - run resume for hardware IPs
3230  *
3231  * @adev: amdgpu_device pointer
3232  *
3233  * Main resume function for hardware IPs.  The hardware IPs
3234  * are split into two resume functions because they are
3235  * also used in recovering from a GPU reset and some additional
3236  * steps need to be taken between them.  In this case (S3/S4) they are
3237  * run sequentially.
3238  * Returns 0 on success, negative error code on failure.
3239  */
3240 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3241 {
3242         int r;
3243
3244         r = amdgpu_amdkfd_resume_iommu(adev);
3245         if (r)
3246                 return r;
3247
3248         r = amdgpu_device_ip_resume_phase1(adev);
3249         if (r)
3250                 return r;
3251
3252         r = amdgpu_device_fw_loading(adev);
3253         if (r)
3254                 return r;
3255
3256         r = amdgpu_device_ip_resume_phase2(adev);
3257
3258         return r;
3259 }
3260
3261 /**
3262  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3263  *
3264  * @adev: amdgpu_device pointer
3265  *
3266  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3267  */
3268 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3269 {
3270         if (amdgpu_sriov_vf(adev)) {
3271                 if (adev->is_atom_fw) {
3272                         if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3273                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3274                 } else {
3275                         if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3276                                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3277                 }
3278
3279                 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3280                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3281         }
3282 }
3283
3284 /**
3285  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3286  *
3287  * @asic_type: AMD asic type
3288  *
3289  * Check if there is DC (new modesetting infrastructure) support for an asic.
3290  * Returns true if DC has support, false if not.
3291  */
3292 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3293 {
3294         switch (asic_type) {
3295 #ifdef CONFIG_DRM_AMDGPU_SI
3296         case CHIP_HAINAN:
3297 #endif
3298         case CHIP_TOPAZ:
3299                 /* chips with no display hardware */
3300                 return false;
3301 #if defined(CONFIG_DRM_AMD_DC)
3302         case CHIP_TAHITI:
3303         case CHIP_PITCAIRN:
3304         case CHIP_VERDE:
3305         case CHIP_OLAND:
3306                 /*
3307                  * We have systems in the wild with these ASICs that require
3308                  * LVDS and VGA support which is not supported with DC.
3309                  *
3310                  * Fallback to the non-DC driver here by default so as not to
3311                  * cause regressions.
3312                  */
3313 #if defined(CONFIG_DRM_AMD_DC_SI)
3314                 return amdgpu_dc > 0;
3315 #else
3316                 return false;
3317 #endif
3318         case CHIP_BONAIRE:
3319         case CHIP_KAVERI:
3320         case CHIP_KABINI:
3321         case CHIP_MULLINS:
3322                 /*
3323                  * We have systems in the wild with these ASICs that require
3324                  * VGA support which is not supported with DC.
3325                  *
3326                  * Fallback to the non-DC driver here by default so as not to
3327                  * cause regressions.
3328                  */
3329                 return amdgpu_dc > 0;
3330         default:
3331                 return amdgpu_dc != 0;
3332 #else
3333         default:
3334                 if (amdgpu_dc > 0)
3335                         DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3336                                          "but isn't supported by ASIC, ignoring\n");
3337                 return false;
3338 #endif
3339         }
3340 }
3341
3342 /**
3343  * amdgpu_device_has_dc_support - check if dc is supported
3344  *
3345  * @adev: amdgpu_device pointer
3346  *
3347  * Returns true for supported, false for not supported
3348  */
3349 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3350 {
3351         if (amdgpu_sriov_vf(adev) ||
3352             adev->enable_virtual_display ||
3353             (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3354                 return false;
3355
3356         return amdgpu_device_asic_has_dc_support(adev->asic_type);
3357 }
3358
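/**
 * amdgpu_device_xgmi_reset_func - reset one device in an XGMI hive
 *
 * @__work: work_struct.
 *
 * Work handler used when all the nodes of an XGMI hive have to be reset.
 * The hive's task barrier keeps the per-device reset works in lockstep:
 * for BACO the devices enter and exit BACO together, otherwise a full
 * per-ASIC reset is performed once every work has reached the barrier.
 */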
3359 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3360 {
3361         struct amdgpu_device *adev =
3362                 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3363         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3364
3365         /* It's a bug to not have a hive within this function */
3366         if (WARN_ON(!hive))
3367                 return;
3368
3369         /*
3370          * Use task barrier to synchronize all xgmi reset works across the
3371          * hive. task_barrier_enter and task_barrier_exit will block
3372          * until all the threads running the xgmi reset works reach
3373          * those points. task_barrier_full will do both blocks.
3374          */
3375         if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3376
3377                 task_barrier_enter(&hive->tb);
3378                 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3379
3380                 if (adev->asic_reset_res)
3381                         goto fail;
3382
3383                 task_barrier_exit(&hive->tb);
3384                 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3385
3386                 if (adev->asic_reset_res)
3387                         goto fail;
3388
3389                 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3390                     adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3391                         adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3392         } else {
3393
3394                 task_barrier_full(&hive->tb);
3395                 adev->asic_reset_res =  amdgpu_asic_reset(adev);
3396         }
3397
3398 fail:
3399         if (adev->asic_reset_res)
3400                 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3401                          adev->asic_reset_res, adev_to_drm(adev)->unique);
3402         amdgpu_put_xgmi_hive(hive);
3403 }
3404
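/**
 * amdgpu_device_get_job_timeout_settings - parse the lockup timeout parameter
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the amdgpu_lockup_timeout module parameter, a comma separated list
 * of per-engine timeouts in milliseconds in the order gfx, compute, sdma,
 * video (e.g. "10000,60000,10000,10000").  A value of 0 keeps the default,
 * a negative value disables the timeout, and a single value is applied to
 * all non-compute engines.
 * Returns 0 on success, negative error code on failure.
 */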
3405 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3406 {
3407         char *input = amdgpu_lockup_timeout;
3408         char *timeout_setting = NULL;
3409         int index = 0;
3410         long timeout;
3411         int ret = 0;
3412
3413         /*
3414          * By default the timeout for non-compute jobs is 10000 ms
3415          * and 60000 ms for compute jobs.
3416          * In SR-IOV or passthrough mode, the timeout for compute
3417          * jobs is 60000 ms by default.
3418          */
3419         adev->gfx_timeout = msecs_to_jiffies(10000);
3420         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3421         if (amdgpu_sriov_vf(adev))
3422                 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3423                                         msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3424         else
3425                 adev->compute_timeout =  msecs_to_jiffies(60000);
3426
3427         if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3428                 while ((timeout_setting = strsep(&input, ",")) &&
3429                                 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3430                         ret = kstrtol(timeout_setting, 0, &timeout);
3431                         if (ret)
3432                                 return ret;
3433
3434                         if (timeout == 0) {
3435                                 index++;
3436                                 continue;
3437                         } else if (timeout < 0) {
3438                                 timeout = MAX_SCHEDULE_TIMEOUT;
3439                                 dev_warn(adev->dev, "lockup timeout disabled");
3440                                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3441                         } else {
3442                                 timeout = msecs_to_jiffies(timeout);
3443                         }
3444
3445                         switch (index++) {
3446                         case 0:
3447                                 adev->gfx_timeout = timeout;
3448                                 break;
3449                         case 1:
3450                                 adev->compute_timeout = timeout;
3451                                 break;
3452                         case 2:
3453                                 adev->sdma_timeout = timeout;
3454                                 break;
3455                         case 3:
3456                                 adev->video_timeout = timeout;
3457                                 break;
3458                         default:
3459                                 break;
3460                         }
3461                 }
3462                 /*
3463                  * There is only one value specified and
3464                  * it should apply to all non-compute jobs.
3465                  */
3466                 if (index == 1) {
3467                         adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3468                         if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3469                                 adev->compute_timeout = adev->gfx_timeout;
3470                 }
3471         }
3472
3473         return ret;
3474 }
3475
3476 /**
3477  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3478  *
3479  * @adev: amdgpu_device pointer
3480  *
3481  * RAM direct mapped to GPU if IOMMU is not enabled or is pass through mode
3482  */
3483 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3484 {
3485         struct iommu_domain *domain;
3486
3487         domain = iommu_get_domain_for_dev(adev->dev);
3488         if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3489                 adev->ram_is_direct_mapped = true;
3490 }
3491
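/* device level sysfs attributes exposed for each amdgpu device */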
3492 static const struct attribute *amdgpu_dev_attributes[] = {
3493         &dev_attr_product_name.attr,
3494         &dev_attr_product_number.attr,
3495         &dev_attr_serial_number.attr,
3496         &dev_attr_pcie_replay_count.attr,
3497         NULL
3498 };
3499
3500 /**
3501  * amdgpu_device_init - initialize the driver
3502  *
3503  * @adev: amdgpu_device pointer
3504  * @flags: driver flags
3505  *
3506  * Initializes the driver info and hw (all asics).
3507  * Returns 0 for success or an error on failure.
3508  * Called at driver startup.
3509  */
3510 int amdgpu_device_init(struct amdgpu_device *adev,
3511                        uint32_t flags)
3512 {
3513         struct drm_device *ddev = adev_to_drm(adev);
3514         struct pci_dev *pdev = adev->pdev;
3515         int r, i;
3516         bool px = false;
3517         u32 max_MBps;
3518
3519         adev->shutdown = false;
3520         adev->flags = flags;
3521
3522         if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3523                 adev->asic_type = amdgpu_force_asic_type;
3524         else
3525                 adev->asic_type = flags & AMD_ASIC_MASK;
3526
3527         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3528         if (amdgpu_emu_mode == 1)
3529                 adev->usec_timeout *= 10;
3530         adev->gmc.gart_size = 512 * 1024 * 1024;
3531         adev->accel_working = false;
3532         adev->num_rings = 0;
3533         RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3534         adev->mman.buffer_funcs = NULL;
3535         adev->mman.buffer_funcs_ring = NULL;
3536         adev->vm_manager.vm_pte_funcs = NULL;
3537         adev->vm_manager.vm_pte_num_scheds = 0;
3538         adev->gmc.gmc_funcs = NULL;
3539         adev->harvest_ip_mask = 0x0;
3540         adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3541         bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3542
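        /* default all register access callbacks to the invalid stubs;
         * the real accessors are hooked up later by the ASIC specific code
         */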
3543         adev->smc_rreg = &amdgpu_invalid_rreg;
3544         adev->smc_wreg = &amdgpu_invalid_wreg;
3545         adev->pcie_rreg = &amdgpu_invalid_rreg;
3546         adev->pcie_wreg = &amdgpu_invalid_wreg;
3547         adev->pciep_rreg = &amdgpu_invalid_rreg;
3548         adev->pciep_wreg = &amdgpu_invalid_wreg;
3549         adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3550         adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3551         adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3552         adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3553         adev->didt_rreg = &amdgpu_invalid_rreg;
3554         adev->didt_wreg = &amdgpu_invalid_wreg;
3555         adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3556         adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3557         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3558         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3559
3560         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3561                  amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3562                  pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3563
3564         /* mutex initialization is all done here so we
3565          * can call functions that rely on them without locking issues */
3566         mutex_init(&adev->firmware.mutex);
3567         mutex_init(&adev->pm.mutex);
3568         mutex_init(&adev->gfx.gpu_clock_mutex);
3569         mutex_init(&adev->srbm_mutex);
3570         mutex_init(&adev->gfx.pipe_reserve_mutex);
3571         mutex_init(&adev->gfx.gfx_off_mutex);
3572         mutex_init(&adev->grbm_idx_mutex);
3573         mutex_init(&adev->mn_lock);
3574         mutex_init(&adev->virt.vf_errors.lock);
3575         hash_init(adev->mn_hash);
3576         mutex_init(&adev->psp.mutex);
3577         mutex_init(&adev->notifier_lock);
3578         mutex_init(&adev->pm.stable_pstate_ctx_lock);
3579         mutex_init(&adev->benchmark_mutex);
3580
3581         amdgpu_device_init_apu_flags(adev);
3582
3583         r = amdgpu_device_check_arguments(adev);
3584         if (r)
3585                 return r;
3586
3587         spin_lock_init(&adev->mmio_idx_lock);
3588         spin_lock_init(&adev->smc_idx_lock);
3589         spin_lock_init(&adev->pcie_idx_lock);
3590         spin_lock_init(&adev->uvd_ctx_idx_lock);
3591         spin_lock_init(&adev->didt_idx_lock);
3592         spin_lock_init(&adev->gc_cac_idx_lock);
3593         spin_lock_init(&adev->se_cac_idx_lock);
3594         spin_lock_init(&adev->audio_endpt_idx_lock);
3595         spin_lock_init(&adev->mm_stats.lock);
3596
3597         INIT_LIST_HEAD(&adev->shadow_list);
3598         mutex_init(&adev->shadow_list_lock);
3599
3600         INIT_LIST_HEAD(&adev->reset_list);
3601
3602         INIT_LIST_HEAD(&adev->ras_list);
3603
3604         INIT_DELAYED_WORK(&adev->delayed_init_work,
3605                           amdgpu_device_delayed_init_work_handler);
3606         INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3607                           amdgpu_device_delay_enable_gfx_off);
3608
3609         INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3610
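        /* start with one gfxoff disable request outstanding; gfxoff is only
         * allowed once this count drops back to zero
         */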
3611         adev->gfx.gfx_off_req_count = 1;
3612         adev->gfx.gfx_off_residency = 0;
3613         adev->gfx.gfx_off_entrycount = 0;
3614         adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3615
3616         atomic_set(&adev->throttling_logging_enabled, 1);
3617         /*
3618          * If throttling continues, logging will be performed every minute
3619          * to avoid log flooding. "-1" is subtracted since the thermal
3620          * throttling interrupt comes every second. Thus, the total logging
3621          * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3622          * for throttling interrupt) = 60 seconds.
3623          */
3624         ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3625         ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3626
3627         /* Registers mapping */
3628         /* TODO: block userspace mapping of io register */
3629         if (adev->asic_type >= CHIP_BONAIRE) {
3630                 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3631                 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3632         } else {
3633                 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3634                 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3635         }
3636
3637         for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3638                 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3639
3640         adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3641         if (!adev->rmmio)
3642                 return -ENOMEM;
3644         DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3645         DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3646
3647         amdgpu_device_get_pcie_info(adev);
3648
3649         if (amdgpu_mcbp)
3650                 DRM_INFO("MCBP is enabled\n");
3651
3652         /*
3653          * Reset domain needs to be present early, before the XGMI hive is
3654          * discovered (if any) and initialized, so that the reset sem and
3655          * in_gpu reset flag can be used early during init and before calling RREG32.
3656          */
3657         adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3658         if (!adev->reset_domain)
3659                 return -ENOMEM;
3660
3661         /* detect hw virtualization here */
3662         amdgpu_detect_virtualization(adev);
3663
3664         r = amdgpu_device_get_job_timeout_settings(adev);
3665         if (r) {
3666                 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3667                 return r;
3668         }
3669
3670         /* early init functions */
3671         r = amdgpu_device_ip_early_init(adev);
3672         if (r)
3673                 return r;
3674
3675         /* Enable TMZ based on IP_VERSION */
3676         amdgpu_gmc_tmz_set(adev);
3677
3678         amdgpu_gmc_noretry_set(adev);
3679         /* Need to get xgmi info early to decide the reset behavior */
3680         if (adev->gmc.xgmi.supported) {
3681                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3682                 if (r)
3683                         return r;
3684         }
3685
3686         /* enable PCIE atomic ops */
3687         if (amdgpu_sriov_vf(adev))
3688                 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3689                         adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3690                         (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3691         else
3692                 adev->have_atomics_support =
3693                         !pci_enable_atomic_ops_to_root(adev->pdev,
3694                                           PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3695                                           PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3696         if (!adev->have_atomics_support)
3697                 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3698
3699         /* doorbell bar mapping and doorbell index init */
3700         amdgpu_device_doorbell_init(adev);
3701
3702         if (amdgpu_emu_mode == 1) {
3703                 /* post the asic on emulation mode */
3704                 emu_soc_asic_init(adev);
3705                 goto fence_driver_init;
3706         }
3707
3708         amdgpu_reset_init(adev);
3709
3710         /* detect if we have an SR-IOV vbios */
3711         amdgpu_device_detect_sriov_bios(adev);
3712
3713         /* check if we need to reset the asic
3714          *  E.g., driver was not cleanly unloaded previously, etc.
3715          */
3716         if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3717                 if (adev->gmc.xgmi.num_physical_nodes) {
3718                         dev_info(adev->dev, "Pending hive reset.\n");
3719                         adev->gmc.xgmi.pending_reset = true;
3720                         /* Only need to init the necessary blocks for SMU to handle the reset */
3721                         for (i = 0; i < adev->num_ip_blocks; i++) {
3722                                 if (!adev->ip_blocks[i].status.valid)
3723                                         continue;
3724                                 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3725                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3726                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3727                                       adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3728                                         DRM_DEBUG("IP %s disabled for hw_init.\n",
3729                                                 adev->ip_blocks[i].version->funcs->name);
3730                                         adev->ip_blocks[i].status.hw = true;
3731                                 }
3732                         }
3733                 } else {
3734                         r = amdgpu_asic_reset(adev);
3735                         if (r) {
3736                                 dev_err(adev->dev, "asic reset on init failed\n");
3737                                 goto failed;
3738                         }
3739                 }
3740         }
3741
3742         pci_enable_pcie_error_reporting(adev->pdev);
3743
3744         /* Post card if necessary */
3745         if (amdgpu_device_need_post(adev)) {
3746                 if (!adev->bios) {
3747                         dev_err(adev->dev, "no vBIOS found\n");
3748                         r = -EINVAL;
3749                         goto failed;
3750                 }
3751                 DRM_INFO("GPU posting now...\n");
3752                 r = amdgpu_device_asic_init(adev);
3753                 if (r) {
3754                         dev_err(adev->dev, "gpu post error!\n");
3755                         goto failed;
3756                 }
3757         }
3758
3759         if (adev->is_atom_fw) {
3760                 /* Initialize clocks */
3761                 r = amdgpu_atomfirmware_get_clock_info(adev);
3762                 if (r) {
3763                         dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3764                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3765                         goto failed;
3766                 }
3767         } else {
3768                 /* Initialize clocks */
3769                 r = amdgpu_atombios_get_clock_info(adev);
3770                 if (r) {
3771                         dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3772                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3773                         goto failed;
3774                 }
3775                 /* init i2c buses */
3776                 if (!amdgpu_device_has_dc_support(adev))
3777                         amdgpu_atombios_i2c_init(adev);
3778         }
3779
3780 fence_driver_init:
3781         /* Fence driver */
3782         r = amdgpu_fence_driver_sw_init(adev);
3783         if (r) {
3784                 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3785                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3786                 goto failed;
3787         }
3788
3789         /* init the mode config */
3790         drm_mode_config_init(adev_to_drm(adev));
3791
3792         r = amdgpu_device_ip_init(adev);
3793         if (r) {
3794                 /* failed in exclusive mode due to timeout */
3795                 if (amdgpu_sriov_vf(adev) &&
3796                     !amdgpu_sriov_runtime(adev) &&
3797                     amdgpu_virt_mmio_blocked(adev) &&
3798                     !amdgpu_virt_wait_reset(adev)) {
3799                         dev_err(adev->dev, "VF exclusive mode timeout\n");
3800                         /* Don't send request since VF is inactive. */
3801                         adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3802                         adev->virt.ops = NULL;
3803                         r = -EAGAIN;
3804                         goto release_ras_con;
3805                 }
3806                 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3807                 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3808                 goto release_ras_con;
3809         }
3810
3811         amdgpu_fence_driver_hw_init(adev);
3812
3813         dev_info(adev->dev,
3814                 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3815                         adev->gfx.config.max_shader_engines,
3816                         adev->gfx.config.max_sh_per_se,
3817                         adev->gfx.config.max_cu_per_sh,
3818                         adev->gfx.cu_info.number);
3819
3820         adev->accel_working = true;
3821
3822         amdgpu_vm_check_compute_bug(adev);
3823
3824         /* Initialize the buffer migration limit. */
3825         if (amdgpu_moverate >= 0)
3826                 max_MBps = amdgpu_moverate;
3827         else
3828                 max_MBps = 8; /* Allow 8 MB/s. */
3829         /* Get a log2 for easy divisions. */
3830         adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3831
3832         r = amdgpu_pm_sysfs_init(adev);
3833         if (r) {
3834                 adev->pm_sysfs_en = false;
3835                 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3836         } else
3837                 adev->pm_sysfs_en = true;
3838
3839         r = amdgpu_ucode_sysfs_init(adev);
3840         if (r) {
3841                 adev->ucode_sysfs_en = false;
3842                 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3843         } else
3844                 adev->ucode_sysfs_en = true;
3845
3846         r = amdgpu_psp_sysfs_init(adev);
3847         if (r) {
3848                 adev->psp_sysfs_en = false;
3849                 if (!amdgpu_sriov_vf(adev))
3850                         DRM_ERROR("Creating psp sysfs failed\n");
3851         } else
3852                 adev->psp_sysfs_en = true;
3853
3854         /*
3855          * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3856          * Otherwise the mgpu fan boost feature will be skipped because the
3857          * gpu instance count would be too low.
3858          */
3859         amdgpu_register_gpu_instance(adev);
3860
3861         /* enable clockgating, etc. after ib tests, etc. since some blocks require
3862          * explicit gating rather than handling it automatically.
3863          */
3864         if (!adev->gmc.xgmi.pending_reset) {
3865                 r = amdgpu_device_ip_late_init(adev);
3866                 if (r) {
3867                         dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3868                         amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3869                         goto release_ras_con;
3870                 }
3871                 /* must succeed. */
3872                 amdgpu_ras_resume(adev);
3873                 queue_delayed_work(system_wq, &adev->delayed_init_work,
3874                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3875         }
3876
3877         if (amdgpu_sriov_vf(adev))
3878                 flush_delayed_work(&adev->delayed_init_work);
3879
3880         r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3881         if (r)
3882                 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3883
3884         if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3885                 r = amdgpu_pmu_init(adev);
3886                 if (r)
3887                         dev_err(adev->dev, "amdgpu_pmu_init failed\n");
        }
3888
3889         /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3890         if (amdgpu_device_cache_pci_state(adev->pdev))
3891                 pci_restore_state(pdev);
3892
3893         /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3894         /* this will fail for cards that aren't VGA class devices, just
3895          * ignore it */
3896         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3897                 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3898
3899         if (amdgpu_device_supports_px(ddev)) {
3900                 px = true;
3901                 vga_switcheroo_register_client(adev->pdev,
3902                                                &amdgpu_switcheroo_ops, px);
3903                 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3904         }
3905
3906         if (adev->gmc.xgmi.pending_reset)
3907                 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3908                                    msecs_to_jiffies(AMDGPU_RESUME_MS));
3909
3910         amdgpu_device_check_iommu_direct_map(adev);
3911
3912         return 0;
3913
3914 release_ras_con:
3915         amdgpu_release_ras_context(adev);
3916
3917 failed:
3918         amdgpu_vf_error_trans_all(adev);
3919
3920         return r;
3921 }
3922
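/**
 * amdgpu_device_unmap_mmio - tear down all CPU mappings of the device
 *
 * @adev: amdgpu_device pointer
 *
 * Unmaps all userspace CPU mappings pointing at the device and releases the
 * doorbell, register and VRAM aperture mappings along with the associated
 * write-combine memtype reservations.
 */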
3923 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3924 {
3925
3926         /* Clear all CPU mappings pointing to this device */
3927         unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3928
3929         /* Unmap all mapped bars - Doorbell, registers and VRAM */
3930         amdgpu_device_doorbell_fini(adev);
3931
3932         iounmap(adev->rmmio);
3933         adev->rmmio = NULL;
3934         if (adev->mman.aper_base_kaddr)
3935                 iounmap(adev->mman.aper_base_kaddr);
3936         adev->mman.aper_base_kaddr = NULL;
3937
3938         /* Memory manager related */
3939         if (!adev->gmc.xgmi.connected_to_cpu) {
3940                 arch_phys_wc_del(adev->gmc.vram_mtrr);
3941                 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3942         }
3943 }
3944
3945 /**
3946  * amdgpu_device_fini_hw - tear down the driver
3947  *
3948  * @adev: amdgpu_device pointer
3949  *
3950  * Tear down the driver info (all asics).
3951  * Called at driver shutdown.
3952  */
3953 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3954 {
3955         dev_info(adev->dev, "amdgpu: finishing device.\n");
3956         flush_delayed_work(&adev->delayed_init_work);
3957         adev->shutdown = true;
3958
3959         /* make sure IB tests have finished before entering exclusive mode
3960          * to avoid preemption on IB test
3961          */
3962         if (amdgpu_sriov_vf(adev)) {
3963                 amdgpu_virt_request_full_gpu(adev, false);
3964                 amdgpu_virt_fini_data_exchange(adev);
3965         }
3966
3967         /* disable all interrupts */
3968         amdgpu_irq_disable_all(adev);
3969         if (adev->mode_info.mode_config_initialized) {
3970                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3971                         drm_helper_force_disable_all(adev_to_drm(adev));
3972                 else
3973                         drm_atomic_helper_shutdown(adev_to_drm(adev));
3974         }
3975         amdgpu_fence_driver_hw_fini(adev);
3976
3977         if (adev->mman.initialized) {
3978                 flush_delayed_work(&adev->mman.bdev.wq);
3979                 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3980         }
3981
3982         if (adev->pm_sysfs_en)
3983                 amdgpu_pm_sysfs_fini(adev);
3984         if (adev->ucode_sysfs_en)
3985                 amdgpu_ucode_sysfs_fini(adev);
3986         if (adev->psp_sysfs_en)
3987                 amdgpu_psp_sysfs_fini(adev);
3988         sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3989
3990         /* RAS features must be disabled before hw fini */
3991         amdgpu_ras_pre_fini(adev);
3992
3993         amdgpu_device_ip_fini_early(adev);
3994
3995         amdgpu_irq_fini_hw(adev);
3996
3997         if (adev->mman.initialized)
3998                 ttm_device_clear_dma_mappings(&adev->mman.bdev);
3999
4000         amdgpu_gart_dummy_page_fini(adev);
4001
4002         amdgpu_device_unmap_mmio(adev);
4003
4004 }
4005
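/**
 * amdgpu_device_fini_sw - tear down the driver software state
 *
 * @adev: amdgpu_device pointer
 *
 * Software teardown counterpart of amdgpu_device_fini_hw().  Tears down the
 * remaining IP software state, fence driver, atombios/i2c state, reset
 * domain and PMU, and frees the cached vBIOS and PCI config state.
 */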
4006 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4007 {
4008         int idx;
4009
4010         amdgpu_fence_driver_sw_fini(adev);
4011         amdgpu_device_ip_fini(adev);
4012         release_firmware(adev->firmware.gpu_info_fw);
4013         adev->firmware.gpu_info_fw = NULL;
4014         adev->accel_working = false;
4015         dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4016
4017         amdgpu_reset_fini(adev);
4018
4019         /* free i2c buses */
4020         if (!amdgpu_device_has_dc_support(adev))
4021                 amdgpu_i2c_fini(adev);
4022
4023         if (amdgpu_emu_mode != 1)
4024                 amdgpu_atombios_fini(adev);
4025
4026         kfree(adev->bios);
4027         adev->bios = NULL;
4028         if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4029                 vga_switcheroo_unregister_client(adev->pdev);
4030                 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4031         }
4032         if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4033                 vga_client_unregister(adev->pdev);
4034
4035         if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4036
4037                 iounmap(adev->rmmio);
4038                 adev->rmmio = NULL;
4039                 amdgpu_device_doorbell_fini(adev);
4040                 drm_dev_exit(idx);
4041         }
4042
4043         if (IS_ENABLED(CONFIG_PERF_EVENTS))
4044                 amdgpu_pmu_fini(adev);
4045         if (adev->mman.discovery_bin)
4046                 amdgpu_discovery_fini(adev);
4047
4048         amdgpu_reset_put_reset_domain(adev->reset_domain);
4049         adev->reset_domain = NULL;
4050
4051         kfree(adev->pci_state);
4052
4053 }
4054
4055 /**
4056  * amdgpu_device_evict_resources - evict device resources
4057  * @adev: amdgpu device object
4058  *
4059  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4060  * of the vram memory type. Mainly used for evicting device resources
4061  * at suspend time.
4062  *
4063  */
4064 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4065 {
4066         int ret;
4067
4068         /* No need to evict vram on APUs for suspend to ram or s2idle */
4069         if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4070                 return 0;
4071
4072         ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4073         if (ret)
4074                 DRM_WARN("evicting device resources failed\n");
4075         return ret;
4076 }
4077
4078 /*
4079  * Suspend & resume.
4080  */
4081 /**
4082  * amdgpu_device_suspend - initiate device suspend
4083  *
4084  * @dev: drm dev pointer
4085  * @fbcon: notify the fbdev of suspend
4086  *
4087  * Puts the hw in the suspend state (all asics).
4088  * Returns 0 for success or an error on failure.
4089  * Called at driver suspend.
4090  */
4091 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4092 {
4093         struct amdgpu_device *adev = drm_to_adev(dev);
4094         int r = 0;
4095
4096         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4097                 return 0;
4098
4099         adev->in_suspend = true;
4100
4101         if (amdgpu_sriov_vf(adev)) {
4102                 amdgpu_virt_fini_data_exchange(adev);
4103                 r = amdgpu_virt_request_full_gpu(adev, false);
4104                 if (r)
4105                         return r;
4106         }
4107
4108         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4109                 DRM_WARN("smart shift update failed\n");
4110
4111         drm_kms_helper_poll_disable(dev);
4112
4113         if (fbcon)
4114                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4115
4116         cancel_delayed_work_sync(&adev->delayed_init_work);
4117
4118         amdgpu_ras_suspend(adev);
4119
4120         amdgpu_device_ip_suspend_phase1(adev);
4121
4122         if (!adev->in_s0ix)
4123                 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4124
4125         r = amdgpu_device_evict_resources(adev);
4126         if (r)
4127                 return r;
4128
4129         amdgpu_fence_driver_hw_fini(adev);
4130
4131         amdgpu_device_ip_suspend_phase2(adev);
4132
4133         if (amdgpu_sriov_vf(adev))
4134                 amdgpu_virt_release_full_gpu(adev, false);
4135
4136         return 0;
4137 }
4138
4139 /**
4140  * amdgpu_device_resume - initiate device resume
4141  *
4142  * @dev: drm dev pointer
4143  * @fbcon: notify the fbdev of resume
4144  *
4145  * Bring the hw back to operating state (all asics).
4146  * Returns 0 for success or an error on failure.
4147  * Called at driver resume.
4148  */
4149 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4150 {
4151         struct amdgpu_device *adev = drm_to_adev(dev);
4152         int r = 0;
4153
4154         if (amdgpu_sriov_vf(adev)) {
4155                 r = amdgpu_virt_request_full_gpu(adev, true);
4156                 if (r)
4157                         return r;
4158         }
4159
4160         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4161                 return 0;
4162
4163         if (adev->in_s0ix)
4164                 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4165
4166         /* post card */
4167         if (amdgpu_device_need_post(adev)) {
4168                 r = amdgpu_device_asic_init(adev);
4169                 if (r)
4170                         dev_err(adev->dev, "amdgpu asic init failed\n");
4171         }
4172
4173         r = amdgpu_device_ip_resume(adev);
4174
4175         /* no matter what r is, always need to properly release full GPU */
4176         if (amdgpu_sriov_vf(adev)) {
4177                 amdgpu_virt_init_data_exchange(adev);
4178                 amdgpu_virt_release_full_gpu(adev, true);
4179         }
4180
4181         if (r) {
4182                 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4183                 return r;
4184         }
4185         amdgpu_fence_driver_hw_init(adev);
4186
4187         r = amdgpu_device_ip_late_init(adev);
4188         if (r)
4189                 return r;
4190
4191         queue_delayed_work(system_wq, &adev->delayed_init_work,
4192                            msecs_to_jiffies(AMDGPU_RESUME_MS));
4193
4194         if (!adev->in_s0ix) {
4195                 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4196                 if (r)
4197                         return r;
4198         }
4199
4200         /* Make sure IB tests flushed */
4201         if (amdgpu_sriov_vf(adev))
4202                 amdgpu_irq_gpu_reset_resume_helper(adev);
4203         flush_delayed_work(&adev->delayed_init_work);
4204
4205         if (adev->in_s0ix) {
4206                 /* re-enable gfxoff after IP resume. This re-enables gfxoff after
4207                  * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
4208                  */
4209                 amdgpu_gfx_off_ctrl(adev, true);
4210                 DRM_DEBUG("will enable gfxoff for the mission mode\n");
4211         }
4212         if (fbcon)
4213                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4214
4215         drm_kms_helper_poll_enable(dev);
4216
4217         amdgpu_ras_resume(adev);
4218
4219         /*
4220          * Most of the connector probing functions try to acquire runtime pm
4221          * refs to ensure that the GPU is powered on when connector polling is
4222          * performed. Since we're calling this from a runtime PM callback,
4223          * trying to acquire rpm refs will cause us to deadlock.
4224          *
4225          * Since we're guaranteed to be holding the rpm lock, it's safe to
4226          * temporarily disable the rpm helpers so this doesn't deadlock us.
4227          */
4228 #ifdef CONFIG_PM
4229         dev->dev->power.disable_depth++;
4230 #endif
4231         if (!amdgpu_device_has_dc_support(adev))
4232                 drm_helper_hpd_irq_event(dev);
4233         else
4234                 drm_kms_helper_hotplug_event(dev);
4235 #ifdef CONFIG_PM
4236         dev->dev->power.disable_depth--;
4237 #endif
4238         adev->in_suspend = false;
4239
4240         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4241                 DRM_WARN("smart shift update failed\n");
4242
4243         return 0;
4244 }
4245
4246 /**
4247  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4248  *
4249  * @adev: amdgpu_device pointer
4250  *
4251  * The list of all the hardware IPs that make up the asic is walked and
4252  * the check_soft_reset callbacks are run.  check_soft_reset determines
4253  * if the asic is still hung or not.
4254  * Returns true if any of the IPs are still in a hung state, false if not.
4255  */
4256 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4257 {
4258         int i;
4259         bool asic_hang = false;
4260
4261         if (amdgpu_sriov_vf(adev))
4262                 return true;
4263
4264         if (amdgpu_asic_need_full_reset(adev))
4265                 return true;
4266
4267         for (i = 0; i < adev->num_ip_blocks; i++) {
4268                 if (!adev->ip_blocks[i].status.valid)
4269                         continue;
4270                 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4271                         adev->ip_blocks[i].status.hang =
4272                                 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4273                 if (adev->ip_blocks[i].status.hang) {
4274                         dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4275                         asic_hang = true;
4276                 }
4277         }
4278         return asic_hang;
4279 }
4280
4281 /**
4282  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4283  *
4284  * @adev: amdgpu_device pointer
4285  *
4286  * The list of all the hardware IPs that make up the asic is walked and the
4287  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4288  * handles any IP specific hardware or software state changes that are
4289  * necessary for a soft reset to succeed.
4290  * Returns 0 on success, negative error code on failure.
4291  */
4292 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4293 {
4294         int i, r = 0;
4295
4296         for (i = 0; i < adev->num_ip_blocks; i++) {
4297                 if (!adev->ip_blocks[i].status.valid)
4298                         continue;
4299                 if (adev->ip_blocks[i].status.hang &&
4300                     adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4301                         r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4302                         if (r)
4303                                 return r;
4304                 }
4305         }
4306
4307         return 0;
4308 }
4309
4310 /**
4311  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4312  *
4313  * @adev: amdgpu_device pointer
4314  *
4315  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4316  * reset is necessary to recover.
4317  * Returns true if a full asic reset is required, false if not.
4318  */
4319 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4320 {
4321         int i;
4322
4323         if (amdgpu_asic_need_full_reset(adev))
4324                 return true;
4325
4326         for (i = 0; i < adev->num_ip_blocks; i++) {
4327                 if (!adev->ip_blocks[i].status.valid)
4328                         continue;
4329                 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4330                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4331                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4332                     (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4333                      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4334                         if (adev->ip_blocks[i].status.hang) {
4335                                 dev_info(adev->dev, "Some blocks need full reset!\n");
4336                                 return true;
4337                         }
4338                 }
4339         }
4340         return false;
4341 }
4342
4343 /**
4344  * amdgpu_device_ip_soft_reset - do a soft reset
4345  *
4346  * @adev: amdgpu_device pointer
4347  *
4348  * The list of all the hardware IPs that make up the asic is walked and the
4349  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4350  * IP specific hardware or software state changes that are necessary to soft
4351  * reset the IP.
4352  * Returns 0 on success, negative error code on failure.
4353  */
4354 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4355 {
4356         int i, r = 0;
4357
4358         for (i = 0; i < adev->num_ip_blocks; i++) {
4359                 if (!adev->ip_blocks[i].status.valid)
4360                         continue;
4361                 if (adev->ip_blocks[i].status.hang &&
4362                     adev->ip_blocks[i].version->funcs->soft_reset) {
4363                         r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4364                         if (r)
4365                                 return r;
4366                 }
4367         }
4368
4369         return 0;
4370 }
4371
4372 /**
4373  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4374  *
4375  * @adev: amdgpu_device pointer
4376  *
4377  * The list of all the hardware IPs that make up the asic is walked and the
4378  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4379  * handles any IP specific hardware or software state changes that are
4380  * necessary after the IP has been soft reset.
4381  * Returns 0 on success, negative error code on failure.
4382  */
4383 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4384 {
4385         int i, r = 0;
4386
4387         for (i = 0; i < adev->num_ip_blocks; i++) {
4388                 if (!adev->ip_blocks[i].status.valid)
4389                         continue;
4390                 if (adev->ip_blocks[i].status.hang &&
4391                     adev->ip_blocks[i].version->funcs->post_soft_reset)
4392                         r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4393                 if (r)
4394                         return r;
4395         }
4396
4397         return 0;
4398 }
4399
4400 /**
4401  * amdgpu_device_recover_vram - Recover some VRAM contents
4402  *
4403  * @adev: amdgpu_device pointer
4404  *
4405  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4406  * restore things like GPUVM page tables after a GPU reset where
4407  * the contents of VRAM might be lost.
4408  *
4409  * Returns:
4410  * 0 on success, negative error code on failure.
4411  */
4412 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4413 {
4414         struct dma_fence *fence = NULL, *next = NULL;
4415         struct amdgpu_bo *shadow;
4416         struct amdgpu_bo_vm *vmbo;
4417         long r = 1, tmo;
4418
4419         if (amdgpu_sriov_runtime(adev))
4420                 tmo = msecs_to_jiffies(8000);
4421         else
4422                 tmo = msecs_to_jiffies(100);
4423
4424         dev_info(adev->dev, "recover vram bo from shadow start\n");
4425         mutex_lock(&adev->shadow_list_lock);
4426         list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4427                 shadow = &vmbo->bo;
4428                 /* No need to recover an evicted BO */
4429                 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4430                     shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4431                     shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4432                         continue;
4433
4434                 r = amdgpu_bo_restore_shadow(shadow, &next);
4435                 if (r)
4436                         break;
4437
4438                 if (fence) {
4439                         tmo = dma_fence_wait_timeout(fence, false, tmo);
4440                         dma_fence_put(fence);
4441                         fence = next;
4442                         if (tmo == 0) {
4443                                 r = -ETIMEDOUT;
4444                                 break;
4445                         } else if (tmo < 0) {
4446                                 r = tmo;
4447                                 break;
4448                         }
4449                 } else {
4450                         fence = next;
4451                 }
4452         }
4453         mutex_unlock(&adev->shadow_list_lock);
4454
4455         if (fence)
4456                 tmo = dma_fence_wait_timeout(fence, false, tmo);
4457         dma_fence_put(fence);
4458
4459         if (r < 0 || tmo <= 0) {
4460                 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4461                 return -EIO;
4462         }
4463
4464         dev_info(adev->dev, "recover vram bo from shadow done\n");
4465         return 0;
4466 }
4467
4468
4469 /**
4470  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4471  *
4472  * @adev: amdgpu_device pointer
4473  * @from_hypervisor: request from hypervisor
4474  *
4475  * Do a VF FLR and reinitialize the ASIC.
4476  * Returns 0 on success, negative error code on failure.
4477  */
4478 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4479                                      bool from_hypervisor)
4480 {
4481         int r;
4482         struct amdgpu_hive_info *hive = NULL;
4483         int retry_limit = 0;
4484
4485 retry:
4486         amdgpu_amdkfd_pre_reset(adev);
4487
4488         if (from_hypervisor)
4489                 r = amdgpu_virt_request_full_gpu(adev, true);
4490         else
4491                 r = amdgpu_virt_reset_gpu(adev);
4492         if (r)
4493                 return r;
4494
4495         /* Resume IP prior to SMC */
4496         r = amdgpu_device_ip_reinit_early_sriov(adev);
4497         if (r)
4498                 goto error;
4499
4500         amdgpu_virt_init_data_exchange(adev);
4501
4502         r = amdgpu_device_fw_loading(adev);
4503         if (r)
4504                 return r;
4505
4506         /* now we are okay to resume SMC/CP/SDMA */
4507         r = amdgpu_device_ip_reinit_late_sriov(adev);
4508         if (r)
4509                 goto error;
4510
4511         hive = amdgpu_get_xgmi_hive(adev);
4512         /* Update PSP FW topology after reset */
4513         if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4514                 r = amdgpu_xgmi_update_topology(hive, adev);
4515
4516         if (hive)
4517                 amdgpu_put_xgmi_hive(hive);
4518
4519         if (!r) {
4520                 amdgpu_irq_gpu_reset_resume_helper(adev);
4521                 r = amdgpu_ib_ring_tests(adev);
4522
4523                 amdgpu_amdkfd_post_reset(adev);
4524         }
4525
4526 error:
4527         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4528                 amdgpu_inc_vram_lost(adev);
4529                 r = amdgpu_device_recover_vram(adev);
4530         }
4531         amdgpu_virt_release_full_gpu(adev, true);
4532
4533         if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4534                 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4535                         retry_limit++;
4536                         goto retry;
4537                 } else
4538                         DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4539         }
4540
4541         return r;
4542 }
4543
4544 /**
4545  * amdgpu_device_has_job_running - check if there is any job in the pending list
4546  *
4547  * @adev: amdgpu_device pointer
4548  *
4549  * Check whether any ring still has a job in its scheduler pending list.
4550  */
4551 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4552 {
4553         int i;
4554         struct drm_sched_job *job;
4555
4556         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4557                 struct amdgpu_ring *ring = adev->rings[i];
4558
4559                 if (!ring || !ring->sched.thread)
4560                         continue;
4561
4562                 spin_lock(&ring->sched.job_list_lock);
4563                 job = list_first_entry_or_null(&ring->sched.pending_list,
4564                                                struct drm_sched_job, list);
4565                 spin_unlock(&ring->sched.job_list_lock);
4566                 if (job)
4567                         return true;
4568         }
4569         return false;
4570 }
4571
4572 /**
4573  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4574  *
4575  * @adev: amdgpu_device pointer
4576  *
4577  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4578  * a hung GPU.
4579  */
4580 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4581 {
4582
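        /* amdgpu_gpu_recovery module parameter (as handled below):
         * 0 disables recovery, -1 (auto) disables it only for the ASICs
         * listed in the switch statement, any other value enables it;
         * 2 additionally enables the guilty job recheck performed in
         * amdgpu_device_recheck_guilty_jobs().
         */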
4583         if (amdgpu_gpu_recovery == 0)
4584                 goto disabled;
4585
4586         if (!amdgpu_device_ip_check_soft_reset(adev)) {
4587                 dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4588                 return false;
4589         }
4590
4591         if (amdgpu_sriov_vf(adev))
4592                 return true;
4593
4594         if (amdgpu_gpu_recovery == -1) {
4595                 switch (adev->asic_type) {
4596 #ifdef CONFIG_DRM_AMDGPU_SI
4597                 case CHIP_VERDE:
4598                 case CHIP_TAHITI:
4599                 case CHIP_PITCAIRN:
4600                 case CHIP_OLAND:
4601                 case CHIP_HAINAN:
4602 #endif
4603 #ifdef CONFIG_DRM_AMDGPU_CIK
4604                 case CHIP_KAVERI:
4605                 case CHIP_KABINI:
4606                 case CHIP_MULLINS:
4607 #endif
4608                 case CHIP_CARRIZO:
4609                 case CHIP_STONEY:
4610                 case CHIP_CYAN_SKILLFISH:
4611                         goto disabled;
4612                 default:
4613                         break;
4614                 }
4615         }
4616
4617         return true;
4618
4619 disabled:
4620         dev_info(adev->dev, "GPU recovery disabled.\n");
4621         return false;
4622 }
4623
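/**
 * amdgpu_device_mode1_reset - perform a mode1 (whole ASIC) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering, caches the PCI config space, then triggers a
 * mode1 reset through the SMU if supported, otherwise through the PSP.
 * Afterwards the PCI state is restored and the memory size register is
 * polled until the ASIC comes back out of reset.
 * Returns 0 on success, negative error code on failure.
 */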
4624 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4625 {
4626         u32 i;
4627         int ret = 0;
4628
4629         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4630
4631         dev_info(adev->dev, "GPU mode1 reset\n");
4632
4633         /* disable BM */
4634         pci_clear_master(adev->pdev);
4635
4636         amdgpu_device_cache_pci_state(adev->pdev);
4637
4638         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4639                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4640                 ret = amdgpu_dpm_mode1_reset(adev);
4641         } else {
4642                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4643                 ret = psp_gpu_reset(adev);
4644         }
4645
4646         if (ret)
4647                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4648
4649         amdgpu_device_load_pci_state(adev->pdev);
4650
4651         /* wait for asic to come out of reset */
4652         for (i = 0; i < adev->usec_timeout; i++) {
4653                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4654
4655                 if (memsize != 0xffffffff)
4656                         break;
4657                 udelay(1);
4658         }
4659
4660         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4661         return ret;
4662 }
4663
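/**
 * amdgpu_device_pre_asic_reset - prepare a device for ASIC reset
 *
 * @adev: amdgpu_device pointer
 * @reset_context: amdgpu reset context
 *
 * Stops the SR-IOV data exchange thread, clears the job fences and force
 * completes the hardware fences on all rings, and bumps the karma of the
 * guilty job. Tries the ASIC specific reset handler first; if none is
 * implemented, falls back on bare metal to a per-IP soft reset and decides
 * whether a full reset is needed, updating @reset_context accordingly.
 * Returns 0 on success, negative error code on failure.
 */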
4664 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4665                                  struct amdgpu_reset_context *reset_context)
4666 {
4667         int i, r = 0;
4668         struct amdgpu_job *job = NULL;
4669         bool need_full_reset =
4670                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4671
4672         if (reset_context->reset_req_dev == adev)
4673                 job = reset_context->job;
4674
4675         if (amdgpu_sriov_vf(adev)) {
4676                 /* stop the data exchange thread */
4677                 amdgpu_virt_fini_data_exchange(adev);
4678         }
4679
4680         amdgpu_fence_driver_isr_toggle(adev, true);
4681
4682         /* block all schedulers and reset given job's ring */
4683         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4684                 struct amdgpu_ring *ring = adev->rings[i];
4685
4686                 if (!ring || !ring->sched.thread)
4687                         continue;
4688
4689                 /* Clear job fences from the fence driver to avoid force_completion;
4690                  * leave NULL and VM flush fences in the fence driver. */
4691                 amdgpu_fence_driver_clear_job_fences(ring);
4692
4693                 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4694                 amdgpu_fence_driver_force_completion(ring);
4695         }
4696
4697         amdgpu_fence_driver_isr_toggle(adev, false);
4698
4699         if (job && job->vm)
4700                 drm_sched_increase_karma(&job->base);
4701
4702         r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4703         /* If reset handler not implemented, continue; otherwise return */
4704         if (r == -ENOSYS)
4705                 r = 0;
4706         else
4707                 return r;
4708
4709         /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4710         if (!amdgpu_sriov_vf(adev)) {
4711
4712                 if (!need_full_reset)
4713                         need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4714
4715                 if (!need_full_reset && amdgpu_gpu_recovery) {
4716                         amdgpu_device_ip_pre_soft_reset(adev);
4717                         r = amdgpu_device_ip_soft_reset(adev);
4718                         amdgpu_device_ip_post_soft_reset(adev);
4719                         if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4720                                 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4721                                 need_full_reset = true;
4722                         }
4723                 }
4724
4725                 if (need_full_reset)
4726                         r = amdgpu_device_ip_suspend(adev);
4727                 if (need_full_reset)
4728                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4729                 else
4730                         clear_bit(AMDGPU_NEED_FULL_RESET,
4731                                   &reset_context->flags);
4732         }
4733
4734         return r;
4735 }
4736
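/* Snapshot the user-selected reset dump registers while the reset domain
 * semaphore is held; the values are traced and later emitted in the
 * devcoredump below.
 */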
4737 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4738 {
4739         int i;
4740
4741         lockdep_assert_held(&adev->reset_domain->sem);
4742
4743         for (i = 0; i < adev->num_regs; i++) {
4744                 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4745                 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4746                                              adev->reset_dump_reg_value[i]);
4747         }
4748
4749         return 0;
4750 }
4751
4752 #ifdef CONFIG_DEV_COREDUMP
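/* Devcoredump read callback: emits the kernel release, module name, reset
 * time, the offending process (if known), whether VRAM was lost, and the
 * register dump collected in amdgpu_reset_reg_dumps() through a drm
 * coredump printer.
 */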
4753 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4754                 size_t count, void *data, size_t datalen)
4755 {
4756         struct drm_printer p;
4757         struct amdgpu_device *adev = data;
4758         struct drm_print_iterator iter;
4759         int i;
4760
4761         iter.data = buffer;
4762         iter.offset = 0;
4763         iter.start = offset;
4764         iter.remain = count;
4765
4766         p = drm_coredump_printer(&iter);
4767
4768         drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4769         drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4770         drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4771         drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4772         if (adev->reset_task_info.pid)
4773                 drm_printf(&p, "process_name: %s PID: %d\n",
4774                            adev->reset_task_info.process_name,
4775                            adev->reset_task_info.pid);
4776
4777         if (adev->reset_vram_lost)
4778                 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4779         if (adev->num_regs) {
4780                 drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4781
4782                 for (i = 0; i < adev->num_regs; i++)
4783                         drm_printf(&p, "0x%08x: 0x%08x\n",
4784                                    adev->reset_dump_reg_list[i],
4785                                    adev->reset_dump_reg_value[i]);
4786         }
4787
4788         return count - iter.remain;
4789 }
4790
4791 static void amdgpu_devcoredump_free(void *data)
4792 {
4793 }
4794
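/* Record the reset time and register a device coredump so the collected
 * reset diagnostics can be retrieved from userspace via the devcoredump
 * class device.
 */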
4795 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4796 {
4797         struct drm_device *dev = adev_to_drm(adev);
4798
4799         ktime_get_ts64(&adev->reset_time);
4800         dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4801                       amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4802 }
4803 #endif
4804
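/**
 * amdgpu_do_asic_reset - perform the actual ASIC reset and bring-up
 *
 * @device_list_handle: list of devices to reset (a single device, or all
 *                      nodes of an XGMI hive)
 * @reset_context: amdgpu reset context
 *
 * Tries the ASIC specific reset handler first. Otherwise, if a full reset
 * is needed, resets all devices in the list (in parallel for XGMI hives),
 * re-posts the ASICs, resumes the IP blocks, checks for VRAM loss, reloads
 * firmware, runs IB ring tests and finally recovers VRAM contents from
 * their GTT shadows.
 * Returns 0 on success, negative error code on failure.
 */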
4805 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4806                          struct amdgpu_reset_context *reset_context)
4807 {
4808         struct amdgpu_device *tmp_adev = NULL;
4809         bool need_full_reset, skip_hw_reset, vram_lost = false;
4810         int r = 0;
4811         bool gpu_reset_for_dev_remove = 0;
4812
4813         /* Try reset handler method first */
4814         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4815                                     reset_list);
4816         amdgpu_reset_reg_dumps(tmp_adev);
4817
4818         reset_context->reset_device_list = device_list_handle;
4819         r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4820         /* If reset handler not implemented, continue; otherwise return */
4821         if (r == -ENOSYS)
4822                 r = 0;
4823         else
4824                 return r;
4825
4826         /* Reset handler not implemented, use the default method */
4827         need_full_reset =
4828                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4829         skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4830
4831         gpu_reset_for_dev_remove =
4832                 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4833                         test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4834
4835         /*
4836          * ASIC reset has to be done on all XGMI hive nodes ASAP
4837          * to allow proper links negotiation in FW (within 1 sec)
4838          */
4839         if (!skip_hw_reset && need_full_reset) {
4840                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4841                         /* For XGMI run all resets in parallel to speed up the process */
4842                         if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4843                                 tmp_adev->gmc.xgmi.pending_reset = false;
4844                                 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4845                                         r = -EALREADY;
4846                         } else
4847                                 r = amdgpu_asic_reset(tmp_adev);
4848
4849                         if (r) {
4850                                 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4851                                          r, adev_to_drm(tmp_adev)->unique);
4852                                 break;
4853                         }
4854                 }
4855
4856                 /* For XGMI wait for all resets to complete before proceed */
4857                 if (!r) {
4858                         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4859                                 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4860                                         flush_work(&tmp_adev->xgmi_reset_work);
4861                                         r = tmp_adev->asic_reset_res;
4862                                         if (r)
4863                                                 break;
4864                                 }
4865                         }
4866                 }
4867         }
4868
4869         if (!r && amdgpu_ras_intr_triggered()) {
4870                 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4871                         if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4872                             tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4873                                 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4874                 }
4875
4876                 amdgpu_ras_intr_cleared();
4877         }
4878
4879         /* Since the mode1 reset affects base ip blocks, the
4880          * phase1 ip blocks need to be resumed. Otherwise there
4881          * will be a BIOS signature error and the psp bootloader
4882          * can't load kdb on the next amdgpu install.
4883          */
4884         if (gpu_reset_for_dev_remove) {
4885                 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4886                         amdgpu_device_ip_resume_phase1(tmp_adev);
4887
4888                 goto end;
4889         }
4890
4891         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4892                 if (need_full_reset) {
4893                         /* post card */
4894                         r = amdgpu_device_asic_init(tmp_adev);
4895                         if (r) {
4896                                 dev_warn(tmp_adev->dev, "asic atom init failed!");
4897                         } else {
4898                                 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4899                                 r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4900                                 if (r)
4901                                         goto out;
4902
4903                                 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4904                                 if (r)
4905                                         goto out;
4906
4907                                 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4908 #ifdef CONFIG_DEV_COREDUMP
4909                                 tmp_adev->reset_vram_lost = vram_lost;
4910                                 memset(&tmp_adev->reset_task_info, 0,
4911                                                 sizeof(tmp_adev->reset_task_info));
4912                                 if (reset_context->job && reset_context->job->vm)
4913                                         tmp_adev->reset_task_info =
4914                                                 reset_context->job->vm->task_info;
4915                                 amdgpu_reset_capture_coredumpm(tmp_adev);
4916 #endif
4917                                 if (vram_lost) {
4918                                         DRM_INFO("VRAM is lost due to GPU reset!\n");
4919                                         amdgpu_inc_vram_lost(tmp_adev);
4920                                 }
4921
4922                                 r = amdgpu_device_fw_loading(tmp_adev);
4923                                 if (r)
4924                                         return r;
4925
4926                                 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4927                                 if (r)
4928                                         goto out;
4929
4930                                 if (vram_lost)
4931                                         amdgpu_device_fill_reset_magic(tmp_adev);
4932
4933                                 /*
4934                                  * Add this ASIC back as tracked since the
4935                                  * reset has already completed successfully.
4936                                  */
4937                                 amdgpu_register_gpu_instance(tmp_adev);
4938
4939                                 if (!reset_context->hive &&
4940                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4941                                         amdgpu_xgmi_add_device(tmp_adev);
4942
4943                                 r = amdgpu_device_ip_late_init(tmp_adev);
4944                                 if (r)
4945                                         goto out;
4946
4947                                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4948
4949                                 /*
4950                                  * The GPU enters a bad state once the number
4951                                  * of faulty pages from ECC errors reaches the
4952                                  * threshold, and RAS recovery is scheduled
4953                                  * next. So add a check here to break recovery
4954                                  * if the bad page threshold is indeed
4955                                  * exceeded, and remind the user to retire this
4956                                  * GPU or set a bigger bad_page_threshold value
4957                                  * when probing the driver again.
4958                                  */
4959                                 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4960                                         /* must succeed. */
4961                                         amdgpu_ras_resume(tmp_adev);
4962                                 } else {
4963                                         r = -EINVAL;
4964                                         goto out;
4965                                 }
4966
4967                                 /* Update PSP FW topology after reset */
4968                                 if (reset_context->hive &&
4969                                     tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4970                                         r = amdgpu_xgmi_update_topology(
4971                                                 reset_context->hive, tmp_adev);
4972                         }
4973                 }
4974
4975 out:
4976                 if (!r) {
4977                         amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4978                         r = amdgpu_ib_ring_tests(tmp_adev);
4979                         if (r) {
4980                                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4981                                 need_full_reset = true;
4982                                 r = -EAGAIN;
4983                                 goto end;
4984                         }
4985                 }
4986
4987                 if (!r)
4988                         r = amdgpu_device_recover_vram(tmp_adev);
4989                 else
4990                         tmp_adev->asic_reset_res = r;
4991         }
4992
4993 end:
4994         if (need_full_reset)
4995                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4996         else
4997                 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4998         return r;
4999 }
5000
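/* Tell the MP1 (SMU) firmware which reset flow is about to run: shutdown
 * state for mode1 resets, reset state for mode2 resets, none otherwise.
 */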
5001 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5002 {
5003
5004         switch (amdgpu_asic_reset_method(adev)) {
5005         case AMD_RESET_METHOD_MODE1:
5006                 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5007                 break;
5008         case AMD_RESET_METHOD_MODE2:
5009                 adev->mp1_state = PP_MP1_STATE_RESET;
5010                 break;
5011         default:
5012                 adev->mp1_state = PP_MP1_STATE_NONE;
5013                 break;
5014         }
5015 }
5016
5017 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5018 {
5019         amdgpu_vf_error_trans_all(adev);
5020         adev->mp1_state = PP_MP1_STATE_NONE;
5021 }
5022
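/* Re-enable and resume runtime PM for the HDMI/DP audio function
 * (function 1 of the GPU's PCI device) after the reset has completed.
 */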
5023 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5024 {
5025         struct pci_dev *p = NULL;
5026
5027         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5028                         adev->pdev->bus->number, 1);
5029         if (p) {
5030                 pm_runtime_enable(&(p->dev));
5031                 pm_runtime_resume(&(p->dev));
5032         }
5033 }
5034
5035 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5036 {
5037         enum amd_reset_method reset_method;
5038         struct pci_dev *p = NULL;
5039         u64 expires;
5040
5041         /*
5042          * For now, only BACO and mode1 reset are confirmed
5043          * to suffer from the audio issue if not properly suspended.
5044          */
5045         reset_method = amdgpu_asic_reset_method(adev);
5046         if ((reset_method != AMD_RESET_METHOD_BACO) &&
5047              (reset_method != AMD_RESET_METHOD_MODE1))
5048                 return -EINVAL;
5049
5050         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5051                         adev->pdev->bus->number, 1);
5052         if (!p)
5053                 return -ENODEV;
5054
5055         expires = pm_runtime_autosuspend_expiration(&(p->dev));
5056         if (!expires)
5057                 /*
5058                  * If we cannot get the audio device autosuspend delay,
5059                  * a fixed 4s interval will be used. Since the audio
5060                  * controller's default autosuspend delay is 3s, the 4s
5061                  * used here is guaranteed to cover it.
5062                  */
5063                 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5064
5065         while (!pm_runtime_status_suspended(&(p->dev))) {
5066                 if (!pm_runtime_suspend(&(p->dev)))
5067                         break;
5068
5069                 if (expires < ktime_get_mono_fast_ns()) {
5070                         dev_warn(adev->dev, "failed to suspend display audio\n");
5071                         /* TODO: abort the succeeding gpu reset? */
5072                         return -ETIMEDOUT;
5073                 }
5074         }
5075
5076         pm_runtime_disable(&(p->dev));
5077
5078         return 0;
5079 }
5080
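/* With amdgpu_gpu_recovery == 2, resubmit the first pending job on each
 * ring and wait for its hardware fence. A job that times out again is the
 * real guilty one: its fences are cleared, its karma is increased and
 * another hardware reset is performed; jobs that do signal have their
 * finished fences signalled and are freed.
 */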
5081 static void amdgpu_device_recheck_guilty_jobs(
5082         struct amdgpu_device *adev, struct list_head *device_list_handle,
5083         struct amdgpu_reset_context *reset_context)
5084 {
5085         int i, r = 0;
5086
5087         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5088                 struct amdgpu_ring *ring = adev->rings[i];
5089                 int ret = 0;
5090                 struct drm_sched_job *s_job;
5091
5092                 if (!ring || !ring->sched.thread)
5093                         continue;
5094
5095                 s_job = list_first_entry_or_null(&ring->sched.pending_list,
5096                                 struct drm_sched_job, list);
5097                 if (s_job == NULL)
5098                         continue;
5099
5100                 /* Clear the job's guilty flag; the following step decides the real guilty job. */
5101                 drm_sched_reset_karma(s_job);
5102                 drm_sched_resubmit_jobs_ext(&ring->sched, 1);
5103
5104                 if (!s_job->s_fence->parent) {
5105                         DRM_WARN("Failed to get a HW fence for job!");
5106                         continue;
5107                 }
5108
5109                 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5110                 if (ret == 0) { /* timeout */
5111                         DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5112                                                 ring->sched.name, s_job->id);
5113
5114
5115                         amdgpu_fence_driver_isr_toggle(adev, true);
5116
5117                         /* Clear this failed job from fence array */
5118                         amdgpu_fence_driver_clear_job_fences(ring);
5119
5120                         amdgpu_fence_driver_isr_toggle(adev, false);
5121
5122                         /* Since the job won't signal and we are going to
5123                          * resubmit again, drop this parent pointer
5124                          */
5125                         dma_fence_put(s_job->s_fence->parent);
5126                         s_job->s_fence->parent = NULL;
5127
5128                         /* set guilty */
5129                         drm_sched_increase_karma(s_job);
5130                         amdgpu_reset_prepare_hwcontext(adev, reset_context);
5131 retry:
5132                         /* do hw reset */
5133                         if (amdgpu_sriov_vf(adev)) {
5134                                 amdgpu_virt_fini_data_exchange(adev);
5135                                 r = amdgpu_device_reset_sriov(adev, false);
5136                                 if (r)
5137                                         adev->asic_reset_res = r;
5138                         } else {
5139                                 clear_bit(AMDGPU_SKIP_HW_RESET,
5140                                           &reset_context->flags);
5141                                 r = amdgpu_do_asic_reset(device_list_handle,
5142                                                          reset_context);
5143                                 if (r && r == -EAGAIN)
5144                                         goto retry;
5145                         }
5146
5147                         /*
5148                          * Bump the reset counter so that the following
5149                          * resubmitted jobs can flush their VMIDs.
5150                          */
5151                         atomic_inc(&adev->gpu_reset_counter);
5152                         continue;
5153                 }
5154
5155                 /* got the hw fence, signal finished fence */
5156                 atomic_dec(ring->sched.score);
5157                 dma_fence_get(&s_job->s_fence->finished);
5158                 dma_fence_signal(&s_job->s_fence->finished);
5159                 dma_fence_put(&s_job->s_fence->finished);
5160
5161                 /* remove node from list and free the job */
5162                 spin_lock(&ring->sched.job_list_lock);
5163                 list_del_init(&s_job->list);
5164                 spin_unlock(&ring->sched.job_list_lock);
5165                 ring->sched.ops->free_job(s_job);
5166         }
5167 }
5168
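/* Cancel reset work that may have been queued from other paths (debugfs,
 * KFD, SR-IOV FLR and RAS recovery) since a reset is already in progress.
 */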
5169 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5170 {
5171         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5172
5173 #if defined(CONFIG_DEBUG_FS)
5174         if (!amdgpu_sriov_vf(adev))
5175                 cancel_work(&adev->reset_work);
5176 #endif
5177
5178         if (adev->kfd.dev)
5179                 cancel_work(&adev->kfd.reset_work);
5180
5181         if (amdgpu_sriov_vf(adev))
5182                 cancel_work(&adev->virt.flr_work);
5183
5184         if (con && adev->ras_enabled)
5185                 cancel_work(&con->recovery_work);
5186
5187 }
5188
5189
5190 /**
5191  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5192  *
5193  * @adev: amdgpu_device pointer
5194  * @job: the job which triggered the hang
5195  *
5196  * Attempt to reset the GPU if it has hung (all asics).
5197  * Attempts a soft reset or full reset and reinitializes the ASIC.
5198  * Returns 0 for success or an error on failure.
5199  */
5200
5201 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5202                               struct amdgpu_job *job,
5203                               struct amdgpu_reset_context *reset_context)
5204 {
5205         struct list_head device_list, *device_list_handle =  NULL;
5206         bool job_signaled = false;
5207         struct amdgpu_hive_info *hive = NULL;
5208         struct amdgpu_device *tmp_adev = NULL;
5209         int i, r = 0;
5210         bool need_emergency_restart = false;
5211         bool audio_suspended = false;
5212         int tmp_vram_lost_counter;
5213         bool gpu_reset_for_dev_remove = false;
5214
5215         gpu_reset_for_dev_remove =
5216                         test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5217                                 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5218
5219         /*
5220          * Special case: RAS triggered and full reset isn't supported
5221          */
5222         need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5223
5224         /*
5225          * Flush RAM to disk so that after reboot
5226          * the user can read log and see why the system rebooted.
5227          */
5228         if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5229                 DRM_WARN("Emergency reboot.");
5230
5231                 ksys_sync_helper();
5232                 emergency_restart();
5233         }
5234
5235         dev_info(adev->dev, "GPU %s begin!\n",
5236                 need_emergency_restart ? "jobs stop":"reset");
5237
5238         if (!amdgpu_sriov_vf(adev))
5239                 hive = amdgpu_get_xgmi_hive(adev);
5240         if (hive)
5241                 mutex_lock(&hive->hive_lock);
5242
5243         reset_context->job = job;
5244         reset_context->hive = hive;
5245         /*
5246          * Build list of devices to reset.
5247          * In case we are in XGMI hive mode, resort the device list
5248          * to put adev in the 1st position.
5249          */
5250         INIT_LIST_HEAD(&device_list);
5251         if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5252                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5253                         list_add_tail(&tmp_adev->reset_list, &device_list);
5254                         if (gpu_reset_for_dev_remove && adev->shutdown)
5255                                 tmp_adev->shutdown = true;
5256                 }
5257                 if (!list_is_first(&adev->reset_list, &device_list))
5258                         list_rotate_to_front(&adev->reset_list, &device_list);
5259                 device_list_handle = &device_list;
5260         } else {
5261                 list_add_tail(&adev->reset_list, &device_list);
5262                 device_list_handle = &device_list;
5263         }
5264
5265         /* We need to lock reset domain only once both for XGMI and single device */
5266         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5267                                     reset_list);
5268         amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5269
5270         /* block all schedulers and reset given job's ring */
5271         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5272
5273                 amdgpu_device_set_mp1_state(tmp_adev);
5274
5275                 /*
5276                  * Try to put the audio codec into suspend state
5277                  * before gpu reset started.
5278                  *
5279                  * The graphics device shares its power domain with
5280                  * the AZ (audio) power domain. Without this, we may
5281                  * change the audio hardware behind the audio
5282                  * driver's back, which triggers audio codec
5283                  * errors.
5284                  */
5285                 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5286                         audio_suspended = true;
5287
5288                 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5289
5290                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5291
5292                 if (!amdgpu_sriov_vf(tmp_adev))
5293                         amdgpu_amdkfd_pre_reset(tmp_adev);
5294
5295                 /*
5296                  * Mark these ASICs to be reset as untracked first,
5297                  * and add them back after the reset has completed.
5298                  */
5299                 amdgpu_unregister_gpu_instance(tmp_adev);
5300
5301                 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5302
5303                 /* disable ras on ALL IPs */
5304                 if (!need_emergency_restart &&
5305                       amdgpu_device_ip_need_full_reset(tmp_adev))
5306                         amdgpu_ras_suspend(tmp_adev);
5307
5308                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5309                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5310
5311                         if (!ring || !ring->sched.thread)
5312                                 continue;
5313
5314                         drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5315
5316                         if (need_emergency_restart)
5317                                 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5318                 }
5319                 atomic_inc(&tmp_adev->gpu_reset_counter);
5320         }
5321
5322         if (need_emergency_restart)
5323                 goto skip_sched_resume;
5324
5325         /*
5326          * Must check guilty signal here since after this point all old
5327          * HW fences are force signaled.
5328          *
5329          * job->base holds a reference to parent fence
5330          */
5331         if (job && dma_fence_is_signaled(&job->hw_fence)) {
5332                 job_signaled = true;
5333                 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5334                 goto skip_hw_reset;
5335         }
5336
5337 retry:  /* Rest of adevs pre asic reset from XGMI hive. */
5338         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5339                 if (gpu_reset_for_dev_remove) {
5340                         /* Workaround for ASICs that need to disable the SMC first */
5341                         amdgpu_device_smu_fini_early(tmp_adev);
5342                 }
5343                 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5344                 /* TODO: Should we stop? */
5345                 if (r) {
5346                         dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5347                                   r, adev_to_drm(tmp_adev)->unique);
5348                         tmp_adev->asic_reset_res = r;
5349                 }
5350
5351                 /*
5352                  * Drop all pending non scheduler resets. Scheduler resets
5353                  * were already dropped during drm_sched_stop
5354                  */
5355                 amdgpu_device_stop_pending_resets(tmp_adev);
5356         }
5357
5358         tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5359         /* Actual ASIC resets if needed.*/
5360         /* Host driver will handle XGMI hive reset for SRIOV */
5361         if (amdgpu_sriov_vf(adev)) {
5362                 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5363                 if (r)
5364                         adev->asic_reset_res = r;
5365
5366                 /* Aldebaran supports ras in SRIOV, so need resume ras during reset */
5367                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5368                         amdgpu_ras_resume(adev);
5369         } else {
5370                 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5371                 if (r && r == -EAGAIN)
5372                         goto retry;
5373
5374                 if (!r && gpu_reset_for_dev_remove)
5375                         goto recover_end;
5376         }
5377
5378 skip_hw_reset:
5379
5380         /* Post ASIC reset for all devs .*/
5381         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5382
5383                 /*
5384                  * Sometimes a later bad compute job can block a good gfx job because
5385                  * the gfx and compute rings share internal GC hardware. Add an
5386                  * additional guilty-job recheck step to find the real guilty job: it
5387                  * synchronously resubmits and waits for each job to be signaled; a
5388                  * job that times out is identified as the real guilty one.
5389                  */
5390                 if (amdgpu_gpu_recovery == 2 &&
5391                         !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5392                         amdgpu_device_recheck_guilty_jobs(
5393                                 tmp_adev, device_list_handle, reset_context);
5394
5395                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5396                         struct amdgpu_ring *ring = tmp_adev->rings[i];
5397
5398                         if (!ring || !ring->sched.thread)
5399                                 continue;
5400
5401                         /* No point in resubmitting jobs if we didn't HW reset */
5402                         if (!tmp_adev->asic_reset_res && !job_signaled)
5403                                 drm_sched_resubmit_jobs(&ring->sched);
5404
5405                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5406                 }
5407
5408                 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5409                         amdgpu_mes_self_test(tmp_adev);
5410
5411                 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5412                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5413                 }
5414
5415                 if (tmp_adev->asic_reset_res)
5416                         r = tmp_adev->asic_reset_res;
5417
5418                 tmp_adev->asic_reset_res = 0;
5419
5420                 if (r) {
5421                         /* bad news, how to tell it to userspace ? */
5422                         dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5423                         amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5424                 } else {
5425                         dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5426                         if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5427                                 DRM_WARN("smart shift update failed\n");
5428                 }
5429         }
5430
5431 skip_sched_resume:
5432         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5433                 /* unlock kfd: SRIOV would do it separately */
5434                 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5435                         amdgpu_amdkfd_post_reset(tmp_adev);
5436
5437                 /* kfd_post_reset will do nothing if the kfd device is not initialized,
5438                  * so bring up kfd here if it was not initialized before.
5439                  */
5440                 if (!adev->kfd.init_complete)
5441                         amdgpu_amdkfd_device_init(adev);
5442
5443                 if (audio_suspended)
5444                         amdgpu_device_resume_display_audio(tmp_adev);
5445
5446                 amdgpu_device_unset_mp1_state(tmp_adev);
5447         }
5448
5449 recover_end:
5450         tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5451                                             reset_list);
5452         amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5453
5454         if (hive) {
5455                 mutex_unlock(&hive->hive_lock);
5456                 amdgpu_put_xgmi_hive(hive);
5457         }
5458
5459         if (r)
5460                 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5461
5462         atomic_set(&adev->reset_domain->reset_res, r);
5463         return r;
5464 }
5465
5466 /**
5467  * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5468  *
5469  * @adev: amdgpu_device pointer
5470  *
5471  * Fetches and stores in the driver the PCIe capabilities (gen speed
5472  * and lanes) of the slot the device is in. Handles APUs and
5473  * virtualized environments where PCIE config space may not be available.
5474  */
5475 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5476 {
5477         struct pci_dev *pdev;
5478         enum pci_bus_speed speed_cap, platform_speed_cap;
5479         enum pcie_link_width platform_link_width;
5480
5481         if (amdgpu_pcie_gen_cap)
5482                 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5483
5484         if (amdgpu_pcie_lane_cap)
5485                 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5486
5487         /* covers APUs as well */
5488         if (pci_is_root_bus(adev->pdev->bus)) {
5489                 if (adev->pm.pcie_gen_mask == 0)
5490                         adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5491                 if (adev->pm.pcie_mlw_mask == 0)
5492                         adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5493                 return;
5494         }
5495
5496         if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5497                 return;
5498
5499         pcie_bandwidth_available(adev->pdev, NULL,
5500                                  &platform_speed_cap, &platform_link_width);
5501
5502         if (adev->pm.pcie_gen_mask == 0) {
5503                 /* asic caps */
5504                 pdev = adev->pdev;
5505                 speed_cap = pcie_get_speed_cap(pdev);
5506                 if (speed_cap == PCI_SPEED_UNKNOWN) {
5507                         adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5508                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5509                                                   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5510                 } else {
5511                         if (speed_cap == PCIE_SPEED_32_0GT)
5512                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5513                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5514                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5515                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5516                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5517                         else if (speed_cap == PCIE_SPEED_16_0GT)
5518                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5519                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5520                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5521                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5522                         else if (speed_cap == PCIE_SPEED_8_0GT)
5523                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5524                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5525                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5526                         else if (speed_cap == PCIE_SPEED_5_0GT)
5527                                 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5528                                                           CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5529                         else
5530                                 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5531                 }
5532                 /* platform caps */
5533                 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5534                         adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5535                                                    CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5536                 } else {
5537                         if (platform_speed_cap == PCIE_SPEED_32_0GT)
5538                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5539                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5540                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5541                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5542                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5543                         else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5544                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5545                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5546                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5547                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5548                         else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5549                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5550                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5551                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5552                         else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5553                                 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5554                                                            CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5555                         else
5556                                 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5557
5558                 }
5559         }
5560         if (adev->pm.pcie_mlw_mask == 0) {
5561                 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5562                         adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5563                 } else {
5564                         switch (platform_link_width) {
5565                         case PCIE_LNK_X32:
5566                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5567                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5568                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5569                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5570                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5571                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5572                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5573                                 break;
5574                         case PCIE_LNK_X16:
5575                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5576                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5577                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5578                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5579                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5580                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5581                                 break;
5582                         case PCIE_LNK_X12:
5583                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5584                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5585                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5586                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5587                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5588                                 break;
5589                         case PCIE_LNK_X8:
5590                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5591                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5592                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5593                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5594                                 break;
5595                         case PCIE_LNK_X4:
5596                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5597                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5598                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5599                                 break;
5600                         case PCIE_LNK_X2:
5601                                 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5602                                                           CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5603                                 break;
5604                         case PCIE_LNK_X1:
5605                                 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5606                                 break;
5607                         default:
5608                                 break;
5609                         }
5610                 }
5611         }
5612 }
5613
5614 /**
5615  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5616  *
5617  * @adev: amdgpu_device pointer
5618  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5619  *
5620  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5621  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5622  * @peer_adev.
5623  */
5624 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5625                                       struct amdgpu_device *peer_adev)
5626 {
5627 #ifdef CONFIG_HSA_AMD_P2P
5628         uint64_t address_mask = peer_adev->dev->dma_mask ?
5629                 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5630         resource_size_t aper_limit =
5631                 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5632         bool p2p_access =
5633                 !adev->gmc.xgmi.connected_to_cpu &&
5634                 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5635
5636         return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5637                 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5638                 !(adev->gmc.aper_base & address_mask ||
5639                   aper_limit & address_mask));
5640 #else
5641         return false;
5642 #endif
5643 }
5644
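/**
 * amdgpu_device_baco_enter - put the GPU into the BACO low-power state
 * @dev: drm_device pointer
 *
 * Disables doorbell interrupts when RAS is enabled and then asks the DPM
 * layer to enter BACO.
 *
 * Return: 0 on success, -ENOTSUPP if the device does not support BACO, or
 * the error code returned by amdgpu_dpm_baco_enter().
 */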
5645 int amdgpu_device_baco_enter(struct drm_device *dev)
5646 {
5647         struct amdgpu_device *adev = drm_to_adev(dev);
5648         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5649
5650         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5651                 return -ENOTSUPP;
5652
5653         if (ras && adev->ras_enabled &&
5654             adev->nbio.funcs->enable_doorbell_interrupt)
5655                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5656
5657         return amdgpu_dpm_baco_enter(adev);
5658 }
5659
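/**
 * amdgpu_device_baco_exit - bring the GPU back out of the BACO state
 * @dev: drm_device pointer
 *
 * Asks the DPM layer to exit BACO, re-enables doorbell interrupts when RAS
 * is enabled, and clears any pending doorbell interrupt when running as a
 * passthrough device.
 *
 * Return: 0 on success, -ENOTSUPP if the device does not support BACO, or
 * the error code returned by amdgpu_dpm_baco_exit().
 */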
5660 int amdgpu_device_baco_exit(struct drm_device *dev)
5661 {
5662         struct amdgpu_device *adev = drm_to_adev(dev);
5663         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5664         int ret = 0;
5665
5666         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5667                 return -ENOTSUPP;
5668
5669         ret = amdgpu_dpm_baco_exit(adev);
5670         if (ret)
5671                 return ret;
5672
5673         if (ras && adev->ras_enabled &&
5674             adev->nbio.funcs->enable_doorbell_interrupt)
5675                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5676
5677         if (amdgpu_passthrough(adev) &&
5678             adev->nbio.funcs->clear_doorbell_interrupt)
5679                 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5680
5681         return 0;
5682 }
5683
5684 /**
5685  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5686  * @pdev: PCI device struct
5687  * @state: PCI channel state
5688  *
5689  * Description: Called when a PCI error is detected.
5690  *
5691  * Return: PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5692  */
5693 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5694 {
5695         struct drm_device *dev = pci_get_drvdata(pdev);
5696         struct amdgpu_device *adev = drm_to_adev(dev);
5697         int i;
5698
5699         DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5700
5701         if (adev->gmc.xgmi.num_physical_nodes > 1) {
5702                 DRM_WARN("No support for XGMI hive yet...");
5703                 return PCI_ERS_RESULT_DISCONNECT;
5704         }
5705
5706         adev->pci_channel_state = state;
5707
5708         switch (state) {
5709         case pci_channel_io_normal:
5710                 return PCI_ERS_RESULT_CAN_RECOVER;
5711         /* Fatal error, prepare for slot reset */
5712         case pci_channel_io_frozen:
5713                 /*
5714                  * Locking adev->reset_domain->sem will prevent any external access
5715                  * to GPU during PCI error recovery
5716                  */
5717                 amdgpu_device_lock_reset_domain(adev->reset_domain);
5718                 amdgpu_device_set_mp1_state(adev);
5719
5720                 /*
5721                  * Block any work scheduling as we do for regular GPU reset
5722                  * for the duration of the recovery
5723                  */
5724                 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5725                         struct amdgpu_ring *ring = adev->rings[i];
5726
5727                         if (!ring || !ring->sched.thread)
5728                                 continue;
5729
5730                         drm_sched_stop(&ring->sched, NULL);
5731                 }
5732                 atomic_inc(&adev->gpu_reset_counter);
5733                 return PCI_ERS_RESULT_NEED_RESET;
5734         case pci_channel_io_perm_failure:
5735                 /* Permanent error, prepare for device removal */
5736                 return PCI_ERS_RESULT_DISCONNECT;
5737         }
5738
5739         return PCI_ERS_RESULT_NEED_RESET;
5740 }
5741
5742 /**
5743  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5744  * @pdev: pointer to PCI device
5745  */
5746 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5747 {
5748
5749         DRM_INFO("PCI error: mmio enabled callback!!\n");
5750
5751         /* TODO - dump whatever for debugging purposes */
5752
5753          * This is called only if amdgpu_pci_error_detected returns
5754          * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5755          * works, no need to reset slot.
5756          */
5757
5758         return PCI_ERS_RESULT_RECOVERED;
5759 }
5760
5761 /**
5762  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5763  * @pdev: PCI device struct
5764  *
5765  * Description: This routine is called by the PCI error recovery
5766  * code after the PCI slot has been reset, just before we
5767  * should resume normal operations.
5768  */
5769 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5770 {
5771         struct drm_device *dev = pci_get_drvdata(pdev);
5772         struct amdgpu_device *adev = drm_to_adev(dev);
5773         int r, i;
5774         struct amdgpu_reset_context reset_context;
5775         u32 memsize;
5776         struct list_head device_list;
5777
5778         DRM_INFO("PCI error: slot reset callback!!\n");
5779
5780         memset(&reset_context, 0, sizeof(reset_context));
5781
5782         INIT_LIST_HEAD(&device_list);
5783         list_add_tail(&adev->reset_list, &device_list);
5784
5785         /* wait for asic to come out of reset */
5786         msleep(500);
5787
5788         /* Restore PCI confspace */
5789         amdgpu_device_load_pci_state(pdev);
5790
5791         /* confirm ASIC came out of reset */
5792         for (i = 0; i < adev->usec_timeout; i++) {
5793                 memsize = amdgpu_asic_get_config_memsize(adev);
5794
5795                 if (memsize != 0xffffffff)
5796                         break;
5797                 udelay(1);
5798         }
5799         if (memsize == 0xffffffff) {
5800                 r = -ETIME;
5801                 goto out;
5802         }
5803
5804         reset_context.method = AMD_RESET_METHOD_NONE;
5805         reset_context.reset_req_dev = adev;
5806         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5807         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5808
5809         adev->no_hw_access = true;
5810         r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5811         adev->no_hw_access = false;
5812         if (r)
5813                 goto out;
5814
5815         r = amdgpu_do_asic_reset(&device_list, &reset_context);
5816
5817 out:
5818         if (!r) {
5819                 if (amdgpu_device_cache_pci_state(adev->pdev))
5820                         pci_restore_state(adev->pdev);
5821
5822                 DRM_INFO("PCIe error recovery succeeded\n");
5823         } else {
5824                 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5825                 amdgpu_device_unset_mp1_state(adev);
5826                 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5827         }
5828
5829         return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5830 }
5831
5832 /**
5833  * amdgpu_pci_resume() - resume normal ops after PCI reset
5834  * @pdev: pointer to PCI device
5835  *
5836  * Called when the error recovery driver tells us that it's
5837  * OK to resume normal operation.
5838  */
5839 void amdgpu_pci_resume(struct pci_dev *pdev)
5840 {
5841         struct drm_device *dev = pci_get_drvdata(pdev);
5842         struct amdgpu_device *adev = drm_to_adev(dev);
5843         int i;
5844
5846         DRM_INFO("PCI error: resume callback!!\n");
5847
5848         /* Only continue execution for the case of pci_channel_io_frozen */
5849         if (adev->pci_channel_state != pci_channel_io_frozen)
5850                 return;
5851
5852         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5853                 struct amdgpu_ring *ring = adev->rings[i];
5854
5855                 if (!ring || !ring->sched.thread)
5856                         continue;
5857
5859                 drm_sched_resubmit_jobs(&ring->sched);
5860                 drm_sched_start(&ring->sched, true);
5861         }
5862
5863         amdgpu_device_unset_mp1_state(adev);
5864         amdgpu_device_unlock_reset_domain(adev->reset_domain);
5865 }
5866
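/**
 * amdgpu_device_cache_pci_state - save the PCI configuration space
 * @pdev: PCI device struct
 *
 * Saves the current PCI configuration space and stores it in
 * adev->pci_state so it can be restored later, e.g. during PCI error
 * recovery or after a GPU reset.
 *
 * Return: true on success, false if the state could not be saved or stored.
 */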
5867 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5868 {
5869         struct drm_device *dev = pci_get_drvdata(pdev);
5870         struct amdgpu_device *adev = drm_to_adev(dev);
5871         int r;
5872
5873         r = pci_save_state(pdev);
5874         if (!r) {
5875                 kfree(adev->pci_state);
5876
5877                 adev->pci_state = pci_store_saved_state(pdev);
5878
5879                 if (!adev->pci_state) {
5880                         DRM_ERROR("Failed to store PCI saved state");
5881                         return false;
5882                 }
5883         } else {
5884                 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5885                 return false;
5886         }
5887
5888         return true;
5889 }
5890
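/**
 * amdgpu_device_load_pci_state - restore the cached PCI configuration space
 * @pdev: PCI device struct
 *
 * Loads the configuration space previously cached by
 * amdgpu_device_cache_pci_state() and restores it to the device.
 *
 * Return: true on success, false if no cached state exists or loading failed.
 */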
5891 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5892 {
5893         struct drm_device *dev = pci_get_drvdata(pdev);
5894         struct amdgpu_device *adev = drm_to_adev(dev);
5895         int r;
5896
5897         if (!adev->pci_state)
5898                 return false;
5899
5900         r = pci_load_saved_state(pdev, adev->pci_state);
5901
5902         if (!r) {
5903                 pci_restore_state(pdev);
5904         } else {
5905                 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5906                 return false;
5907         }
5908
5909         return true;
5910 }
5911
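/**
 * amdgpu_device_flush_hdp - flush the HDP cache
 * @adev: amdgpu_device pointer
 * @ring: ring to emit the flush on, or NULL to use the ASIC callback
 *
 * Emits an HDP flush on @ring when the ring supports it, otherwise falls
 * back to the ASIC-level HDP flush. Skipped on APUs when not running as a
 * passthrough device and on devices whose XGMI link is connected to the CPU.
 */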
5912 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5913                 struct amdgpu_ring *ring)
5914 {
5915 #ifdef CONFIG_X86_64
5916         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5917                 return;
5918 #endif
5919         if (adev->gmc.xgmi.connected_to_cpu)
5920                 return;
5921
5922         if (ring && ring->funcs->emit_hdp_flush)
5923                 amdgpu_ring_emit_hdp_flush(ring);
5924         else
5925                 amdgpu_asic_flush_hdp(adev, ring);
5926 }
5927
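/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP cache
 * @adev: amdgpu_device pointer
 * @ring: ring the invalidation is associated with, may be NULL
 *
 * Invalidates the HDP cache through the ASIC callback. Skipped on APUs
 * when not running as a passthrough device and on devices whose XGMI link
 * is connected to the CPU.
 */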
5928 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5929                 struct amdgpu_ring *ring)
5930 {
5931 #ifdef CONFIG_X86_64
5932         if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5933                 return;
5934 #endif
5935         if (adev->gmc.xgmi.connected_to_cpu)
5936                 return;
5937
5938         amdgpu_asic_invalidate_hdp(adev, ring);
5939 }
5940
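/**
 * amdgpu_in_reset - check whether a GPU reset is in progress
 * @adev: amdgpu_device pointer
 *
 * Return: non-zero while the reset domain that @adev belongs to is busy
 * with a GPU reset, zero otherwise.
 */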
5941 int amdgpu_in_reset(struct amdgpu_device *adev)
5942 {
5943         return atomic_read(&adev->reset_domain->in_gpu_reset);
5944 }
5945
5946 /**
5947  * amdgpu_device_halt() - bring hardware to some kind of halt state
5948  *
5949  * @adev: amdgpu_device pointer
5950  *
5951  * Bring the hardware to some kind of halt state so that no one can touch it
5952  * any more. This helps preserve the error context when an error occurs.
5953  * Compared to a simple hang, the system stays stable enough to allow at
5954  * least SSH access, so it should be trivial to inspect the hardware state
5955  * and see what's going on. Implemented as follows:
5956  *
5957  * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
5958  *    clears all CPU mappings to the device, disallows remapping through page faults
5959  * 2. amdgpu_irq_disable_all() disables all interrupts
5960  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5961  * 4. Set adev->no_hw_access to avoid potential crashes after step 5
5962  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5963  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5964  *    flush any in-flight DMA operations
5965  */
5966 void amdgpu_device_halt(struct amdgpu_device *adev)
5967 {
5968         struct pci_dev *pdev = adev->pdev;
5969         struct drm_device *ddev = adev_to_drm(adev);
5970
5971         drm_dev_unplug(ddev);
5972
5973         amdgpu_irq_disable_all(adev);
5974
5975         amdgpu_fence_driver_hw_fini(adev);
5976
5977         adev->no_hw_access = true;
5978
5979         amdgpu_device_unmap_mmio(adev);
5980
5981         pci_disable_device(pdev);
5982         pci_wait_for_pending_transaction(pdev);
5983 }
5984
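/**
 * amdgpu_device_pcie_port_rreg - read a PCIe port register
 * @adev: amdgpu_device pointer
 * @reg: register DWORD offset
 *
 * Reads the register through the NBIO index/data pair while holding the
 * pcie_idx_lock spinlock.
 *
 * Return: the 32-bit register value.
 */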
5985 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5986                                 u32 reg)
5987 {
5988         unsigned long flags, address, data;
5989         u32 r;
5990
5991         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5992         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5993
5994         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5995         WREG32(address, reg * 4);
5996         (void)RREG32(address);
5997         r = RREG32(data);
5998         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5999         return r;
6000 }
6001
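/**
 * amdgpu_device_pcie_port_wreg - write a PCIe port register
 * @adev: amdgpu_device pointer
 * @reg: register DWORD offset
 * @v: value to write
 *
 * Writes the register through the NBIO index/data pair while holding the
 * pcie_idx_lock spinlock, reading back the data offset to post the write.
 */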
6002 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6003                                 u32 reg, u32 v)
6004 {
6005         unsigned long flags, address, data;
6006
6007         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6008         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6009
6010         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6011         WREG32(address, reg * 4);
6012         (void)RREG32(address);
6013         WREG32(data, v);
6014         (void)RREG32(data);
6015         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6016 }
6017
6018 /**
6019  * amdgpu_device_switch_gang - switch to a new gang
6020  * @adev: amdgpu_device pointer
6021  * @gang: the gang to switch to
6022  *
6023  * Try to switch to a new gang.
6024  * Returns: NULL if we switched to the new gang or a reference to the current
6025  * gang leader.
6026  */
6027 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6028                                             struct dma_fence *gang)
6029 {
6030         struct dma_fence *old = NULL;
6031
6032         do {
6033                 dma_fence_put(old);
6034                 rcu_read_lock();
6035                 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6036                 rcu_read_unlock();
6037
6038                 if (old == gang)
6039                         break;
6040
6041                 if (!dma_fence_is_signaled(old))
6042                         return old;
6043
6044         } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6045                          old, gang) != old);
6046
6047         dma_fence_put(old);
6048         return NULL;
6049 }