/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"LAST",
};
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}
/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect)
{
	uint32_t ret;

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}
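
/*
 * Illustrative sketch (not part of the driver): registers inside the mapped
 * MMIO window are accessed directly, anything beyond it goes through the
 * mmMM_INDEX/mmMM_DATA pair under mmio_idx_lock.  Assuming "adev" is an
 * initialized amdgpu_device and "reg" a dword register offset:
 *
 *	u32 val = amdgpu_mm_rreg(adev, reg, false);
 *	amdgpu_mm_wreg(adev, reg, val | 0x1, false);
 *
 * Passing always_indirect = true forces the indexed path even for registers
 * that fall inside the mapped window.
 */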
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}
/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}
/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
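
/*
 * Illustrative sketch (not part of the driver): a ring would typically
 * publish its write pointer through its assigned doorbell slot.  Assuming
 * "adev", "doorbell_index" and "ring_wptr" are valid:
 *
 *	amdgpu_mm_wdoorbell(adev, doorbell_index, ring_wptr);
 *	u32 wptr = amdgpu_mm_rdoorbell(adev, doorbell_index);
 *
 * Out-of-range indices are rejected with a DRM_ERROR by the helpers above.
 */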
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}
/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}
/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}
/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r)
			return r;
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}
/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with and/or masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
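
/*
 * Illustrative sketch (not part of the driver): golden register tables are
 * flat arrays of (register, and_mask, or_mask) triples.  mmSOME_REG and
 * mmOTHER_REG below are placeholders, not real register offsets:
 *
 *	static const u32 example_golden_settings[] = {
 *		mmSOME_REG,  0xffffffff, 0x00000001,	// full overwrite
 *		mmOTHER_REG, 0x0000ff00, 0x00001200,	// read-modify-write
 *	};
 *	amdgpu_program_register_sequence(adev, example_golden_settings,
 *					 (const u32)ARRAY_SIZE(example_golden_settings));
 */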
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
	if (adev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);

	return 0;
}
/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}
/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
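
/*
 * Illustrative arithmetic (hypothetical numbers): with a 2MB doorbell BAR
 * and, assuming AMDGPU_DOORBELL_MAX_ASSIGNMENT + 1 == 0x400 doorbells kept
 * for amdgpu, amdkfd would be told:
 *
 *	aperture_base = adev->doorbell.base
 *	aperture_size = 2MB
 *	start_offset  = 0x400 * sizeof(u32) = 4KB
 *
 * i.e. everything past the first 4KB of the aperture belongs to amdkfd.
 */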
/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */
/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
			amdgpu_bo_kunmap(adev->wb.wb_obj);
			amdgpu_bo_unpin(adev->wb.wb_obj);
			amdgpu_bo_unreserve(adev->wb.wb_obj);
		}
		amdgpu_bo_unref(&adev->wb.wb_obj);
		adev->wb.wb = NULL;
		adev->wb.wb_obj = NULL;
	}
}
/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->wb.wb_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			amdgpu_wb_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &adev->wb.gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->wb.wb_obj);
			dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
			amdgpu_wb_fini(adev);
			return r;
		}
		r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
		amdgpu_bo_unreserve(adev->wb.wb_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
			amdgpu_wb_fini(adev);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
	}

	return 0;
}
/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}
/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
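
/*
 * Illustrative sketch (not part of the driver): a typical consumer grabs a
 * writeback slot, lets the GPU write a status dword into it, and frees it
 * again.  Assuming "adev" is valid:
 *
 *	u32 wb;
 *	if (!amdgpu_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);	// GPU writes here
 *		u32 status = adev->wb.wb[wb];			// CPU reads here
 *		amdgpu_wb_free(adev, wb);
 *	}
 */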
/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGPs, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882 along with lots of ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
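
/*
 * Illustrative layout (hypothetical numbers): with 4GB of VRAM placed at
 * base 0, amdgpu_vram_location() yields vram_start = 0x0 and
 * vram_end = 0xFFFFFFFF.  For a 1GB GTT, amdgpu_gtt_location() then finds
 * more room after VRAM than before it and places gtt_start at 0x100000000
 * (rounded up by gtt_base_align) and gtt_end at 0x13FFFFFFF.
 */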
/*
 * GPU helper functions.
 */
/**
 * amdgpu_card_posted - check if the hw has already been initialized
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool amdgpu_card_posted(struct amdgpu_device *adev)
{
	uint32_t reg;

	/* then check MEM_SIZE, in case the crtcs are off */
	reg = RREG32(mmCONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}
/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}
/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */
/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
}
/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
}
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}
/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}
/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}
/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}
/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	amdgpu_atombios_scratch_regs_init(adev);
	amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
	return 0;
}
/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}
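
/*
 * Note: unlike the kernel's generic is_power_of_2() (which rejects 0),
 * this check accepts zero, e.g. amdgpu_check_pot_argument(0) == true;
 * the callers below therefore pair it with explicit minimum-value checks.
 */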
/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(amdgpu_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (amdgpu_vm_size <= 8)
			amdgpu_vm_block_size = bits - 9;
		else
			amdgpu_vm_block_size = (bits + 3) / 2;

	} else if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}
}
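
/*
 * Worked example (illustrative): for amdgpu_vm_size == 64 (GB),
 * bits = ilog2(64) + 18 = 24, and since 64 > 8 the split is
 * amdgpu_vm_block_size = (24 + 3) / 2 = 13, i.e. 13 bits of page table
 * per directory entry with the remaining 11 bits in the page directory.
 */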
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_resume_kms(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_suspend_kms(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Checks whether the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
									    state);
			if (r)
				return r;
			break;
		}
	}
	return r;
}
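
/*
 * Illustrative sketch (not part of the driver): gating a single block, e.g.
 * ungating GFX before a reset, reads as:
 *
 *	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					 AMD_CG_STATE_UNGATE);
 *
 * The loop above only touches the first IP block whose type matches.
 */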
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
									    state);
			if (r)
				return r;
			break;
		}
	}
	return r;
}
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}
bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_blocks[i].type == block_type)
			return adev->ip_blocks[i].funcs->is_idle((void *)adev);
	}
	return true;
}
const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].type == type)
			return &adev->ip_blocks[i];

	return NULL;
}
/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	const struct amdgpu_ip_block_version *ip_block;
	ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->major > major) ||
			((ip_block->major == major) &&
			(ip_block->minor >= minor))))
		return 0;

	return 1;
}
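
/*
 * Illustrative sketch (not part of the driver): feature checks against a
 * minimum IP version read naturally with this comparator:
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 0) == 0) {
 *		// GFX 8.0 or newer is present
 *	}
 */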
static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
			if (!strcmp(pci_address_name, pciaddname)) {
				adev->enable_virtual_display = true;
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display);

		kfree(pciaddstr);
	}
}
static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_whether_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	adev->ip_block_status = kcalloc(adev->num_ip_blocks,
					sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
	if (adev->ip_block_status == NULL)
		return -ENOMEM;

	if (adev->ip_blocks == NULL) {
		DRM_ERROR("No IP blocks found!\n");
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d\n", i);
			adev->ip_block_status[i].valid = false;
		} else {
			if (adev->ip_blocks[i].funcs->early_init) {
				r = adev->ip_blocks[i].funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_block_status[i].valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
					return r;
				} else {
					adev->ip_block_status[i].valid = true;
				}
			} else {
				adev->ip_block_status[i].valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			return r;
		}
		adev->ip_block_status[i].sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_block_status[i].hw = true;
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			return r;
		}
		adev->ip_block_status[i].hw = true;
	}

	return 0;
}
static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		/* enable clockgating to save power */
		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
								    AMD_CG_STATE_GATE);
		if (r) {
			DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			return r;
		}
		if (adev->ip_blocks[i].funcs->late_init) {
			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}
static int amdgpu_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_block_status[i].hw)
			continue;
		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_wb_fini(adev);
			amdgpu_vram_scratch_fini(adev);
		}
		/* ungate blocks before hw fini so that we can shutdown the blocks safely */
		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
								    AMD_CG_STATE_UNGATE);
		if (r) {
			DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			return r;
		}
		r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
		}
		adev->ip_block_status[i].hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_block_status[i].sw)
			continue;
		r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
		}
		adev->ip_block_status[i].sw = false;
		adev->ip_block_status[i].valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (adev->ip_blocks[i].funcs->late_fini)
			adev->ip_blocks[i].funcs->late_fini((void *)adev);
	}

	return 0;
}
static int amdgpu_suspend(struct amdgpu_device *adev)
{
	int i, r;

	/* ungate SMC block first */
	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					 AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_block_status[i].valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (i != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
									    AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
		}
	}

	return 0;
}
static int amdgpu_resume(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		r = adev->ip_blocks[i].funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			return r;
		}
	}

	return 0;
}
static bool amdgpu_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}
/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->is_atom_bios = false;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->mc.gtt_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gart.gart_funcs = NULL;
	adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1526 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1527 amdgpu_asic_name
[adev
->asic_type
], pdev
->vendor
, pdev
->device
,
1528 pdev
->subsystem_vendor
, pdev
->subsystem_device
, pdev
->revision
);
1530 /* mutex initialization are all done here so we
1531 * can recall function without having locking issues */
1532 mutex_init(&adev
->vm_manager
.lock
);
1533 atomic_set(&adev
->irq
.ih
.lock
, 0);
1534 mutex_init(&adev
->pm
.mutex
);
1535 mutex_init(&adev
->gfx
.gpu_clock_mutex
);
1536 mutex_init(&adev
->srbm_mutex
);
1537 mutex_init(&adev
->grbm_idx_mutex
);
1538 mutex_init(&adev
->mn_lock
);
1539 hash_init(adev
->mn_hash
);
1541 amdgpu_check_arguments(adev
);
1543 /* Registers mapping */
1544 /* TODO: block userspace mapping of io register */
1545 spin_lock_init(&adev
->mmio_idx_lock
);
1546 spin_lock_init(&adev
->smc_idx_lock
);
1547 spin_lock_init(&adev
->pcie_idx_lock
);
1548 spin_lock_init(&adev
->uvd_ctx_idx_lock
);
1549 spin_lock_init(&adev
->didt_idx_lock
);
1550 spin_lock_init(&adev
->gc_cac_idx_lock
);
1551 spin_lock_init(&adev
->audio_endpt_idx_lock
);
1553 INIT_LIST_HEAD(&adev
->shadow_list
);
1554 mutex_init(&adev
->shadow_list_lock
);
	adev->rmmio_base = pci_resource_start(adev->pdev, 5);
	adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	/* doorbell bar mapping */
	amdgpu_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* early init functions */
	r = amdgpu_early_init(adev);
	if (r)
		return r;
	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);

	if (amdgpu_runtime_pm == 1)
		runtime = true;
	if (amdgpu_device_is_px(ddev))
		runtime = true;
	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}
	/* Must be an ATOMBIOS */
	if (!adev->is_atom_bios) {
		dev_err(adev->dev, "Expecting atombios for GPU\n");
		r = -EINVAL;
		goto failed;
	}
	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		goto failed;
	}
	/* See if the asic supports SR-IOV */
	adev->virtualization.supports_sr_iov =
		amdgpu_atombios_has_gpu_virtualization_table(adev);

	/* Check if we are executing in a virtualized environment */
	adev->virtualization.is_virtual = amdgpu_device_is_virtual();
	adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);

	/* Post card if necessary */
	if (!amdgpu_card_posted(adev) ||
	    (adev->virtualization.is_virtual &&
	     !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
		if (!adev->bios) {
			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		amdgpu_atom_asic_init(adev->mode_info.atom_context);
	}

	/* Initialize clocks */
	r = amdgpu_atombios_get_clock_info(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
		goto failed;
	}
	/* init i2c buses */
	amdgpu_atombios_i2c_init(adev);
	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_init failed\n");
		amdgpu_fini(adev);
		goto failed;
	}

	adev->accel_working = true;

	amdgpu_fbdev_init(adev);

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = amdgpu_gem_debugfs_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if ((amdgpu_testing & 2)) {
		if (adev->accel_working)
			amdgpu_test_syncing(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_late_init failed\n");
		goto failed;
	}

	return 0;

failed:
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	return r;
}
static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);
	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	drm_crtc_force_disable_all(adev->ddev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_fini(adev);
	kfree(adev->ip_block_status);
	adev->ip_block_status = NULL;
	adev->accel_working = false;
	/* free i2c buses */
	amdgpu_i2c_fini(adev);
	amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	amdgpu_doorbell_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
	amdgpu_debugfs_remove_files(adev);
}
/**
 * amdgpu_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: suspend state
 * @fbcon: suspend/resume the fbdev console
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, false);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, false);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_suspend(adev);

	/* evict remaining vram memory */
	amdgpu_bo_evict_vram(adev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}
/**
 * amdgpu_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: resume state
 * @fbcon: resume the fbdev console
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}

	/* post card */
	if (!amdgpu_card_posted(adev))
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

	r = amdgpu_resume(adev);
	if (r)
		DRM_ERROR("amdgpu_resume failed (%d).\n", r);

	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_late_init(adev);
	if (r)
		return r;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, false);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	drm_helper_hpd_irq_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon) {
		amdgpu_fbdev_set_suspend(adev, 0);
		console_unlock();
	}

	return 0;
}
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_blocks[i].funcs->check_soft_reset)
			adev->ip_blocks[i].funcs->check_soft_reset(adev);
		if (adev->ip_block_status[i].hang) {
			DRM_INFO("IP block:%d is hung!\n", i);
			asic_hang = true;
		}
	}
	return asic_hang;
}
int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_block_status[i].hang &&
		    adev->ip_blocks[i].funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
	if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
		DRM_INFO("Some blocks need a full reset!\n");
		return true;
	}
	return false;
}
static int amdgpu_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_block_status[i].hang &&
		    adev->ip_blocks[i].funcs->soft_reset) {
			r = adev->ip_blocks[i].funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}
static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_block_status[i].hang &&
		    adev->ip_blocks[i].funcs->post_soft_reset)
			r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}
bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return amdgpu_lockup_timeout > 0 ? true : false;
}
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
					   struct amdgpu_ring *ring,
					   struct amdgpu_bo *bo,
					   struct fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}
/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
	int i, r;
	int resched;
	bool need_full_reset;

	if (!amdgpu_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	atomic_inc(&adev->gpu_reset_counter);

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;
		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}
	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

	need_full_reset = amdgpu_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_pre_soft_reset(adev);
		r = amdgpu_soft_reset(adev);
		amdgpu_post_soft_reset(adev);
		if (r || amdgpu_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		/* save scratch */
		amdgpu_atombios_scratch_regs_save(adev);
		r = amdgpu_suspend(adev);

retry:
		/* Disable fb access */
		if (adev->mode_info.num_crtc) {
			struct amdgpu_mode_mc_save save;
			amdgpu_display_stop_mc_access(adev, &save);
			amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
		}

		r = amdgpu_asic_reset(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_resume(adev);
		}
		/* restore scratch */
		amdgpu_atombios_scratch_regs_restore(adev);
	}
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_suspend(adev);
			need_full_reset = true;
			goto retry;
		}
		/**
		 * recover vm page tables, since we cannot depend on VRAM being
		 * consistent after a gpu full reset.
		 */
		if (need_full_reset && amdgpu_need_backup(adev)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct fence *fence = NULL, *next = NULL;

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
						break;
					}
				}

				fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow isn't completed\n");
			}
			fence_put(fence);
		}
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (!ring)
				continue;

			amd_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		dev_err(adev->dev, "asic resume failed (%d).\n", r);
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			if (adev->rings[i]) {
				kthread_unpark(adev->rings[i]->sched.thread);
			}
		}
	}

	drm_helper_resume_force_mode(adev->ddev);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
	}

	return r;
}
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}
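/*
 * Usage note (added for clarity, not in the original file): amdgpu_pcie_gen_cap
 * and amdgpu_pcie_lane_cap are module-level override knobs; when either is
 * non-zero it is taken verbatim as the CAIL-style mask and the probing above
 * is skipped. Assuming the usual module_param_named() hookup in amdgpu_drv.c,
 * an override would look roughly like:
 *
 *	modprobe amdgpu pcie_gen_cap=<mask> pcie_lane_cap=<mask>
 *
 * where <mask> follows the CAIL_*_PCIE_LINK_* bit definitions used above.
 */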
/*
 * Debugfs
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->control->debugfs_root,
				 adev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}
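/*
 * Usage sketch (illustrative, not from the original file): an IP block or
 * subsystem registers its debugfs entries with a static drm_info_list table;
 * the "foo" names below are hypothetical:
 *
 *	static const struct drm_info_list amdgpu_foo_debugfs_list[] = {
 *		{ "amdgpu_foo_info", amdgpu_foo_debugfs_show, 0, NULL },
 *	};
 *
 *	r = amdgpu_debugfs_add_files(adev, amdgpu_foo_debugfs_list,
 *				     ARRAY_SIZE(amdgpu_foo_debugfs_list));
 *
 * Re-registering the same table is a no-op ("Already registered" above).
 */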
static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		drm_debugfs_remove_files(adev->debugfs[i].files,
					 adev->debugfs[i].num_files,
					 adev->ddev->control);
		drm_debugfs_remove_files(adev->debugfs[i].files,
					 adev->debugfs[i].num_files,
					 adev->ddev->primary);
	}
#endif
}
#if defined(CONFIG_DEBUG_FS)

static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= 0x3FFFF;

	if (use_bank) {
		if (sh_bank >= adev->gfx.config.max_sh_per_se ||
		    se_bank >= adev->gfx.config.max_shader_engines)
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}
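/*
 * Doc note (added for clarity): the debugfs offset for amdgpu_regs is not a
 * plain byte offset; amdgpu_debugfs_regs_read() above decodes it as
 *
 *	bits  0..17  register byte offset (masked with 0x3FFFF)
 *	bit  23      take the PM power-gating mutex around the access
 *	bits 24..33  shader engine (SE) index
 *	bits 34..43  shader array (SH) index
 *	bits 44..53  instance index
 *	bit  62      the SE/SH/instance fields are valid (GRBM bank select)
 *
 * A hypothetical userspace read of register byte offset 0x100 therefore looks
 * roughly like:
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
 *	uint32_t v;
 *	pread(fd, &v, sizeof(v), 0x100);
 */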
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
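/*
 * Note (added for clarity): unlike amdgpu_regs, which goes through plain MMIO
 * (RREG32/WREG32) and is bounded by rmmio_size, the PCIE, DIDT and SMC files
 * above use the indirect RREG32_PCIE/_DIDT/_SMC accessors, which typically go
 * through an index/data register pair under a spinlock, so no aperture-size
 * bounds check applies to the offset.
 */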
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 2;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			/* unwind the entries created so far (index j, not i) */
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		/* only amdgpu_regs spans the whole MMIO aperture */
		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}

int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void amdgpu_debugfs_cleanup(struct drm_minor *minor)
{
}
#else
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif