/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static const char *amdgpu_asic_name[] = {
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "LAST",
};
bool amdgpu_device_is_px(struct drm_device *dev)
{
        struct amdgpu_device *adev = dev->dev_private;

        if (adev->flags & AMD_IS_PX)
                return true;
        return false;
}
/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        bool always_indirect)
{
        uint32_t ret;

        if ((reg * 4) < adev->rmmio_size && !always_indirect)
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }
        trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
        return ret;
}
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    bool always_indirect)
{
        trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

        if ((reg * 4) < adev->rmmio_size && !always_indirect)
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }
}
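/*
 * Usage sketch (illustrative, not part of the original file): the driver
 * normally accesses registers through the RREG32()/WREG32() macros, which
 * wrap these helpers. Registers inside the MMIO BAR are read and written
 * directly; anything past rmmio_size goes through the MM_INDEX/MM_DATA
 * window while holding mmio_idx_lock.
 *
 *      u32 val = amdgpu_mm_rreg(adev, reg, false);     (direct if in range)
 *      amdgpu_mm_wreg(adev, reg, val | 0x1, true);     (force indirect path)
 */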
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
        if ((reg * 4) < adev->rio_mem_size)
                return ioread32(adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                return ioread32(adev->rio_mem + (mmMM_DATA * 4));
        }
}
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
        }
}
/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}
/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}
/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}
/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}
/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->vram_scratch.robj == NULL) {
                r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
                                     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                     NULL, NULL, &adev->vram_scratch.robj);
                if (r)
                        return r;
        }

        r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
        if (unlikely(r != 0))
                return r;
        r = amdgpu_bo_pin(adev->vram_scratch.robj,
                          AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
        if (r) {
                amdgpu_bo_unreserve(adev->vram_scratch.robj);
                return r;
        }
        r = amdgpu_bo_kmap(adev->vram_scratch.robj,
                           (void **)&adev->vram_scratch.ptr);
        if (r)
                amdgpu_bo_unpin(adev->vram_scratch.robj);
        amdgpu_bo_unreserve(adev->vram_scratch.robj);

        return r;
}
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
        int r;

        if (adev->vram_scratch.robj == NULL) {
                return;
        }
        r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
        if (likely(r == 0)) {
                amdgpu_bo_kunmap(adev->vram_scratch.robj);
                amdgpu_bo_unpin(adev->vram_scratch.robj);
                amdgpu_bo_unreserve(adev->vram_scratch.robj);
        }
        amdgpu_bo_unref(&adev->vram_scratch.robj);
}
/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
                                      const u32 *registers,
                                      const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}
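/*
 * Illustrative example (not from the original file; the register name below
 * is hypothetical): golden-register tables are flat arrays of
 * (offset, AND mask, OR mask) triplets. Each register is updated as
 * tmp = (RREG32(reg) & ~and_mask) | or_mask, except that an AND mask of
 * 0xffffffff writes the OR mask verbatim.
 *
 *      static const u32 golden_settings_example[] = {
 *              mmEXAMPLE_REG, 0xffffff00, 0x00000012,
 *      };
 *      amdgpu_program_register_sequence(adev, golden_settings_example,
 *                                       ARRAY_SIZE(golden_settings_example));
 */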
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);

        adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
                                             AMDGPU_DOORBELL_MAX_ASSIGNMENT + 1);
        if (adev->doorbell.num_doorbells == 0)
                return -EINVAL;

        adev->doorbell.ptr = ioremap(adev->doorbell.base,
                                     adev->doorbell.num_doorbells * sizeof(u32));
        if (adev->doorbell.ptr == NULL) {
                return -ENOMEM;
        }
        DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
        DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);

        return 0;
}
/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
        iounmap(adev->doorbell.ptr);
        adev->doorbell.ptr = NULL;
}
/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                set up amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
                                  phys_addr_t *aperture_base,
                                  size_t *aperture_size,
                                  size_t *start_offset)
{
        /*
         * The first num_doorbells are used by amdgpu.
         * amdkfd takes whatever's left in the aperture.
         */
        if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
                *aperture_base = adev->doorbell.base;
                *aperture_size = adev->doorbell.size;
                *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
        } else {
                *aperture_base = 0;
                *aperture_size = 0;
                *start_offset = 0;
        }
}
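/*
 * Worked example (illustrative): with a 2 MiB doorbell BAR and
 * num_doorbells = 1024, amdgpu keeps the first 1024 * sizeof(u32) = 4 KiB,
 * and amdkfd is reported aperture_base = doorbell.base,
 * aperture_size = 2 MiB and start_offset = 4 KiB.
 */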
/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */
/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
                        amdgpu_bo_kunmap(adev->wb.wb_obj);
                        amdgpu_bo_unpin(adev->wb.wb_obj);
                        amdgpu_bo_unreserve(adev->wb.wb_obj);
                }
                amdgpu_bo_unref(&adev->wb.wb_obj);
                adev->wb.wb = NULL;
                adev->wb.wb_obj = NULL;
        }
}
/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates the writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
                                     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
                                     &adev->wb.wb_obj);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }
                r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
                if (unlikely(r != 0)) {
                        amdgpu_wb_fini(adev);
                        return r;
                }
                r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
                                  &adev->wb.gpu_addr);
                if (r) {
                        amdgpu_bo_unreserve(adev->wb.wb_obj);
                        dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
                        amdgpu_wb_fini(adev);
                        return r;
                }
                r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
                amdgpu_bo_unreserve(adev->wb.wb_obj);
                if (r) {
                        dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
                        amdgpu_wb_fini(adev);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
        }

        return 0;
}
/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset;
                return 0;
        } else {
                return -EINVAL;
        }
}
/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}
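/*
 * Usage sketch (illustrative, not part of the original file): a ring
 * allocates a writeback slot, derives CPU and GPU addresses for it, and
 * releases the slot on teardown.
 *
 *      u32 wb;
 *
 *      if (!amdgpu_wb_get(adev, &wb)) {
 *              volatile u32 *cpu_ptr = &adev->wb.wb[wb];
 *              u64 wb_gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *              ...
 *              amdgpu_wb_free(adev, wb);
 *      }
 */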
/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGPs, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not all cases.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than aperture size
 * (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and friends).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
        uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

        mc->vram_start = base;
        if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
                dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
                mc->real_vram_size = mc->aper_size;
                mc->mc_vram_size = mc->aper_size;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
        if (limit && limit < mc->real_vram_size)
                mc->real_vram_size = limit;
        dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
                 mc->mc_vram_size >> 20, mc->vram_start,
                 mc->vram_end, mc->real_vram_size >> 20);
}
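/*
 * Worked example (illustrative): with base = 0 and
 * mc_mask = (1ULL << 40) - 1, an 8 GiB mc_vram_size fits, so
 * vram_end = 0x1FFFFFFFF; booting with amdgpu.vramlimit=4096 (MiB) would
 * then clamp real_vram_size to 4 GiB while mc_vram_size stays 8 GiB.
 */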
/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
        u64 size_af, size_bf;

        size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
        size_bf = mc->vram_start & ~mc->gtt_base_align;
        if (size_bf > size_af) {
                if (mc->gtt_size > size_bf) {
                        dev_warn(adev->dev, "limiting GTT\n");
                        mc->gtt_size = size_bf;
                }
                mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
        } else {
                if (mc->gtt_size > size_af) {
                        dev_warn(adev->dev, "limiting GTT\n");
                        mc->gtt_size = size_af;
                }
                mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
        }
        mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
        dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
                 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
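/*
 * Worked example (illustrative): with VRAM at 0x0 - 0xFFFFFFFF,
 * mc_mask = (1ULL << 40) - 1 and gtt_base_align = 0, size_bf is 0 and
 * size_af covers everything above VRAM, so the GTT is placed at
 * vram_end + 1 = 0x100000000.
 */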
/*
 * GPU helper functions.
 */
/**
 * amdgpu_card_posted - check if the hw has already been initialized
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool amdgpu_card_posted(struct amdgpu_device *adev)
{
        uint32_t reg;

        /* then check MEM_SIZE, in case the crtcs are off */
        reg = RREG32(mmCONFIG_MEMSIZE);

        if (reg)
                return true;

        return false;
}
/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
        if (adev->dummy_page.page)
                return 0;
        adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
        if (adev->dummy_page.page == NULL)
                return -ENOMEM;
        adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
                                             0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
                dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
                __free_page(adev->dummy_page.page);
                adev->dummy_page.page = NULL;
                return -ENOMEM;
        }
        return 0;
}
/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
        if (adev->dummy_page.page == NULL)
                return;
        pci_unmap_page(adev->pdev, adev->dummy_page.addr,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        __free_page(adev->dummy_page.page);
        adev->dummy_page.page = NULL;
}
/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */
/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
        return 0;
}
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}
/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
        return 0;
}
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct amdgpu_device *adev = info->dev->dev_private;

        WREG32(reg, val);
}
/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
        struct amdgpu_device *adev = info->dev->dev_private;
        uint32_t r;

        r = RREG32(reg);
        return r;
}
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct amdgpu_device *adev = info->dev->dev_private;

        WREG32_IO(reg, val);
}
/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
        struct amdgpu_device *adev = info->dev->dev_private;
        uint32_t r;

        r = RREG32_IO(reg);
        return r;
}
/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
        if (adev->mode_info.atom_context) {
                kfree(adev->mode_info.atom_context->scratch);
                kfree(adev->mode_info.atom_context->iio);
        }
        kfree(adev->mode_info.atom_context);
        adev->mode_info.atom_context = NULL;
        kfree(adev->mode_info.atom_card_info);
        adev->mode_info.atom_card_info = NULL;
}
/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
        struct card_info *atom_card_info =
            kzalloc(sizeof(struct card_info), GFP_KERNEL);

        if (!atom_card_info)
                return -ENOMEM;

        adev->mode_info.atom_card_info = atom_card_info;
        atom_card_info->dev = adev->ddev;
        atom_card_info->reg_read = cail_reg_read;
        atom_card_info->reg_write = cail_reg_write;
        /* needed for iio ops */
        if (adev->rio_mem) {
                atom_card_info->ioreg_read = cail_ioreg_read;
                atom_card_info->ioreg_write = cail_ioreg_write;
        } else {
                DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
                atom_card_info->ioreg_read = cail_reg_read;
                atom_card_info->ioreg_write = cail_reg_write;
        }
        atom_card_info->mc_read = cail_mc_read;
        atom_card_info->mc_write = cail_mc_write;
        atom_card_info->pll_read = cail_pll_read;
        atom_card_info->pll_write = cail_pll_write;

        adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
        if (!adev->mode_info.atom_context) {
                amdgpu_atombios_fini(adev);
                return -ENOMEM;
        }

        mutex_init(&adev->mode_info.atom_context->mutex);
        amdgpu_atombios_scratch_regs_init(adev);
        amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
        return 0;
}
/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
        struct amdgpu_device *adev = cookie;
        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
        return (arg & (arg - 1)) == 0;
}
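/*
 * Example (illustrative): 8 -> true, 12 -> false. Note that 0 also passes
 * this test, so callers are expected to range-check separately, as
 * amdgpu_check_arguments() below does for the minimum values.
 */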
/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = 4;
        } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        }

        if (amdgpu_gart_size != -1) {
                /* gtt size must be greater or equal to 32M */
                if (amdgpu_gart_size < 32) {
                        dev_warn(adev->dev, "gart size (%d) too small\n",
                                 amdgpu_gart_size);
                        amdgpu_gart_size = -1;
                }
        }

        if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
                dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = 8;
        }

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = 8;
        }

        /*
         * Max GPUVM size for Cayman, SI and CI are 40 bits.
         */
        if (amdgpu_vm_size > 1024) {
                dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = 8;
        }

        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (amdgpu_vm_block_size == -1) {

                /* Total bits covered by PD + PTs */
                unsigned bits = ilog2(amdgpu_vm_size) + 18;

                /* Make sure the PD is 4K in size up to 8GB address space.
                   Above that split equal between PD and PTs */
                if (amdgpu_vm_size <= 8)
                        amdgpu_vm_block_size = bits - 9;
                else
                        amdgpu_vm_block_size = (bits + 3) / 2;

        } else if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = 9;
        }

        if (amdgpu_vm_block_size > 24 ||
            (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
                dev_warn(adev->dev, "VM page table size (%d) too large\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = 9;
        }
}
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after they are powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
                return;

        if (state == VGA_SWITCHEROO_ON) {
                unsigned d3_delay = dev->pdev->d3_delay;

                printk(KERN_INFO "amdgpu: switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

                amdgpu_resume_kms(dev, true, true);

                dev->pdev->d3_delay = d3_delay;

                dev->switch_power_state = DRM_SWITCH_POWER_ON;
                drm_kms_helper_poll_enable(dev);
        } else {
                printk(KERN_INFO "amdgpu: switched off\n");
                drm_kms_helper_poll_disable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_suspend_kms(dev, true, true);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}
/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return dev->open_count == 0;
}
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
        .set_gpu_state = amdgpu_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = amdgpu_switcheroo_can_switch,
};
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
                                 enum amd_ip_block_type block_type,
                                 enum amd_clockgating_state state)
{
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;
                if (adev->ip_blocks[i].type == block_type) {
                        r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
                                                                            state);
                        if (r)
                                return r;
                        break;
                }
        }
        return r;
}
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
                                 enum amd_ip_block_type block_type,
                                 enum amd_powergating_state state)
{
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;
                if (adev->ip_blocks[i].type == block_type) {
                        r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
                                                                            state);
                        if (r)
                                return r;
                        break;
                }
        }
        return r;
}
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
                         enum amd_ip_block_type block_type)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;
                if (adev->ip_blocks[i].type == block_type) {
                        r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
                        if (r)
                                return r;
                        break;
                }
        }
        return 0;
}
bool amdgpu_is_idle(struct amdgpu_device *adev,
                    enum amd_ip_block_type block_type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;
                if (adev->ip_blocks[i].type == block_type)
                        return adev->ip_blocks[i].funcs->is_idle((void *)adev);
        }
        return true;
}
const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
                                        struct amdgpu_device *adev,
                                        enum amd_ip_block_type type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++)
                if (adev->ip_blocks[i].type == type)
                        return &adev->ip_blocks[i];

        return NULL;
}
/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
                                enum amd_ip_block_type type,
                                u32 major, u32 minor)
{
        const struct amdgpu_ip_block_version *ip_block;
        ip_block = amdgpu_get_ip_block(adev, type);

        if (ip_block && ((ip_block->major > major) ||
                        ((ip_block->major == major) &&
                        (ip_block->minor >= minor))))
                return 0;

        return 1;
}
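/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *      if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 0) == 0)
 *              ...the GFX IP block is version 8.0 or newer...
 */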
static int amdgpu_early_init(struct amdgpu_device *adev)
{
        int i, r;

        switch (adev->asic_type) {
        case CHIP_TOPAZ:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
                        adev->family = AMDGPU_FAMILY_CZ;
                else
                        adev->family = AMDGPU_FAMILY_VI;

                r = vi_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
                if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
                        adev->family = AMDGPU_FAMILY_CI;
                else
                        adev->family = AMDGPU_FAMILY_KV;

                r = cik_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
#endif
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        adev->ip_block_status = kcalloc(adev->num_ip_blocks,
                                        sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
        if (adev->ip_block_status == NULL)
                return -ENOMEM;

        if (adev->ip_blocks == NULL) {
                DRM_ERROR("No IP blocks found!\n");
                return r;
        }

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
                        DRM_ERROR("disabled ip block: %d\n", i);
                        adev->ip_block_status[i].valid = false;
                } else {
                        if (adev->ip_blocks[i].funcs->early_init) {
                                r = adev->ip_blocks[i].funcs->early_init((void *)adev);
                                if (r == -ENOENT) {
                                        adev->ip_block_status[i].valid = false;
                                } else if (r) {
                                        DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
                                        return r;
                                } else {
                                        adev->ip_block_status[i].valid = true;
                                }
                        } else {
                                adev->ip_block_status[i].valid = true;
                        }
                }
        }

        adev->cg_flags &= amdgpu_cg_mask;
        adev->pg_flags &= amdgpu_pg_mask;

        return 0;
}
static int amdgpu_init(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;
                r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
                if (r) {
                        DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
                        return r;
                }
                adev->ip_block_status[i].sw = true;
                /* need to do gmc hw init early so we can allocate gpu mem */
                if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
                        r = amdgpu_vram_scratch_init(adev);
                        if (r) {
                                DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
                                return r;
                        }
                        r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
                        if (r) {
                                DRM_ERROR("hw_init %d failed %d\n", i, r);
                                return r;
                        }
                        r = amdgpu_wb_init(adev);
                        if (r) {
                                DRM_ERROR("amdgpu_wb_init failed %d\n", r);
                                return r;
                        }
                        adev->ip_block_status[i].hw = true;
                }
        }

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].sw)
                        continue;
                /* gmc hw init is done early */
                if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
                        continue;
                r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
                if (r) {
                        DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
                        return r;
                }
                adev->ip_block_status[i].hw = true;
        }

        return 0;
}
static int amdgpu_late_init(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;
                /* enable clockgating to save power */
                r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
                                                                    AMD_CG_STATE_GATE);
                if (r) {
                        DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
                        return r;
                }
                if (adev->ip_blocks[i].funcs->late_init) {
                        r = adev->ip_blocks[i].funcs->late_init((void *)adev);
                        if (r) {
                                DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
                                return r;
                        }
                }
        }

        return 0;
}
static int amdgpu_fini(struct amdgpu_device *adev)
{
        int i, r;

        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_block_status[i].hw)
                        continue;
                if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
                        amdgpu_wb_fini(adev);
                        amdgpu_vram_scratch_fini(adev);
                }
                /* ungate blocks before hw fini so that we can shutdown the blocks safely */
                r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
                                                                    AMD_CG_STATE_UNGATE);
                if (r) {
                        DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
                        return r;
                }
                r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
                /* XXX handle errors */
                if (r) {
                        DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
                }
                adev->ip_block_status[i].hw = false;
        }

        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_block_status[i].sw)
                        continue;
                r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
                /* XXX handle errors */
                if (r) {
                        DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
                }
                adev->ip_block_status[i].sw = false;
                adev->ip_block_status[i].valid = false;
        }

        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (adev->ip_blocks[i].funcs->late_fini)
                        adev->ip_blocks[i].funcs->late_fini((void *)adev);
        }

        return 0;
}
static int amdgpu_suspend(struct amdgpu_device *adev)
{
        int i, r;

        /* ungate SMC block first */
        r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
                                         AMD_CG_STATE_UNGATE);
        if (r) {
                DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
        }

        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_block_status[i].valid)
                        continue;
                /* ungate blocks so that suspend can properly shut them down */
                if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_SMC) {
                        r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
                                                                            AMD_CG_STATE_UNGATE);
                        if (r) {
                                DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
                        }
                }
                /* XXX handle errors */
                r = adev->ip_blocks[i].funcs->suspend(adev);
                /* XXX handle errors */
                if (r) {
                        DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
                }
        }

        return 0;
}
static int amdgpu_resume(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;
                r = adev->ip_blocks[i].funcs->resume(adev);
                if (r) {
                        DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
                        return r;
                }
        }

        return 0;
}
static bool amdgpu_device_is_virtual(void)
{
#ifdef CONFIG_X86
        return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
        return false;
#endif
}
/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
                       struct drm_device *ddev,
                       struct pci_dev *pdev,
                       uint32_t flags)
{
        int r, i;
        bool runtime = false;

        adev->shutdown = false;
        adev->dev = &pdev->dev;
        adev->ddev = ddev;
        adev->pdev = pdev;
        adev->flags = flags;
        adev->asic_type = flags & AMD_ASIC_MASK;
        adev->is_atom_bios = false;
        adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
        adev->mc.gtt_size = 512 * 1024 * 1024;
        adev->accel_working = false;
        adev->num_rings = 0;
        adev->mman.buffer_funcs = NULL;
        adev->mman.buffer_funcs_ring = NULL;
        adev->vm_manager.vm_pte_funcs = NULL;
        adev->vm_manager.vm_pte_num_rings = 0;
        adev->gart.gart_funcs = NULL;
        adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);

        adev->smc_rreg = &amdgpu_invalid_rreg;
        adev->smc_wreg = &amdgpu_invalid_wreg;
        adev->pcie_rreg = &amdgpu_invalid_rreg;
        adev->pcie_wreg = &amdgpu_invalid_wreg;
        adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
        adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
        adev->didt_rreg = &amdgpu_invalid_rreg;
        adev->didt_wreg = &amdgpu_invalid_wreg;
        adev->gc_cac_rreg = &amdgpu_invalid_rreg;
        adev->gc_cac_wreg = &amdgpu_invalid_wreg;
        adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
        adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
                 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
                 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

        /* mutex initialization are all done here so we
         * can recall function without having locking issues */
        mutex_init(&adev->vm_manager.lock);
        atomic_set(&adev->irq.ih.lock, 0);
        mutex_init(&adev->pm.mutex);
        mutex_init(&adev->gfx.gpu_clock_mutex);
        mutex_init(&adev->srbm_mutex);
        mutex_init(&adev->grbm_idx_mutex);
        mutex_init(&adev->mn_lock);
        hash_init(adev->mn_hash);

        amdgpu_check_arguments(adev);

        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
        spin_lock_init(&adev->mmio_idx_lock);
        spin_lock_init(&adev->smc_idx_lock);
        spin_lock_init(&adev->pcie_idx_lock);
        spin_lock_init(&adev->uvd_ctx_idx_lock);
        spin_lock_init(&adev->didt_idx_lock);
        spin_lock_init(&adev->gc_cac_idx_lock);
        spin_lock_init(&adev->audio_endpt_idx_lock);

        adev->rmmio_base = pci_resource_start(adev->pdev, 5);
        adev->rmmio_size = pci_resource_len(adev->pdev, 5);
        adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
        if (adev->rmmio == NULL) {
                return -ENOMEM;
        }
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

        /* doorbell bar mapping */
        amdgpu_doorbell_init(adev);

        /* io port mapping */
        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
                        adev->rio_mem_size = pci_resource_len(adev->pdev, i);
                        adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
                        break;
                }
        }
        if (adev->rio_mem == NULL)
                DRM_ERROR("Unable to find PCI I/O BAR\n");

        /* early init functions */
        r = amdgpu_early_init(adev);
        if (r)
                return r;

        /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
        /* this will fail for cards that aren't VGA class devices, just
         * ignore it */
        vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);

        if (amdgpu_runtime_pm == 1)
                runtime = true;
        if (amdgpu_device_is_px(ddev))
                runtime = true;
        vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
        if (runtime)
                vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

        /* Read BIOS */
        if (!amdgpu_get_bios(adev)) {
                r = -EINVAL;
                goto failed;
        }
        /* Must be an ATOMBIOS */
        if (!adev->is_atom_bios) {
                dev_err(adev->dev, "Expecting atombios for GPU\n");
                r = -EINVAL;
                goto failed;
        }
        r = amdgpu_atombios_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_atombios_init failed\n");
                goto failed;
        }

        /* See if the asic supports SR-IOV */
        adev->virtualization.supports_sr_iov =
                amdgpu_atombios_has_gpu_virtualization_table(adev);

        /* Check if we are executing in a virtualized environment */
        adev->virtualization.is_virtual = amdgpu_device_is_virtual();
        adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);

        /* Post card if necessary */
        if (!amdgpu_card_posted(adev) ||
            (adev->virtualization.is_virtual &&
             !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
                if (!adev->bios) {
                        dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
                        r = -EINVAL;
                        goto failed;
                }
                DRM_INFO("GPU not posted. posting now...\n");
                amdgpu_atom_asic_init(adev->mode_info.atom_context);
        }

        /* Initialize clocks */
        r = amdgpu_atombios_get_clock_info(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
                goto failed;
        }
        /* init i2c buses */
        amdgpu_atombios_i2c_init(adev);

        /* Fence driver */
        r = amdgpu_fence_driver_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
                goto failed;
        }

        /* init the mode config */
        drm_mode_config_init(adev->ddev);

        r = amdgpu_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_init failed\n");
                amdgpu_fini(adev);
                goto failed;
        }

        adev->accel_working = true;

        amdgpu_fbdev_init(adev);

        r = amdgpu_ib_pool_init(adev);
        if (r) {
                dev_err(adev->dev, "IB initialization failed (%d).\n", r);
                goto failed;
        }

        r = amdgpu_ib_ring_tests(adev);
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);

        r = amdgpu_gem_debugfs_init(adev);
        if (r) {
                DRM_ERROR("registering gem debugfs failed (%d).\n", r);
        }

        r = amdgpu_debugfs_regs_init(adev);
        if (r) {
                DRM_ERROR("registering register debugfs failed (%d).\n", r);
        }

        r = amdgpu_debugfs_firmware_init(adev);
        if (r) {
                DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
        }

        if ((amdgpu_testing & 1)) {
                if (adev->accel_working)
                        amdgpu_test_moves(adev);
                else
                        DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
        }
        if ((amdgpu_testing & 2)) {
                if (adev->accel_working)
                        amdgpu_test_syncing(adev);
                else
                        DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
        }
        if (amdgpu_benchmarking) {
                if (adev->accel_working)
                        amdgpu_benchmark(adev, amdgpu_benchmarking);
                else
                        DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
        }

        /* enable clockgating, etc. after ib tests, etc. since some blocks require
         * explicit gating rather than handling it automatically.
         */
        r = amdgpu_late_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_late_init failed\n");
                goto failed;
        }

        return 0;

failed:
        if (runtime)
                vga_switcheroo_fini_domain_pm_ops(adev->dev);
        return r;
}
static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
        int r;

        DRM_INFO("amdgpu: finishing device.\n");
        adev->shutdown = true;
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
        amdgpu_ib_pool_fini(adev);
        amdgpu_fence_driver_fini(adev);
        drm_crtc_force_disable_all(adev->ddev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_fini(adev);
        kfree(adev->ip_block_status);
        adev->ip_block_status = NULL;
        adev->accel_working = false;
        /* free i2c buses */
        amdgpu_i2c_fini(adev);
        amdgpu_atombios_fini(adev);
        kfree(adev->bios);
        adev->bios = NULL;
        vga_switcheroo_unregister_client(adev->pdev);
        if (adev->flags & AMD_IS_PX)
                vga_switcheroo_fini_domain_pm_ops(adev->dev);
        vga_client_register(adev->pdev, NULL, NULL, NULL);
        if (adev->rio_mem)
                pci_iounmap(adev->pdev, adev->rio_mem);
        adev->rio_mem = NULL;
        iounmap(adev->rmmio);
        adev->rmmio = NULL;
        amdgpu_doorbell_fini(adev);
        amdgpu_debugfs_regs_cleanup(adev);
        amdgpu_debugfs_remove_files(adev);
}
/**
 * amdgpu_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to put the device into a low power state
 * @fbcon: true to suspend the fbdev console
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
        struct amdgpu_device *adev;
        struct drm_crtc *crtc;
        struct drm_connector *connector;
        int r;

        if (dev == NULL || dev->dev_private == NULL) {
                return -ENODEV;
        }

        adev = dev->dev_private;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        drm_kms_helper_poll_disable(dev);

        /* turn off display hw */
        drm_modeset_lock_all(dev);
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
        }
        drm_modeset_unlock_all(dev);

        /* unpin the front buffers and cursors */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
                struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
                struct amdgpu_bo *robj;

                if (amdgpu_crtc->cursor_bo) {
                        struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
                        r = amdgpu_bo_reserve(aobj, false);
                        if (r == 0) {
                                amdgpu_bo_unpin(aobj);
                                amdgpu_bo_unreserve(aobj);
                        }
                }

                if (rfb == NULL || rfb->obj == NULL) {
                        continue;
                }
                robj = gem_to_amdgpu_bo(rfb->obj);
                /* don't unpin kernel fb objects */
                if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
                        r = amdgpu_bo_reserve(robj, false);
                        if (r == 0) {
                                amdgpu_bo_unpin(robj);
                                amdgpu_bo_unreserve(robj);
                        }
                }
        }
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);

        amdgpu_fence_driver_suspend(adev);

        r = amdgpu_suspend(adev);

        /* evict remaining vram memory */
        amdgpu_bo_evict_vram(adev);

        pci_save_state(dev->pdev);
        if (suspend) {
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }

        if (fbcon) {
                console_lock();
                amdgpu_fbdev_set_suspend(adev, 1);
                console_unlock();
        }
        return 0;
}
/**
 * amdgpu_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to re-enable the pci device
 * @fbcon: true to resume the fbdev console
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
        struct drm_connector *connector;
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_crtc *crtc;
        int r;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        if (fbcon)
                console_lock();

        if (resume) {
                pci_set_power_state(dev->pdev, PCI_D0);
                pci_restore_state(dev->pdev);
                if (pci_enable_device(dev->pdev)) {
                        if (fbcon)
                                console_unlock();
                        return -1;
                }
        }

        /* post card */
        if (!amdgpu_card_posted(adev))
                amdgpu_atom_asic_init(adev->mode_info.atom_context);

        r = amdgpu_resume(adev);
        if (r)
                DRM_ERROR("amdgpu_resume failed (%d).\n", r);

        amdgpu_fence_driver_resume(adev);

        if (resume) {
                r = amdgpu_ib_ring_tests(adev);
                if (r)
                        DRM_ERROR("ib ring test failed (%d).\n", r);
        }

        r = amdgpu_late_init(adev);
        if (r)
                return r;

        /* pin cursors */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->cursor_bo) {
                        struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
                        r = amdgpu_bo_reserve(aobj, false);
                        if (r == 0) {
                                r = amdgpu_bo_pin(aobj,
                                                  AMDGPU_GEM_DOMAIN_VRAM,
                                                  &amdgpu_crtc->cursor_addr);
                                if (r != 0)
                                        DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
                                amdgpu_bo_unreserve(aobj);
                        }
                }
        }

        /* blat the mode back in */
        if (fbcon) {
                drm_helper_resume_force_mode(dev);
                /* turn on display hw */
                drm_modeset_lock_all(dev);
                list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
                }
                drm_modeset_unlock_all(dev);
        }

        drm_kms_helper_poll_enable(dev);

        /*
         * Most of the connector probing functions try to acquire runtime pm
         * refs to ensure that the GPU is powered on when connector polling is
         * performed. Since we're calling this from a runtime PM callback,
         * trying to acquire rpm refs will cause us to deadlock.
         *
         * Since we're guaranteed to be holding the rpm lock, it's safe to
         * temporarily disable the rpm helpers so this doesn't deadlock us.
         */
#ifdef CONFIG_PM
        dev->dev->power.disable_depth++;
#endif
        drm_helper_hpd_irq_event(dev);
#ifdef CONFIG_PM
        dev->dev->power.disable_depth--;
#endif

        if (fbcon) {
                amdgpu_fbdev_set_suspend(adev, 0);
                console_unlock();
        }

        return 0;
}
/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
        int i, r;
        int resched;

        atomic_inc(&adev->gpu_reset_counter);

        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

        /* block scheduler */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring)
                        continue;
                kthread_park(ring->sched.thread);
                amd_sched_hw_job_reset(&ring->sched);
        }
        /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
        amdgpu_fence_driver_force_completion(adev);

        /* save scratch */
        amdgpu_atombios_scratch_regs_save(adev);
        r = amdgpu_suspend(adev);

retry:
        /* Disable fb access */
        if (adev->mode_info.num_crtc) {
                struct amdgpu_mode_mc_save save;
                amdgpu_display_stop_mc_access(adev, &save);
                amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
        }

        r = amdgpu_asic_reset(adev);
        /* post card */
        amdgpu_atom_asic_init(adev->mode_info.atom_context);

        if (!r) {
                dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
                r = amdgpu_resume(adev);
        }
        /* restore scratch */
        amdgpu_atombios_scratch_regs_restore(adev);
        if (!r) {
                r = amdgpu_ib_ring_tests(adev);
                if (r) {
                        dev_err(adev->dev, "ib ring test failed (%d).\n", r);
                        r = amdgpu_suspend(adev);
                        goto retry;
                }

                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = adev->rings[i];
                        if (!ring)
                                continue;
                        amd_sched_job_recovery(&ring->sched);
                        kthread_unpark(ring->sched.thread);
                }
        } else {
                dev_err(adev->dev, "asic resume failed (%d).\n", r);
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        if (adev->rings[i]) {
                                kthread_unpark(adev->rings[i]->sched.thread);
                        }
                }
        }

        drm_helper_resume_force_mode(adev->ddev);

        ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
        if (r) {
                /* bad news, how to tell it to userspace ? */
                dev_info(adev->dev, "GPU reset failed\n");
        }
        amdgpu_irq_gpu_reset_resume_helper(adev);

        return r;
}
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
        u32 mask;
        int ret;

        if (amdgpu_pcie_gen_cap)
                adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

        if (amdgpu_pcie_lane_cap)
                adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

        /* covers APUs as well */
        if (pci_is_root_bus(adev->pdev->bus)) {
                if (adev->pm.pcie_gen_mask == 0)
                        adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
                if (adev->pm.pcie_mlw_mask == 0)
                        adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
                return;
        }

        if (adev->pm.pcie_gen_mask == 0) {
                ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
                if (!ret) {
                        adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                                                  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                                  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

                        if (mask & DRM_PCIE_SPEED_25)
                                adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
                        if (mask & DRM_PCIE_SPEED_50)
                                adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
                        if (mask & DRM_PCIE_SPEED_80)
                                adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
                } else {
                        adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
                }
        }
        if (adev->pm.pcie_mlw_mask == 0) {
                ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
                if (!ret) {
                        switch (mask) {
                        case 32:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
                        case 16:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
                        case 12:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
                        case 8:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
                        case 4:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
                        case 2:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
                        case 1:
                                adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
                                break;
                        default:
                                break;
                        }
                } else {
                        adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
                }
        }
}
/*
 * Debugfs
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
                             const struct drm_info_list *files,
                             unsigned nfiles)
{
        unsigned i;

        for (i = 0; i < adev->debugfs_count; i++) {
                if (adev->debugfs[i].files == files) {
                        /* Already registered */
                        return 0;
                }
        }

        i = adev->debugfs_count + 1;
        if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
                DRM_ERROR("Reached maximum number of debugfs components.\n");
                DRM_ERROR("Report so we increase "
                          "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
                return -EINVAL;
        }
        adev->debugfs[adev->debugfs_count].files = files;
        adev->debugfs[adev->debugfs_count].num_files = nfiles;
        adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
        drm_debugfs_create_files(files, nfiles,
                                 adev->ddev->control->debugfs_root,
                                 adev->ddev->control);
        drm_debugfs_create_files(files, nfiles,
                                 adev->ddev->primary->debugfs_root,
                                 adev->ddev->primary);
#endif
        return 0;
}
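/*
 * Usage sketch (illustrative; the file name and show callback below are
 * hypothetical):
 *
 *      static const struct drm_info_list example_list[] = {
 *              { "amdgpu_example", example_show, 0, NULL },
 *      };
 *      amdgpu_debugfs_add_files(adev, example_list, ARRAY_SIZE(example_list));
 */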
static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;

        for (i = 0; i < adev->debugfs_count; i++) {
                drm_debugfs_remove_files(adev->debugfs[i].files,
                                         adev->debugfs[i].num_files,
                                         adev->ddev->control);
                drm_debugfs_remove_files(adev->debugfs[i].files,
                                         adev->debugfs[i].num_files,
                                         adev->ddev->primary);
        }
#endif
}
2175 static ssize_t
amdgpu_debugfs_regs_read(struct file
*f
, char __user
*buf
,
2176 size_t size
, loff_t
*pos
)
2178 struct amdgpu_device
*adev
= f
->f_inode
->i_private
;
2182 unsigned instance_bank
, sh_bank
, se_bank
;
2184 if (size
& 0x3 || *pos
& 0x3)
2187 if (*pos
& (1ULL << 62)) {
2188 se_bank
= (*pos
>> 24) & 0x3FF;
2189 sh_bank
= (*pos
>> 34) & 0x3FF;
2190 instance_bank
= (*pos
>> 44) & 0x3FF;
2198 if (sh_bank
>= adev
->gfx
.config
.max_sh_per_se
||
2199 se_bank
>= adev
->gfx
.config
.max_shader_engines
)
2201 mutex_lock(&adev
->grbm_idx_mutex
);
2202 amdgpu_gfx_select_se_sh(adev
, se_bank
,
2203 sh_bank
, instance_bank
);
2209 if (*pos
> adev
->rmmio_size
)
2212 value
= RREG32(*pos
>> 2);
2213 r
= put_user(value
, (uint32_t *)buf
);
2227 amdgpu_gfx_select_se_sh(adev
, 0xffffffff, 0xffffffff, 0xffffffff);
2228 mutex_unlock(&adev
->grbm_idx_mutex
);
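/*
 * Offset encoding accepted above (summary, not part of the original file):
 * bit 62 of the file offset requests banked access; bits 24-33 then carry
 * se_bank, bits 34-43 sh_bank and bits 44-53 instance_bank, while the low
 * bits remain the byte offset of the register.
 */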
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
                                         size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        while (size) {
                uint32_t value;

                if (*pos > adev->rmmio_size)
                        return result;

                r = get_user(value, (uint32_t *)buf);
                if (r)
                        return r;

                WREG32(*pos >> 2, value);

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
                                             size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        while (size) {
                uint32_t value;

                value = RREG32_PCIE(*pos >> 2);
                r = put_user(value, (uint32_t *)buf);
                if (r)
                        return r;

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
                                              size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        while (size) {
                uint32_t value;

                r = get_user(value, (uint32_t *)buf);
                if (r)
                        return r;

                WREG32_PCIE(*pos >> 2, value);

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
                                             size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        while (size) {
                uint32_t value;

                value = RREG32_DIDT(*pos >> 2);
                r = put_user(value, (uint32_t *)buf);
                if (r)
                        return r;

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
                                              size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        while (size) {
                uint32_t value;

                r = get_user(value, (uint32_t *)buf);
                if (r)
                        return r;

                WREG32_DIDT(*pos >> 2, value);

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
                                            size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        while (size) {
                uint32_t value;

                value = RREG32_SMC(*pos >> 2);
                r = put_user(value, (uint32_t *)buf);
                if (r)
                        return r;

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
                                             size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        while (size) {
                uint32_t value;

                r = get_user(value, (uint32_t *)buf);
                if (r)
                        return r;

                WREG32_SMC(*pos >> 2, value);

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
                                              size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;
        uint32_t *config, no_regs = 0;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
        if (!config)
                return -ENOMEM;

        /* version, increment each time something is added */
        config[no_regs++] = 0;
        config[no_regs++] = adev->gfx.config.max_shader_engines;
        config[no_regs++] = adev->gfx.config.max_tile_pipes;
        config[no_regs++] = adev->gfx.config.max_cu_per_sh;
        config[no_regs++] = adev->gfx.config.max_sh_per_se;
        config[no_regs++] = adev->gfx.config.max_backends_per_se;
        config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
        config[no_regs++] = adev->gfx.config.max_gprs;
        config[no_regs++] = adev->gfx.config.max_gs_threads;
        config[no_regs++] = adev->gfx.config.max_hw_contexts;
        config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
        config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
        config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
        config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
        config[no_regs++] = adev->gfx.config.num_tile_pipes;
        config[no_regs++] = adev->gfx.config.backend_enable_mask;
        config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
        config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
        config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
        config[no_regs++] = adev->gfx.config.num_gpus;
        config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
        config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
        config[no_regs++] = adev->gfx.config.gb_addr_config;
        config[no_regs++] = adev->gfx.config.num_rbs;

        while (size && (*pos < no_regs * 4)) {
                uint32_t value;

                value = config[*pos >> 2];
                r = put_user(value, (uint32_t *)buf);
                if (r) {
                        kfree(config);
                        return r;
                }

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        kfree(config);
        return result;
}
static const struct file_operations amdgpu_debugfs_regs_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_debugfs_regs_read,
        .write = amdgpu_debugfs_regs_write,
        .llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_debugfs_regs_didt_read,
        .write = amdgpu_debugfs_regs_didt_write,
        .llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_debugfs_regs_pcie_read,
        .write = amdgpu_debugfs_regs_pcie_write,
        .llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_debugfs_regs_smc_read,
        .write = amdgpu_debugfs_regs_smc_write,
        .llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_debugfs_gca_config_read,
        .llseek = default_llseek
};
static const struct file_operations *debugfs_regs[] = {
        &amdgpu_debugfs_regs_fops,
        &amdgpu_debugfs_regs_didt_fops,
        &amdgpu_debugfs_regs_pcie_fops,
        &amdgpu_debugfs_regs_smc_fops,
        &amdgpu_debugfs_gca_config_fops,
};

static const char *debugfs_regs_names[] = {
        "amdgpu_regs",
        "amdgpu_regs_didt",
        "amdgpu_regs_pcie",
        "amdgpu_regs_smc",
        "amdgpu_gca_config",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
        struct drm_minor *minor = adev->ddev->primary;
        struct dentry *ent, *root = minor->debugfs_root;
        unsigned i, j;

        for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
                ent = debugfs_create_file(debugfs_regs_names[i],
                                          S_IFREG | S_IRUGO, root,
                                          adev, debugfs_regs[i]);
                if (IS_ERR(ent)) {
                        for (j = 0; j < i; j++) {
                                debugfs_remove(adev->debugfs_regs[j]);
                                adev->debugfs_regs[j] = NULL;
                        }
                        return PTR_ERR(ent);
                }

                if (!i)
                        i_size_write(ent->d_inode, adev->rmmio_size);
                adev->debugfs_regs[i] = ent;
        }

        return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
                if (adev->debugfs_regs[i]) {
                        debugfs_remove(adev->debugfs_regs[i]);
                        adev->debugfs_regs[i] = NULL;
                }
        }
}
int amdgpu_debugfs_init(struct drm_minor *minor)
{
        return 0;
}

void amdgpu_debugfs_cleanup(struct drm_minor *minor)
{
}
#else
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
        return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif