/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"
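
/*
 * CGS ("common graphics services") is a thin abstraction layer that lets
 * IP-independent components (the powerplay/SMU code, for example) call
 * back into amdgpu without linking against driver internals.  Every
 * callback below recovers the owning amdgpu_device from the opaque
 * cgs_device handle via CGS_FUNC_ADEV.
 */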
struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};
#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev
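
/*
 * Report the MC (memory controller) address range of a memory pool and
 * how much of it is still unpinned, for visible/invisible VRAM and GART.
 */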
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch(type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
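
/*
 * Wrap a vmalloc'd kernel allocation in a GTT buffer object and pin it
 * into the GPU address space; the BO pointer doubles as the handle.
 */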
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}
static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
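
/*
 * Allocate a buffer object confined to [min_offset, max_offset) of the
 * requested pool.  The CGS memory type picks the TTM placement flags and
 * the GEM creation flags (CPU access required/forbidden, USWC, ...).
 */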
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch(type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}

		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}
static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
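
/*
 * Pin a BO allocated by amdgpu_cgs_alloc_gpu_mem() and return its MC
 * address.  The pin range is recovered from the BO's single placement.
 */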
static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	/* fpfn/lpfn are 32-bit page frame numbers; widen before shifting */
	min_offset = (u64)obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = (u64)obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}
static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}
static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}
static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}
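
/* MMIO register accessors: thin wrappers around RREG32/WREG32. */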
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}
static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}
static void amdgpu_cgs_write_ind_register(void *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}
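
/*
 * PCI config space accessors.  A failed read triggers a WARN and
 * returns 0 rather than propagating an error code.
 */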
static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}
static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}
static int amdgpu_cgs_get_pci_resource(void *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}
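
/* ATOM BIOS table helpers, forwarding to the device's atom context. */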
static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t*)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}
static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}
static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}
static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}
static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");

	return -EPERM;
}
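
/*
 * Per-source glue between the amdgpu interrupt dispatcher and a CGS
 * client's callbacks; stashed in amdgpu_irq_src.data.
 */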
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};
static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}
static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}
static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};
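
/*
 * Register a CGS interrupt source: allocate an amdgpu_irq_src whose
 * set/process hooks trampoline into the client-supplied callbacks.
 */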
static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}
static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}
int amdgpu_cgs_set_clockgating_state(void *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}
int amdgpu_cgs_set_powergating_state(void *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}
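
/*
 * Map a CGS ucode id onto the amdgpu ucode id.  CP_MEC_JT2 is the
 * asymmetric case: Tonga has a second MEC image, Carrizo reuses MEC1.
 */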
static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		if (adev->asic_type == CHIP_TONGA)
			result = AMDGPU_UCODE_ID_CP_MEC2;
		else if (adev->asic_type == CHIP_CARRIZO)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}
static int amdgpu_cgs_get_firmware_info(void *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if (CGS_UCODE_ID_SMU != type) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
			data_size = le32_to_cpu(header->jt_size) << 2;
		}
		info->mc_addr = gpu_addr;
		info->image_size = data_size;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		switch (adev->asic_type) {
		case CHIP_TONGA:
			strcpy(fw_name, "amdgpu/tonga_smc.bin");
			break;
		case CHIP_FIJI:
			strcpy(fw_name, "amdgpu/fiji_smc.bin");
			break;
		default:
			DRM_ERROR("SMC firmware not supported\n");
			return -EINVAL;
		}

		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
		if (err) {
			DRM_ERROR("Failed to request firmware\n");
			return err;
		}

		err = amdgpu_ucode_validate(adev->pm.fw);
		if (err) {
			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
			release_firmware(adev->pm.fw);
			adev->pm.fw = NULL;
			return err;
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->kptr = (void *)src;
	}
	return 0;
}
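
/*
 * Answer simple adapter queries (BDF id, PCIe gen/lane masks, clock-
 * and powergating flags) on behalf of CGS clients.
 */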
static int amdgpu_cgs_query_system_info(void *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct amdgpu_crtc *amdgpu_crtc;
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	uint32_t line_time_us, vblank_lines;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
				info->display_count++;
			}
			if (mode_info != NULL &&
			    crtc->enabled && amdgpu_crtc->enabled &&
			    amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
					amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2);
				mode_info->vblank_time_us = vblank_lines * line_time_us;
				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				mode_info->ref_clock = adev->clock.spll.reference_freq;
				mode_info = NULL;
			}
		}
	}

	return 0;
}
static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}
/** \brief evaluate acpi namespace object, handle or pathname must be valid
 *
 *  \param info input/output arguments for the control method
 *
 *  \return status
 */
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params = NULL;
	union acpi_object *obj = NULL;
	uint8_t name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument = NULL;
	uint32_t i, count;
	acpi_status status;
	int result = 0;
	uint32_t func_no = 0xFFFFFFFF;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		func_no = argument->value;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER))
			    && (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0)
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
			kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
		if (params == NULL)
			return -EINVAL;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->method_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->method_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto free_input;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
		    (obj->package.count != count)) {
			result = -EIO;
			goto free_obj;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto free_obj;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto free_obj;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
			    (params->string.pointer == NULL)) {
				result = -EIO;
				goto free_obj;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto free_obj;
			}
			memcpy(argument->pointer,
			       params->buffer.pointer,
			       argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

free_obj:
	kfree(obj);
free_input:
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif
int amdgpu_cgs_call_acpi_method(void *cgs_device,
				uint32_t acpi_method,
				uint32_t acpi_function,
				void *pinput, void *poutput,
				uint32_t output_count,
				uint32_t input_size,
				uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].method_length = sizeof(uint32_t);
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}
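
/*
 * Dispatch tables handed to CGS clients.  The initializers are
 * positional, so the order must match struct cgs_ops and struct
 * cgs_os_ops exactly.
 */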
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_get_pci_resource,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state,
	amdgpu_cgs_get_active_displays_info,
	amdgpu_cgs_notify_dpm_enabled,
	amdgpu_cgs_call_acpi_method,
	amdgpu_cgs_query_system_info,
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};
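
/*
 * Create the CGS wrapper around an amdgpu device.  A minimal usage
 * sketch, assuming the caller manages the wrapper's lifetime:
 *
 *	void *cgs = amdgpu_cgs_create_device(adev);
 *	if (!cgs)
 *		return -ENOMEM;
 *	...hand cgs to a CGS client such as powerplay...
 *	amdgpu_cgs_destroy_device(cgs);
 */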
void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return cgs_device;
}
void amdgpu_cgs_destroy_device(void *cgs_device)
{
	kfree(cgs_device);
}