drm/amdgpu: add bo list copy
drivers/gpu/drm/amd/amdgpu/amdgpu.h
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __AMDGPU_H__
29#define __AMDGPU_H__
30
31#include <linux/atomic.h>
32#include <linux/wait.h>
33#include <linux/list.h>
34#include <linux/kref.h>
35#include <linux/interval_tree.h>
36#include <linux/hashtable.h>
37#include <linux/fence.h>
38
39#include <ttm/ttm_bo_api.h>
40#include <ttm/ttm_bo_driver.h>
41#include <ttm/ttm_placement.h>
42#include <ttm/ttm_module.h>
43#include <ttm/ttm_execbuf_util.h>
44
45#include <drm/drmP.h>
46#include <drm/drm_gem.h>
47#include <drm/amdgpu_drm.h>
48
49#include "amd_shared.h"
50#include "amdgpu_mode.h"
51#include "amdgpu_ih.h"
52#include "amdgpu_irq.h"
53#include "amdgpu_ucode.h"
54#include "amdgpu_gds.h"
55
56#include "gpu_scheduler.h"
57
58/*
59 * Module parameters.
60 */
61extern int amdgpu_modeset;
62extern int amdgpu_vram_limit;
63extern int amdgpu_gart_size;
64extern int amdgpu_benchmarking;
65extern int amdgpu_testing;
66extern int amdgpu_audio;
67extern int amdgpu_disp_priority;
68extern int amdgpu_hw_i2c;
69extern int amdgpu_pcie_gen2;
70extern int amdgpu_msi;
71extern int amdgpu_lockup_timeout;
72extern int amdgpu_dpm;
73extern int amdgpu_smc_load_fw;
74extern int amdgpu_aspm;
75extern int amdgpu_runtime_pm;
76extern int amdgpu_hard_reset;
77extern unsigned amdgpu_ip_block_mask;
78extern int amdgpu_bapm;
79extern int amdgpu_deep_color;
80extern int amdgpu_vm_size;
81extern int amdgpu_vm_block_size;
82extern int amdgpu_enable_scheduler;
83
84#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
85#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
86/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
87#define AMDGPU_IB_POOL_SIZE 16
88#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
89#define AMDGPUFB_CONN_LIMIT 4
90#define AMDGPU_BIOS_NUM_SCRATCH 8
91
92/* max number of rings */
93#define AMDGPU_MAX_RINGS 16
94#define AMDGPU_MAX_GFX_RINGS 1
95#define AMDGPU_MAX_COMPUTE_RINGS 8
96#define AMDGPU_MAX_VCE_RINGS 2
97
98/* number of hw syncs before falling back on blocking */
99#define AMDGPU_NUM_SYNCS 4
100
101/* hardcode that limit for now */
102#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
103
104/* hard reset data */
105#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
106
107/* reset flags */
108#define AMDGPU_RESET_GFX (1 << 0)
109#define AMDGPU_RESET_COMPUTE (1 << 1)
110#define AMDGPU_RESET_DMA (1 << 2)
111#define AMDGPU_RESET_CP (1 << 3)
112#define AMDGPU_RESET_GRBM (1 << 4)
113#define AMDGPU_RESET_DMA1 (1 << 5)
114#define AMDGPU_RESET_RLC (1 << 6)
115#define AMDGPU_RESET_SEM (1 << 7)
116#define AMDGPU_RESET_IH (1 << 8)
117#define AMDGPU_RESET_VMC (1 << 9)
118#define AMDGPU_RESET_MC (1 << 10)
119#define AMDGPU_RESET_DISPLAY (1 << 11)
120#define AMDGPU_RESET_UVD (1 << 12)
121#define AMDGPU_RESET_VCE (1 << 13)
122#define AMDGPU_RESET_VCE1 (1 << 14)
123
124/* CG block flags */
125#define AMDGPU_CG_BLOCK_GFX (1 << 0)
126#define AMDGPU_CG_BLOCK_MC (1 << 1)
127#define AMDGPU_CG_BLOCK_SDMA (1 << 2)
128#define AMDGPU_CG_BLOCK_UVD (1 << 3)
129#define AMDGPU_CG_BLOCK_VCE (1 << 4)
130#define AMDGPU_CG_BLOCK_HDP (1 << 5)
131#define AMDGPU_CG_BLOCK_BIF (1 << 6)
132
133/* CG flags */
134#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0)
135#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1)
136#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2)
137#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3)
138#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4)
139#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
140#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6)
141#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7)
142#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8)
143#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9)
144#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10)
145#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11)
146#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12)
147#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13)
148#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14)
149#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15)
150#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16)
151
152/* PG flags */
153#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0)
154#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1)
155#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2)
156#define AMDGPU_PG_SUPPORT_UVD (1 << 3)
157#define AMDGPU_PG_SUPPORT_VCE (1 << 4)
158#define AMDGPU_PG_SUPPORT_CP (1 << 5)
159#define AMDGPU_PG_SUPPORT_GDS (1 << 6)
160#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7)
161#define AMDGPU_PG_SUPPORT_SDMA (1 << 8)
162#define AMDGPU_PG_SUPPORT_ACP (1 << 9)
163#define AMDGPU_PG_SUPPORT_SAMU (1 << 10)
164
165/* GFX current status */
166#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
167#define AMDGPU_GFX_SAFE_MODE 0x00000001L
168#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
169#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
170#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
171
172/* max cursor sizes (in pixels) */
173#define CIK_CURSOR_WIDTH 128
174#define CIK_CURSOR_HEIGHT 128
175
176struct amdgpu_device;
177struct amdgpu_fence;
178struct amdgpu_ib;
179struct amdgpu_vm;
180struct amdgpu_ring;
181struct amdgpu_semaphore;
182struct amdgpu_cs_parser;
183struct amdgpu_irq_src;
184struct amdgpu_fpriv;
185
186enum amdgpu_cp_irq {
187 AMDGPU_CP_IRQ_GFX_EOP = 0,
188 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
189 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
190 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
191 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
192 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
193 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
194 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
195 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
196
197 AMDGPU_CP_IRQ_LAST
198};
199
200enum amdgpu_sdma_irq {
201 AMDGPU_SDMA_IRQ_TRAP0 = 0,
202 AMDGPU_SDMA_IRQ_TRAP1,
203
204 AMDGPU_SDMA_IRQ_LAST
205};
206
207enum amdgpu_thermal_irq {
208 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
209 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
210
211 AMDGPU_THERMAL_IRQ_LAST
212};
213
214int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
215 enum amd_ip_block_type block_type,
216 enum amd_clockgating_state state);
217int amdgpu_set_powergating_state(struct amdgpu_device *adev,
218 enum amd_ip_block_type block_type,
219 enum amd_powergating_state state);
220
221struct amdgpu_ip_block_version {
222 enum amd_ip_block_type type;
223 u32 major;
224 u32 minor;
225 u32 rev;
226 const struct amd_ip_funcs *funcs;
227};
228
229int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
230 enum amd_ip_block_type type,
231 u32 major, u32 minor);
232
233const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
234 struct amdgpu_device *adev,
235 enum amd_ip_block_type type);
236
237/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
238struct amdgpu_buffer_funcs {
239 /* maximum bytes in a single operation */
240 uint32_t copy_max_bytes;
241
242 /* number of dw to reserve per operation */
243 unsigned copy_num_dw;
244
245 /* used for buffer migration */
246 void (*emit_copy_buffer)(struct amdgpu_ring *ring,
247 /* src addr in bytes */
248 uint64_t src_offset,
249 /* dst addr in bytes */
250 uint64_t dst_offset,
251 /* number of bytes to transfer */
252 uint32_t byte_count);
253
254 /* maximum bytes in a single operation */
255 uint32_t fill_max_bytes;
256
257 /* number of dw to reserve per operation */
258 unsigned fill_num_dw;
259
260 /* used for buffer clearing */
261 void (*emit_fill_buffer)(struct amdgpu_ring *ring,
262 /* value to write to memory */
263 uint32_t src_data,
264 /* dst addr in bytes */
265 uint64_t dst_offset,
266 /* number of bytes to fill */
267 uint32_t byte_count);
268};
269
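/*
 * Illustration only (not taken from the driver): since one emit_copy_buffer()
 * call is limited to copy_max_bytes, moving a larger buffer has to be split
 * into DIV_ROUND_UP(byte_count, copy_max_bytes) operations, and the ring
 * needs copy_num_dw dwords of space reserved for each of those operations
 * before they are emitted.
 */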
270/* provided by hw blocks that can write ptes, e.g., sdma */
271struct amdgpu_vm_pte_funcs {
272 /* copy pte entries from GART */
273 void (*copy_pte)(struct amdgpu_ib *ib,
274 uint64_t pe, uint64_t src,
275 unsigned count);
276 /* write pte one entry at a time with addr mapping */
277 void (*write_pte)(struct amdgpu_ib *ib,
278 uint64_t pe,
279 uint64_t addr, unsigned count,
280 uint32_t incr, uint32_t flags);
281 /* for linear pte/pde updates without addr mapping */
282 void (*set_pte_pde)(struct amdgpu_ib *ib,
283 uint64_t pe,
284 uint64_t addr, unsigned count,
285 uint32_t incr, uint32_t flags);
286 /* pad the indirect buffer to the necessary number of dw */
287 void (*pad_ib)(struct amdgpu_ib *ib);
288};
289
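/*
 * Worked example (illustration only, values made up): a call such as
 *   write_pte(ib, pe, addr, 4, AMDGPU_GPU_PAGE_SIZE, flags)
 * is expected to emit four 64-bit PTEs at GPU addresses pe, pe+8, pe+16 and
 * pe+24, mapping addr, addr+4096, addr+8192 and addr+12288 with the given
 * access flags.
 */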
290/* provided by the gmc block */
291struct amdgpu_gart_funcs {
292 /* flush the vm tlb via mmio */
293 void (*flush_gpu_tlb)(struct amdgpu_device *adev,
294 uint32_t vmid);
295 /* write pte/pde updates using the cpu */
296 int (*set_pte_pde)(struct amdgpu_device *adev,
297 void *cpu_pt_addr, /* cpu addr of page table */
298 uint32_t gpu_page_idx, /* pte/pde to update */
299 uint64_t addr, /* addr to write into pte/pde */
300 uint32_t flags); /* access flags */
301};
302
303/* provided by the ih block */
304struct amdgpu_ih_funcs {
305 /* ring read/write ptr handling, called from interrupt context */
306 u32 (*get_wptr)(struct amdgpu_device *adev);
307 void (*decode_iv)(struct amdgpu_device *adev,
308 struct amdgpu_iv_entry *entry);
309 void (*set_rptr)(struct amdgpu_device *adev);
310};
311
312/* provided by hw blocks that expose a ring buffer for commands */
313struct amdgpu_ring_funcs {
314 /* ring read/write ptr handling */
315 u32 (*get_rptr)(struct amdgpu_ring *ring);
316 u32 (*get_wptr)(struct amdgpu_ring *ring);
317 void (*set_wptr)(struct amdgpu_ring *ring);
318 /* validating and patching of IBs */
319 int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
320 /* command emit functions */
321 void (*emit_ib)(struct amdgpu_ring *ring,
322 struct amdgpu_ib *ib);
323 void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
324 uint64_t seq, unsigned flags);
325 bool (*emit_semaphore)(struct amdgpu_ring *ring,
326 struct amdgpu_semaphore *semaphore,
327 bool emit_wait);
328 void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
329 uint64_t pd_addr);
330 void (*emit_hdp_flush)(struct amdgpu_ring *ring);
331 void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
332 uint32_t gds_base, uint32_t gds_size,
333 uint32_t gws_base, uint32_t gws_size,
334 uint32_t oa_base, uint32_t oa_size);
335 /* testing functions */
336 int (*test_ring)(struct amdgpu_ring *ring);
337 int (*test_ib)(struct amdgpu_ring *ring);
338 bool (*is_lockup)(struct amdgpu_ring *ring);
339};
340
341/*
342 * BIOS.
343 */
344bool amdgpu_get_bios(struct amdgpu_device *adev);
345bool amdgpu_read_bios(struct amdgpu_device *adev);
346
347/*
348 * Dummy page
349 */
350struct amdgpu_dummy_page {
351 struct page *page;
352 dma_addr_t addr;
353};
354int amdgpu_dummy_page_init(struct amdgpu_device *adev);
355void amdgpu_dummy_page_fini(struct amdgpu_device *adev);
356
357
358/*
359 * Clocks
360 */
361
362#define AMDGPU_MAX_PPLL 3
363
364struct amdgpu_clock {
365 struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
366 struct amdgpu_pll spll;
367 struct amdgpu_pll mpll;
368 /* 10 kHz units */
369 uint32_t default_mclk;
370 uint32_t default_sclk;
371 uint32_t default_dispclk;
372 uint32_t current_dispclk;
373 uint32_t dp_extclk;
374 uint32_t max_pixel_clock;
375};
376
377/*
378 * Fences.
379 */
380struct amdgpu_fence_driver {
381 struct amdgpu_ring *ring;
382 uint64_t gpu_addr;
383 volatile uint32_t *cpu_addr;
384 /* sync_seq is protected by ring emission lock */
385 uint64_t sync_seq[AMDGPU_MAX_RINGS];
386 atomic64_t last_seq;
387 bool initialized;
388 struct amdgpu_irq_src *irq_src;
389 unsigned irq_type;
390 struct delayed_work lockup_work;
391};
392
393/* some special values for the owner field */
394#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
395#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
396#define AMDGPU_FENCE_OWNER_MOVE ((void*)2ul)
397
398#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
399#define AMDGPU_FENCE_FLAG_INT (1 << 1)
400
401struct amdgpu_fence {
402 struct fence base;
403
404 /* RB, DMA, etc. */
405 struct amdgpu_ring *ring;
406 uint64_t seq;
407
408 /* filp or special value for fence creator */
409 void *owner;
410
411 wait_queue_t fence_wake;
412};
413
414struct amdgpu_user_fence {
415 /* write-back bo */
416 struct amdgpu_bo *bo;
417 /* write-back address offset to bo start */
418 uint32_t offset;
419 uint64_t sequence;
420};
421
422int amdgpu_fence_driver_init(struct amdgpu_device *adev);
423void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
424void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
425
426void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
427int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
428 struct amdgpu_irq_src *irq_src,
429 unsigned irq_type);
430void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
431void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
432int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
433 struct amdgpu_fence **fence);
434void amdgpu_fence_process(struct amdgpu_ring *ring);
435int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
436int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
437unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
438
439bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
440int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
441int amdgpu_fence_wait_any(struct amdgpu_device *adev,
442 struct amdgpu_fence **fences,
443 bool intr);
444struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
445void amdgpu_fence_unref(struct amdgpu_fence **fence);
446
447bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
448 struct amdgpu_ring *ring);
449void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
450 struct amdgpu_ring *ring);
451
452static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
453 struct amdgpu_fence *b)
454{
455 if (!a) {
456 return b;
457 }
458
459 if (!b) {
460 return a;
461 }
462
463 BUG_ON(a->ring != b->ring);
464
465 if (a->seq > b->seq) {
466 return a;
467 } else {
468 return b;
469 }
470}
471
472static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
473 struct amdgpu_fence *b)
474{
475 if (!a) {
476 return false;
477 }
478
479 if (!b) {
480 return true;
481 }
482
483 BUG_ON(a->ring != b->ring);
484
485 return a->seq < b->seq;
486}
487
488int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
489 void *owner, struct amdgpu_fence **fence);
490
491/*
492 * TTM.
493 */
494struct amdgpu_mman {
495 struct ttm_bo_global_ref bo_global_ref;
496 struct drm_global_reference mem_global_ref;
497 struct ttm_bo_device bdev;
498 bool mem_global_referenced;
499 bool initialized;
500
501#if defined(CONFIG_DEBUG_FS)
502 struct dentry *vram;
503 struct dentry *gtt;
504#endif
505
506 /* buffer handling */
507 const struct amdgpu_buffer_funcs *buffer_funcs;
508 struct amdgpu_ring *buffer_funcs_ring;
509};
510
511int amdgpu_copy_buffer(struct amdgpu_ring *ring,
512 uint64_t src_offset,
513 uint64_t dst_offset,
514 uint32_t byte_count,
515 struct reservation_object *resv,
516 struct amdgpu_fence **fence);
517int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
518
519struct amdgpu_bo_list_entry {
520 struct amdgpu_bo *robj;
521 struct ttm_validate_buffer tv;
522 struct amdgpu_bo_va *bo_va;
523 unsigned prefered_domains;
524 unsigned allowed_domains;
525 uint32_t priority;
526};
527
528struct amdgpu_bo_va_mapping {
529 struct list_head list;
530 struct interval_tree_node it;
531 uint64_t offset;
532 uint32_t flags;
533};
534
535/* bo virtual addresses in a specific vm */
536struct amdgpu_bo_va {
537 /* protected by bo being reserved */
538 struct list_head bo_list;
539 uint64_t addr;
540 struct amdgpu_fence *last_pt_update;
541 unsigned ref_count;
542
543 /* protected by vm mutex */
544 struct list_head mappings;
545 struct list_head vm_status;
546
547 /* constant after initialization */
548 struct amdgpu_vm *vm;
549 struct amdgpu_bo *bo;
550};
551
552#define AMDGPU_GEM_DOMAIN_MAX 0x3
553
554struct amdgpu_bo {
555 /* Protected by gem.mutex */
556 struct list_head list;
557 /* Protected by tbo.reserved */
558 u32 initial_domain;
559 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
560 struct ttm_placement placement;
561 struct ttm_buffer_object tbo;
562 struct ttm_bo_kmap_obj kmap;
563 u64 flags;
564 unsigned pin_count;
565 void *kptr;
566 u64 tiling_flags;
567 u64 metadata_flags;
568 void *metadata;
569 u32 metadata_size;
570 /* list of all virtual addresses to which this bo
571 * is associated
572 */
573 struct list_head va;
574 /* Constant after initialization */
575 struct amdgpu_device *adev;
576 struct drm_gem_object gem_base;
577
578 struct ttm_bo_kmap_obj dma_buf_vmap;
579 pid_t pid;
580 struct amdgpu_mn *mn;
581 struct list_head mn_list;
582};
583#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
584
585void amdgpu_gem_object_free(struct drm_gem_object *obj);
586int amdgpu_gem_object_open(struct drm_gem_object *obj,
587 struct drm_file *file_priv);
588void amdgpu_gem_object_close(struct drm_gem_object *obj,
589 struct drm_file *file_priv);
590unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
591struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
592struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
593 struct dma_buf_attachment *attach,
594 struct sg_table *sg);
595struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
596 struct drm_gem_object *gobj,
597 int flags);
598int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
599void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
600struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
601void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
602void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
603int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
604
605/* sub-allocation manager, it has to be protected by another lock.
606 * By design this is a helper for other parts of the driver
607 * like the indirect buffer or semaphore, which both have their
608 * own locking.
609 *
610 * The principle is simple: we keep a list of sub-allocations in offset
611 * order (first entry has offset == 0, last entry has the highest
612 * offset).
613 *
614 * When allocating a new object we first check if there is room at
615 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
616 * alloc_size. If so we allocate the new object there.
617 *
618 * When there is not enough room at the end, we start waiting for
619 * each sub object until we reach object_offset + object_size >=
620 * alloc_size; this object then becomes the sub object we return.
621 *
622 * Alignment can't be bigger than page size.
623 *
624 * Holes are not considered for allocation to keep things simple.
625 * The assumption is that there won't be holes (all objects use the same
626 * alignment).
627 */
628struct amdgpu_sa_manager {
629 wait_queue_head_t wq;
630 struct amdgpu_bo *bo;
631 struct list_head *hole;
632 struct list_head flist[AMDGPU_MAX_RINGS];
633 struct list_head olist;
634 unsigned size;
635 uint64_t gpu_addr;
636 void *cpu_ptr;
637 uint32_t domain;
638 uint32_t align;
639};
640
641struct amdgpu_sa_bo;
642
643/* sub-allocation buffer */
644struct amdgpu_sa_bo {
645 struct list_head olist;
646 struct list_head flist;
647 struct amdgpu_sa_manager *manager;
648 unsigned soffset;
649 unsigned eoffset;
650 struct amdgpu_fence *fence;
651};
652
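/*
 * Illustrative sketch only (not part of the driver): the end-of-buffer check
 * described in the comment above struct amdgpu_sa_manager. A new
 * sub-allocation fits at the end of the managed buffer when the tail left
 * behind the last allocated object is large enough.
 */
static inline bool amdgpu_sa_fits_at_end_example(unsigned total_size,
						 unsigned last_offset,
						 unsigned last_size,
						 unsigned alloc_size)
{
	/* room remaining after the last sub-allocation */
	return total_size - (last_offset + last_size) >= alloc_size;
}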
653/*
654 * GEM objects.
655 */
656struct amdgpu_gem {
657 struct mutex mutex;
658 struct list_head objects;
659};
660
661int amdgpu_gem_init(struct amdgpu_device *adev);
662void amdgpu_gem_fini(struct amdgpu_device *adev);
663int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
664 int alignment, u32 initial_domain,
665 u64 flags, bool kernel,
666 struct drm_gem_object **obj);
667
668int amdgpu_mode_dumb_create(struct drm_file *file_priv,
669 struct drm_device *dev,
670 struct drm_mode_create_dumb *args);
671int amdgpu_mode_dumb_mmap(struct drm_file *filp,
672 struct drm_device *dev,
673 uint32_t handle, uint64_t *offset_p);
674
675/*
676 * Semaphores.
677 */
678struct amdgpu_semaphore {
679 struct amdgpu_sa_bo *sa_bo;
680 signed waiters;
681 uint64_t gpu_addr;
682};
683
684int amdgpu_semaphore_create(struct amdgpu_device *adev,
685 struct amdgpu_semaphore **semaphore);
686bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
687 struct amdgpu_semaphore *semaphore);
688bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
689 struct amdgpu_semaphore *semaphore);
690void amdgpu_semaphore_free(struct amdgpu_device *adev,
691 struct amdgpu_semaphore **semaphore,
692 struct amdgpu_fence *fence);
693
694/*
695 * Synchronization
696 */
697struct amdgpu_sync {
698 struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
699 struct amdgpu_fence *sync_to[AMDGPU_MAX_RINGS];
700 struct amdgpu_fence *last_vm_update;
701};
702
703void amdgpu_sync_create(struct amdgpu_sync *sync);
704int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
705 struct fence *f);
706int amdgpu_sync_resv(struct amdgpu_device *adev,
707 struct amdgpu_sync *sync,
708 struct reservation_object *resv,
709 void *owner);
710int amdgpu_sync_rings(struct amdgpu_sync *sync,
711 struct amdgpu_ring *ring);
712void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
713 struct amdgpu_fence *fence);
714
715/*
716 * GART structures, functions & helpers
717 */
718struct amdgpu_mc;
719
720#define AMDGPU_GPU_PAGE_SIZE 4096
721#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
722#define AMDGPU_GPU_PAGE_SHIFT 12
723#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
724
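/*
 * Worked example (illustration only): with AMDGPU_GPU_PAGE_SIZE == 4096 and
 * AMDGPU_GPU_PAGE_MASK == 0xfff, AMDGPU_GPU_PAGE_ALIGN(5000) evaluates to
 * (5000 + 4095) & ~4095 == 8192, i.e. two GPU pages, while
 * AMDGPU_GPU_PAGE_ALIGN(4096) stays at 4096.
 */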
725struct amdgpu_gart {
726 dma_addr_t table_addr;
727 struct amdgpu_bo *robj;
728 void *ptr;
729 unsigned num_gpu_pages;
730 unsigned num_cpu_pages;
731 unsigned table_size;
732 struct page **pages;
733 dma_addr_t *pages_addr;
734 bool ready;
735 const struct amdgpu_gart_funcs *gart_funcs;
736};
737
738int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
739void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
740int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
741void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
742int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
743void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
744int amdgpu_gart_init(struct amdgpu_device *adev);
745void amdgpu_gart_fini(struct amdgpu_device *adev);
746void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
747 int pages);
748int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
749 int pages, struct page **pagelist,
750 dma_addr_t *dma_addr, uint32_t flags);
751
752/*
753 * GPU MC structures, functions & helpers
754 */
755struct amdgpu_mc {
756 resource_size_t aper_size;
757 resource_size_t aper_base;
758 resource_size_t agp_base;
759 /* for some chips with <= 32MB we need to lie
760 * about vram size near mc fb location */
761 u64 mc_vram_size;
762 u64 visible_vram_size;
763 u64 gtt_size;
764 u64 gtt_start;
765 u64 gtt_end;
766 u64 vram_start;
767 u64 vram_end;
768 unsigned vram_width;
769 u64 real_vram_size;
770 int vram_mtrr;
771 u64 gtt_base_align;
772 u64 mc_mask;
773 const struct firmware *fw; /* MC firmware */
774 uint32_t fw_version;
775 struct amdgpu_irq_src vm_fault;
776 uint32_t vram_type;
777};
778
779/*
780 * GPU doorbell structures, functions & helpers
781 */
782typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
783{
784 AMDGPU_DOORBELL_KIQ = 0x000,
785 AMDGPU_DOORBELL_HIQ = 0x001,
786 AMDGPU_DOORBELL_DIQ = 0x002,
787 AMDGPU_DOORBELL_MEC_RING0 = 0x010,
788 AMDGPU_DOORBELL_MEC_RING1 = 0x011,
789 AMDGPU_DOORBELL_MEC_RING2 = 0x012,
790 AMDGPU_DOORBELL_MEC_RING3 = 0x013,
791 AMDGPU_DOORBELL_MEC_RING4 = 0x014,
792 AMDGPU_DOORBELL_MEC_RING5 = 0x015,
793 AMDGPU_DOORBELL_MEC_RING6 = 0x016,
794 AMDGPU_DOORBELL_MEC_RING7 = 0x017,
795 AMDGPU_DOORBELL_GFX_RING0 = 0x020,
796 AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
797 AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
798 AMDGPU_DOORBELL_IH = 0x1E8,
799 AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
800 AMDGPU_DOORBELL_INVALID = 0xFFFF
801} AMDGPU_DOORBELL_ASSIGNMENT;
802
803struct amdgpu_doorbell {
804 /* doorbell mmio */
805 resource_size_t base;
806 resource_size_t size;
807 u32 __iomem *ptr;
808 u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
809};
810
811void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
812 phys_addr_t *aperture_base,
813 size_t *aperture_size,
814 size_t *start_offset);
815
816/*
817 * IRQS.
818 */
819
820struct amdgpu_flip_work {
821 struct work_struct flip_work;
822 struct work_struct unpin_work;
823 struct amdgpu_device *adev;
824 int crtc_id;
825 uint64_t base;
826 struct drm_pending_vblank_event *event;
827 struct amdgpu_bo *old_rbo;
828 struct fence *fence;
829};
830
831
832/*
833 * CP & rings.
834 */
835
836struct amdgpu_ib {
837 struct amdgpu_sa_bo *sa_bo;
838 uint32_t length_dw;
839 uint64_t gpu_addr;
840 uint32_t *ptr;
841 struct amdgpu_ring *ring;
842 struct amdgpu_fence *fence;
843 struct amdgpu_user_fence *user;
844 struct amdgpu_vm *vm;
845 struct amdgpu_ctx *ctx;
846 struct amdgpu_sync sync;
847 uint32_t gds_base, gds_size;
848 uint32_t gws_base, gws_size;
849 uint32_t oa_base, oa_size;
850 uint32_t flags;
851 /* resulting sequence number */
852 uint64_t sequence;
853};
854
855enum amdgpu_ring_type {
856 AMDGPU_RING_TYPE_GFX,
857 AMDGPU_RING_TYPE_COMPUTE,
858 AMDGPU_RING_TYPE_SDMA,
859 AMDGPU_RING_TYPE_UVD,
860 AMDGPU_RING_TYPE_VCE
861};
862
863extern struct amd_sched_backend_ops amdgpu_sched_ops;
864
865struct amdgpu_ring {
866 struct amdgpu_device *adev;
867 const struct amdgpu_ring_funcs *funcs;
868 struct amdgpu_fence_driver fence_drv;
869 struct amd_gpu_scheduler *scheduler;
870
871 struct mutex *ring_lock;
872 struct amdgpu_bo *ring_obj;
873 volatile uint32_t *ring;
874 unsigned rptr_offs;
875 u64 next_rptr_gpu_addr;
876 volatile u32 *next_rptr_cpu_addr;
877 unsigned wptr;
878 unsigned wptr_old;
879 unsigned ring_size;
880 unsigned ring_free_dw;
881 int count_dw;
882 atomic_t last_rptr;
883 atomic64_t last_activity;
884 uint64_t gpu_addr;
885 uint32_t align_mask;
886 uint32_t ptr_mask;
887 bool ready;
888 u32 nop;
889 u32 idx;
890 u64 last_semaphore_signal_addr;
891 u64 last_semaphore_wait_addr;
892 u32 me;
893 u32 pipe;
894 u32 queue;
895 struct amdgpu_bo *mqd_obj;
896 u32 doorbell_index;
897 bool use_doorbell;
898 unsigned wptr_offs;
899 unsigned next_rptr_offs;
900 unsigned fence_offs;
901 struct amdgpu_ctx *current_ctx;
902 enum amdgpu_ring_type type;
903 char name[16];
904};
905
906/*
907 * VM
908 */
909
910/* maximum number of VMIDs */
911#define AMDGPU_NUM_VM 16
912
913/* number of entries in page table */
914#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
915
916/* PTBs (Page Table Blocks) need to be aligned to 32K */
917#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
918#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
919#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)
920
921#define AMDGPU_PTE_VALID (1 << 0)
922#define AMDGPU_PTE_SYSTEM (1 << 1)
923#define AMDGPU_PTE_SNOOPED (1 << 2)
924
925/* VI only */
926#define AMDGPU_PTE_EXECUTABLE (1 << 4)
927
928#define AMDGPU_PTE_READABLE (1 << 5)
929#define AMDGPU_PTE_WRITEABLE (1 << 6)
930
931/* PTE (Page Table Entry) fragment field for different page sizes */
932#define AMDGPU_PTE_FRAG_4KB (0 << 7)
933#define AMDGPU_PTE_FRAG_64KB (4 << 7)
934#define AMDGPU_LOG2_PAGES_PER_FRAG 4
935
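/*
 * Illustration only (flag choice is an example, not taken from the driver):
 * a CPU-accessible, writable system page mapped with the default 4KB
 * fragment size would typically combine
 *   AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *   AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_FRAG_4KB
 */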
936struct amdgpu_vm_pt {
937 struct amdgpu_bo *bo;
938 uint64_t addr;
939};
940
941struct amdgpu_vm_id {
942 unsigned id;
943 uint64_t pd_gpu_addr;
944 /* last flushed PD/PT update */
945 struct amdgpu_fence *flushed_updates;
946 /* last use of vmid */
947 struct amdgpu_fence *last_id_use;
948};
949
950struct amdgpu_vm {
951 struct mutex mutex;
952
953 struct rb_root va;
954
955 /* protecting invalidated and freed */
956 spinlock_t status_lock;
957
958 /* BOs moved, but not yet updated in the PT */
959 struct list_head invalidated;
960
961 /* BOs freed, but not yet updated in the PT */
962 struct list_head freed;
963
964 /* contains the page directory */
965 struct amdgpu_bo *page_directory;
966 unsigned max_pde_used;
967
968 /* array of page tables, one for each page directory entry */
969 struct amdgpu_vm_pt *page_tables;
970
971 /* for id and flush management per ring */
972 struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
973};
974
975struct amdgpu_vm_manager {
976 struct amdgpu_fence *active[AMDGPU_NUM_VM];
977 uint32_t max_pfn;
978 /* number of VMIDs */
979 unsigned nvm;
980 /* vram base address for page table entry */
981 u64 vram_base_offset;
982 /* is vm enabled? */
983 bool enabled;
984 /* for hw to save the PD addr on suspend/resume */
985 uint32_t saved_table_addr[AMDGPU_NUM_VM];
986 /* vm pte handling */
987 const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
988 struct amdgpu_ring *vm_pte_funcs_ring;
989};
990
991/*
992 * context related structures
993 */
994
995#define AMDGPU_CTX_MAX_CS_PENDING 16
996
997struct amdgpu_ctx_ring {
998 uint64_t sequence;
999 struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING];
1000 struct amd_context_entity c_entity;
1001};
1002
1003struct amdgpu_ctx {
1004 struct kref refcount;
1005 struct amdgpu_device *adev;
1006 unsigned reset_counter;
1007 spinlock_t ring_lock;
1008 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
1009};
1010
1011struct amdgpu_ctx_mgr {
1012 struct amdgpu_device *adev;
1013 struct mutex lock;
1014 /* protected by lock */
1015 struct idr ctx_handles;
1016};
1017
1018int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
1019 uint32_t *id);
1020int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
1021 uint32_t id);
1022
1023void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
1024
1025struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
1026int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
1027
1028uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
1029 struct fence *fence);
1030struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
1031 struct amdgpu_ring *ring, uint64_t seq);
1032
1033int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
1034 struct drm_file *filp);
1035
1036
1037/*
1038 * file private structure
1039 */
1040
1041struct amdgpu_fpriv {
1042 struct amdgpu_vm vm;
1043 struct mutex bo_list_lock;
1044 struct idr bo_list_handles;
1045 struct amdgpu_ctx_mgr ctx_mgr;
1046};
1047
1048/*
1049 * residency list
1050 */
1051
1052struct amdgpu_bo_list {
1053 struct mutex lock;
1054 struct amdgpu_bo *gds_obj;
1055 struct amdgpu_bo *gws_obj;
1056 struct amdgpu_bo *oa_obj;
1057 bool has_userptr;
1058 unsigned num_entries;
1059 struct amdgpu_bo_list_entry *array;
1060};
1061
1062struct amdgpu_bo_list *
1063amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
1064void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
1065void amdgpu_bo_list_copy(struct amdgpu_device *adev,
1066 struct amdgpu_bo_list *dst,
1067 struct amdgpu_bo_list *src);
1068void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
1069
1070/*
1071 * GFX stuff
1072 */
1073#include "clearstate_defs.h"
1074
1075struct amdgpu_rlc {
1076 /* for power gating */
1077 struct amdgpu_bo *save_restore_obj;
1078 uint64_t save_restore_gpu_addr;
1079 volatile uint32_t *sr_ptr;
1080 const u32 *reg_list;
1081 u32 reg_list_size;
1082 /* for clear state */
1083 struct amdgpu_bo *clear_state_obj;
1084 uint64_t clear_state_gpu_addr;
1085 volatile uint32_t *cs_ptr;
1086 const struct cs_section_def *cs_data;
1087 u32 clear_state_size;
1088 /* for cp tables */
1089 struct amdgpu_bo *cp_table_obj;
1090 uint64_t cp_table_gpu_addr;
1091 volatile uint32_t *cp_table_ptr;
1092 u32 cp_table_size;
1093};
1094
1095struct amdgpu_mec {
1096 struct amdgpu_bo *hpd_eop_obj;
1097 u64 hpd_eop_gpu_addr;
1098 u32 num_pipe;
1099 u32 num_mec;
1100 u32 num_queue;
1101};
1102
1103/*
1104 * GPU scratch registers structures, functions & helpers
1105 */
1106struct amdgpu_scratch {
1107 unsigned num_reg;
1108 uint32_t reg_base;
1109 bool free[32];
1110 uint32_t reg[32];
1111};
1112
1113/*
1114 * GFX configurations
1115 */
1116struct amdgpu_gca_config {
1117 unsigned max_shader_engines;
1118 unsigned max_tile_pipes;
1119 unsigned max_cu_per_sh;
1120 unsigned max_sh_per_se;
1121 unsigned max_backends_per_se;
1122 unsigned max_texture_channel_caches;
1123 unsigned max_gprs;
1124 unsigned max_gs_threads;
1125 unsigned max_hw_contexts;
1126 unsigned sc_prim_fifo_size_frontend;
1127 unsigned sc_prim_fifo_size_backend;
1128 unsigned sc_hiz_tile_fifo_size;
1129 unsigned sc_earlyz_tile_fifo_size;
1130
1131 unsigned num_tile_pipes;
1132 unsigned backend_enable_mask;
1133 unsigned mem_max_burst_length_bytes;
1134 unsigned mem_row_size_in_kb;
1135 unsigned shader_engine_tile_size;
1136 unsigned num_gpus;
1137 unsigned multi_gpu_tile_size;
1138 unsigned mc_arb_ramcfg;
1139 unsigned gb_addr_config;
1140
1141 uint32_t tile_mode_array[32];
1142 uint32_t macrotile_mode_array[16];
1143};
1144
1145struct amdgpu_gfx {
1146 struct mutex gpu_clock_mutex;
1147 struct amdgpu_gca_config config;
1148 struct amdgpu_rlc rlc;
1149 struct amdgpu_mec mec;
1150 struct amdgpu_scratch scratch;
1151 const struct firmware *me_fw; /* ME firmware */
1152 uint32_t me_fw_version;
1153 const struct firmware *pfp_fw; /* PFP firmware */
1154 uint32_t pfp_fw_version;
1155 const struct firmware *ce_fw; /* CE firmware */
1156 uint32_t ce_fw_version;
1157 const struct firmware *rlc_fw; /* RLC firmware */
1158 uint32_t rlc_fw_version;
1159 const struct firmware *mec_fw; /* MEC firmware */
1160 uint32_t mec_fw_version;
1161 const struct firmware *mec2_fw; /* MEC2 firmware */
1162 uint32_t mec2_fw_version;
1163 uint32_t me_feature_version;
1164 uint32_t ce_feature_version;
1165 uint32_t pfp_feature_version;
1166 uint32_t rlc_feature_version;
1167 uint32_t mec_feature_version;
1168 uint32_t mec2_feature_version;
1169 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
1170 unsigned num_gfx_rings;
1171 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
1172 unsigned num_compute_rings;
1173 struct amdgpu_irq_src eop_irq;
1174 struct amdgpu_irq_src priv_reg_irq;
1175 struct amdgpu_irq_src priv_inst_irq;
1176 /* gfx status */
1177 uint32_t gfx_current_status;
1178 /* sync signal for const engine */
1179 unsigned ce_sync_offs;
1180 /* ce ram size */
1181 unsigned ce_ram_size;
1182};
1183
1184int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
1185 unsigned size, struct amdgpu_ib *ib);
1186void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
1187int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
1188 struct amdgpu_ib *ib, void *owner);
1189int amdgpu_ib_pool_init(struct amdgpu_device *adev);
1190void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
1191int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
1192/* Ring access between begin & end cannot sleep */
1193void amdgpu_ring_free_size(struct amdgpu_ring *ring);
1194int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
1195int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
1196void amdgpu_ring_commit(struct amdgpu_ring *ring);
1197void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
1198void amdgpu_ring_undo(struct amdgpu_ring *ring);
1199void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
1200void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
1201bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
1202unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
1203 uint32_t **data);
1204int amdgpu_ring_restore(struct amdgpu_ring *ring,
1205 unsigned size, uint32_t *data);
1206int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
1207 unsigned ring_size, u32 nop, u32 align_mask,
1208 struct amdgpu_irq_src *irq_src, unsigned irq_type,
1209 enum amdgpu_ring_type ring_type);
1210void amdgpu_ring_fini(struct amdgpu_ring *ring);
1211
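/*
 * Illustrative sketch only (not the driver's code): the usual pattern for
 * the emit helpers declared above - reserve ring space under the ring lock,
 * write the dwords, then commit. It assumes the amdgpu_ring_write() helper
 * defined elsewhere in this header; the single reserved dword is made up.
 */
static inline int amdgpu_ring_emit_nop_example(struct amdgpu_ring *ring)
{
	int r;

	r = amdgpu_ring_lock(ring, 1);		/* reserve 1 dword */
	if (r)
		return r;

	amdgpu_ring_write(ring, ring->nop);	/* emit one NOP packet */
	amdgpu_ring_unlock_commit(ring);	/* update wptr and drop the lock */
	return 0;
}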
1212/*
1213 * CS.
1214 */
1215struct amdgpu_cs_chunk {
1216 uint32_t chunk_id;
1217 uint32_t length_dw;
1218 uint32_t *kdata;
1219 void __user *user_ptr;
1220};
1221
1222struct amdgpu_cs_parser {
1223 struct amdgpu_device *adev;
1224 struct drm_file *filp;
1225 struct amdgpu_ctx *ctx;
1226 struct amdgpu_bo_list *bo_list;
1227 /* chunks */
1228 unsigned nchunks;
1229 struct amdgpu_cs_chunk *chunks;
1230 /* relocations */
1231 struct amdgpu_bo_list_entry *vm_bos;
1232 struct list_head validated;
1233
1234 struct amdgpu_ib *ibs;
1235 uint32_t num_ibs;
1236
1237 struct ww_acquire_ctx ticket;
1238
1239 /* user fence */
1240 struct amdgpu_user_fence uf;
1241
1242 struct mutex job_lock;
1243 struct work_struct job_work;
1244 int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
1245 int (*run_job)(struct amdgpu_cs_parser *sched_job);
1246};
1247
1248static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
1249{
1250 return p->ibs[ib_idx].ptr[idx];
1251}
1252
1253/*
1254 * Writeback
1255 */
1256#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
1257
1258struct amdgpu_wb {
1259 struct amdgpu_bo *wb_obj;
1260 volatile uint32_t *wb;
1261 uint64_t gpu_addr;
1262 u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
1263 unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
1264};
1265
1266int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
1267void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
1268
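/*
 * Illustrative sketch only (not the driver's implementation): how a
 * writeback slot could be handed out from the used[] bitmap declared above,
 * using the generic kernel bitmap helpers.
 */
static inline int amdgpu_wb_get_example(struct amdgpu_wb *wb, u32 *slot)
{
	unsigned long offset = find_first_zero_bit(wb->used, wb->num_wb);

	if (offset >= wb->num_wb)
		return -EINVAL;		/* all reserved slots are in use */

	__set_bit(offset, wb->used);
	*slot = offset;
	return 0;
}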
1269/**
1270 * struct amdgpu_pm - power management data
1271 * It keeps track of various data needed to make power management decisions.
1272 */
1273
1274enum amdgpu_pm_state_type {
1275 /* not used for dpm */
1276 POWER_STATE_TYPE_DEFAULT,
1277 POWER_STATE_TYPE_POWERSAVE,
1278 /* user selectable states */
1279 POWER_STATE_TYPE_BATTERY,
1280 POWER_STATE_TYPE_BALANCED,
1281 POWER_STATE_TYPE_PERFORMANCE,
1282 /* internal states */
1283 POWER_STATE_TYPE_INTERNAL_UVD,
1284 POWER_STATE_TYPE_INTERNAL_UVD_SD,
1285 POWER_STATE_TYPE_INTERNAL_UVD_HD,
1286 POWER_STATE_TYPE_INTERNAL_UVD_HD2,
1287 POWER_STATE_TYPE_INTERNAL_UVD_MVC,
1288 POWER_STATE_TYPE_INTERNAL_BOOT,
1289 POWER_STATE_TYPE_INTERNAL_THERMAL,
1290 POWER_STATE_TYPE_INTERNAL_ACPI,
1291 POWER_STATE_TYPE_INTERNAL_ULV,
1292 POWER_STATE_TYPE_INTERNAL_3DPERF,
1293};
1294
1295enum amdgpu_int_thermal_type {
1296 THERMAL_TYPE_NONE,
1297 THERMAL_TYPE_EXTERNAL,
1298 THERMAL_TYPE_EXTERNAL_GPIO,
1299 THERMAL_TYPE_RV6XX,
1300 THERMAL_TYPE_RV770,
1301 THERMAL_TYPE_ADT7473_WITH_INTERNAL,
1302 THERMAL_TYPE_EVERGREEN,
1303 THERMAL_TYPE_SUMO,
1304 THERMAL_TYPE_NI,
1305 THERMAL_TYPE_SI,
1306 THERMAL_TYPE_EMC2103_WITH_INTERNAL,
1307 THERMAL_TYPE_CI,
1308 THERMAL_TYPE_KV,
1309};
1310
1311enum amdgpu_dpm_auto_throttle_src {
1312 AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
1313 AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
1314};
1315
1316enum amdgpu_dpm_event_src {
1317 AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
1318 AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
1319 AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
1320 AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
1321 AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
1322};
1323
1324#define AMDGPU_MAX_VCE_LEVELS 6
1325
1326enum amdgpu_vce_level {
1327 AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
1328 AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
1329 AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
1330 AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
1331 AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
1332 AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
1333};
1334
1335struct amdgpu_ps {
1336 u32 caps; /* vbios flags */
1337 u32 class; /* vbios flags */
1338 u32 class2; /* vbios flags */
1339 /* UVD clocks */
1340 u32 vclk;
1341 u32 dclk;
1342 /* VCE clocks */
1343 u32 evclk;
1344 u32 ecclk;
1345 bool vce_active;
1346 enum amdgpu_vce_level vce_level;
1347 /* asic priv */
1348 void *ps_priv;
1349};
1350
1351struct amdgpu_dpm_thermal {
1352 /* thermal interrupt work */
1353 struct work_struct work;
1354 /* low temperature threshold */
1355 int min_temp;
1356 /* high temperature threshold */
1357 int max_temp;
1358 /* was last interrupt low to high or high to low */
1359 bool high_to_low;
1360 /* interrupt source */
1361 struct amdgpu_irq_src irq;
1362};
1363
1364enum amdgpu_clk_action
1365{
1366 AMDGPU_SCLK_UP = 1,
1367 AMDGPU_SCLK_DOWN
1368};
1369
1370struct amdgpu_blacklist_clocks
1371{
1372 u32 sclk;
1373 u32 mclk;
1374 enum amdgpu_clk_action action;
1375};
1376
1377struct amdgpu_clock_and_voltage_limits {
1378 u32 sclk;
1379 u32 mclk;
1380 u16 vddc;
1381 u16 vddci;
1382};
1383
1384struct amdgpu_clock_array {
1385 u32 count;
1386 u32 *values;
1387};
1388
1389struct amdgpu_clock_voltage_dependency_entry {
1390 u32 clk;
1391 u16 v;
1392};
1393
1394struct amdgpu_clock_voltage_dependency_table {
1395 u32 count;
1396 struct amdgpu_clock_voltage_dependency_entry *entries;
1397};
1398
1399union amdgpu_cac_leakage_entry {
1400 struct {
1401 u16 vddc;
1402 u32 leakage;
1403 };
1404 struct {
1405 u16 vddc1;
1406 u16 vddc2;
1407 u16 vddc3;
1408 };
1409};
1410
1411struct amdgpu_cac_leakage_table {
1412 u32 count;
1413 union amdgpu_cac_leakage_entry *entries;
1414};
1415
1416struct amdgpu_phase_shedding_limits_entry {
1417 u16 voltage;
1418 u32 sclk;
1419 u32 mclk;
1420};
1421
1422struct amdgpu_phase_shedding_limits_table {
1423 u32 count;
1424 struct amdgpu_phase_shedding_limits_entry *entries;
1425};
1426
1427struct amdgpu_uvd_clock_voltage_dependency_entry {
1428 u32 vclk;
1429 u32 dclk;
1430 u16 v;
1431};
1432
1433struct amdgpu_uvd_clock_voltage_dependency_table {
1434 u8 count;
1435 struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
1436};
1437
1438struct amdgpu_vce_clock_voltage_dependency_entry {
1439 u32 ecclk;
1440 u32 evclk;
1441 u16 v;
1442};
1443
1444struct amdgpu_vce_clock_voltage_dependency_table {
1445 u8 count;
1446 struct amdgpu_vce_clock_voltage_dependency_entry *entries;
1447};
1448
1449struct amdgpu_ppm_table {
1450 u8 ppm_design;
1451 u16 cpu_core_number;
1452 u32 platform_tdp;
1453 u32 small_ac_platform_tdp;
1454 u32 platform_tdc;
1455 u32 small_ac_platform_tdc;
1456 u32 apu_tdp;
1457 u32 dgpu_tdp;
1458 u32 dgpu_ulv_power;
1459 u32 tj_max;
1460};
1461
1462struct amdgpu_cac_tdp_table {
1463 u16 tdp;
1464 u16 configurable_tdp;
1465 u16 tdc;
1466 u16 battery_power_limit;
1467 u16 small_power_limit;
1468 u16 low_cac_leakage;
1469 u16 high_cac_leakage;
1470 u16 maximum_power_delivery_limit;
1471};
1472
1473struct amdgpu_dpm_dynamic_state {
1474 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
1475 struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
1476 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
1477 struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
1478 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
1479 struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
1480 struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
1481 struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
1482 struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
1483 struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
1484 struct amdgpu_clock_array valid_sclk_values;
1485 struct amdgpu_clock_array valid_mclk_values;
1486 struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
1487 struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
1488 u32 mclk_sclk_ratio;
1489 u32 sclk_mclk_delta;
1490 u16 vddc_vddci_delta;
1491 u16 min_vddc_for_pcie_gen2;
1492 struct amdgpu_cac_leakage_table cac_leakage_table;
1493 struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
1494 struct amdgpu_ppm_table *ppm_table;
1495 struct amdgpu_cac_tdp_table *cac_tdp_table;
1496};
1497
1498struct amdgpu_dpm_fan {
1499 u16 t_min;
1500 u16 t_med;
1501 u16 t_high;
1502 u16 pwm_min;
1503 u16 pwm_med;
1504 u16 pwm_high;
1505 u8 t_hyst;
1506 u32 cycle_delay;
1507 u16 t_max;
1508 u8 control_mode;
1509 u16 default_max_fan_pwm;
1510 u16 default_fan_output_sensitivity;
1511 u16 fan_output_sensitivity;
1512 bool ucode_fan_control;
1513};
1514
1515enum amdgpu_pcie_gen {
1516 AMDGPU_PCIE_GEN1 = 0,
1517 AMDGPU_PCIE_GEN2 = 1,
1518 AMDGPU_PCIE_GEN3 = 2,
1519 AMDGPU_PCIE_GEN_INVALID = 0xffff
1520};
1521
1522enum amdgpu_dpm_forced_level {
1523 AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
1524 AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
1525 AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
1526};
1527
1528struct amdgpu_vce_state {
1529 /* vce clocks */
1530 u32 evclk;
1531 u32 ecclk;
1532 /* gpu clocks */
1533 u32 sclk;
1534 u32 mclk;
1535 u8 clk_idx;
1536 u8 pstate;
1537};
1538
1539struct amdgpu_dpm_funcs {
1540 int (*get_temperature)(struct amdgpu_device *adev);
1541 int (*pre_set_power_state)(struct amdgpu_device *adev);
1542 int (*set_power_state)(struct amdgpu_device *adev);
1543 void (*post_set_power_state)(struct amdgpu_device *adev);
1544 void (*display_configuration_changed)(struct amdgpu_device *adev);
1545 u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
1546 u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
1547 void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
1548 void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
1549 int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
1550 bool (*vblank_too_short)(struct amdgpu_device *adev);
1551 void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
1552 void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
1553 void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
1554 void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
1555 u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
1556 int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
1557 int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
1558};
1559
1560struct amdgpu_dpm {
1561 struct amdgpu_ps *ps;
1562 /* number of valid power states */
1563 int num_ps;
1564 /* current power state that is active */
1565 struct amdgpu_ps *current_ps;
1566 /* requested power state */
1567 struct amdgpu_ps *requested_ps;
1568 /* boot up power state */
1569 struct amdgpu_ps *boot_ps;
1570 /* default uvd power state */
1571 struct amdgpu_ps *uvd_ps;
1572 /* vce requirements */
1573 struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
1574 enum amdgpu_vce_level vce_level;
1575 enum amdgpu_pm_state_type state;
1576 enum amdgpu_pm_state_type user_state;
1577 u32 platform_caps;
1578 u32 voltage_response_time;
1579 u32 backbias_response_time;
1580 void *priv;
1581 u32 new_active_crtcs;
1582 int new_active_crtc_count;
1583 u32 current_active_crtcs;
1584 int current_active_crtc_count;
1585 struct amdgpu_dpm_dynamic_state dyn_state;
1586 struct amdgpu_dpm_fan fan;
1587 u32 tdp_limit;
1588 u32 near_tdp_limit;
1589 u32 near_tdp_limit_adjusted;
1590 u32 sq_ramping_threshold;
1591 u32 cac_leakage;
1592 u16 tdp_od_limit;
1593 u32 tdp_adjustment;
1594 u16 load_line_slope;
1595 bool power_control;
1596 bool ac_power;
1597 /* special states active */
1598 bool thermal_active;
1599 bool uvd_active;
1600 bool vce_active;
1601 /* thermal handling */
1602 struct amdgpu_dpm_thermal thermal;
1603 /* forced levels */
1604 enum amdgpu_dpm_forced_level forced_level;
1605};
1606
1607struct amdgpu_pm {
1608 struct mutex mutex;
1609 u32 current_sclk;
1610 u32 current_mclk;
1611 u32 default_sclk;
1612 u32 default_mclk;
1613 struct amdgpu_i2c_chan *i2c_bus;
1614 /* internal thermal controller on rv6xx+ */
1615 enum amdgpu_int_thermal_type int_thermal_type;
1616 struct device *int_hwmon_dev;
1617 /* fan control parameters */
1618 bool no_fan;
1619 u8 fan_pulses_per_revolution;
1620 u8 fan_min_rpm;
1621 u8 fan_max_rpm;
1622 /* dpm */
1623 bool dpm_enabled;
1624 struct amdgpu_dpm dpm;
1625 const struct firmware *fw; /* SMC firmware */
1626 uint32_t fw_version;
1627 const struct amdgpu_dpm_funcs *funcs;
1628};
1629
1630/*
1631 * UVD
1632 */
1633#define AMDGPU_MAX_UVD_HANDLES 10
1634#define AMDGPU_UVD_STACK_SIZE (1024*1024)
1635#define AMDGPU_UVD_HEAP_SIZE (1024*1024)
1636#define AMDGPU_UVD_FIRMWARE_OFFSET 256
1637
1638struct amdgpu_uvd {
1639 struct amdgpu_bo *vcpu_bo;
1640 void *cpu_addr;
1641 uint64_t gpu_addr;
1642 void *saved_bo;
1643 atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
1644 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
1645 struct delayed_work idle_work;
1646 const struct firmware *fw; /* UVD firmware */
1647 struct amdgpu_ring ring;
1648 struct amdgpu_irq_src irq;
1649 bool address_64_bit;
1650};
1651
1652/*
1653 * VCE
1654 */
1655#define AMDGPU_MAX_VCE_HANDLES 16
97b2e202
AD
1656#define AMDGPU_VCE_FIRMWARE_OFFSET 256
1657
6a585777
AD
1658#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
1659#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
1660
97b2e202
AD
1661struct amdgpu_vce {
1662 struct amdgpu_bo *vcpu_bo;
1663 uint64_t gpu_addr;
1664 unsigned fw_version;
1665 unsigned fb_version;
1666 atomic_t handles[AMDGPU_MAX_VCE_HANDLES];
1667 struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
1668 uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
1669 struct delayed_work idle_work;
1670 const struct firmware *fw; /* VCE firmware */
1671 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
1672 struct amdgpu_irq_src irq;
1673 unsigned harvest_config;
1674};
1675
1676/*
1677 * SDMA
1678 */
1679struct amdgpu_sdma {
1680 /* SDMA firmware */
1681 const struct firmware *fw;
1682 uint32_t fw_version;
1683 uint32_t feature_version;
1684
1685 struct amdgpu_ring ring;
1686};
1687
1688/*
1689 * Firmware
1690 */
1691struct amdgpu_firmware {
1692 struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
1693 bool smu_load;
1694 struct amdgpu_bo *fw_buf;
1695 unsigned int fw_size;
1696};
1697
1698/*
1699 * Benchmarking
1700 */
1701void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
1702
1703
1704/*
1705 * Testing
1706 */
1707void amdgpu_test_moves(struct amdgpu_device *adev);
1708void amdgpu_test_ring_sync(struct amdgpu_device *adev,
1709 struct amdgpu_ring *cpA,
1710 struct amdgpu_ring *cpB);
1711void amdgpu_test_syncing(struct amdgpu_device *adev);
1712
1713/*
1714 * MMU Notifier
1715 */
1716#if defined(CONFIG_MMU_NOTIFIER)
1717int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
1718void amdgpu_mn_unregister(struct amdgpu_bo *bo);
1719#else
1720static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
1721{
1722 return -ENODEV;
1723}
1724static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
1725#endif
1726
1727/*
1728 * Debugfs
1729 */
1730struct amdgpu_debugfs {
1731 struct drm_info_list *files;
1732 unsigned num_files;
1733};
1734
1735int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
1736 struct drm_info_list *files,
1737 unsigned nfiles);
1738int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
1739
1740#if defined(CONFIG_DEBUG_FS)
1741int amdgpu_debugfs_init(struct drm_minor *minor);
1742void amdgpu_debugfs_cleanup(struct drm_minor *minor);
1743#endif
1744
1745/*
1746 * amdgpu smumgr functions
1747 */
1748struct amdgpu_smumgr_funcs {
1749 int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
1750 int (*request_smu_load_fw)(struct amdgpu_device *adev);
1751 int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
1752};
1753
1754/*
1755 * amdgpu smumgr
1756 */
1757struct amdgpu_smumgr {
1758 struct amdgpu_bo *toc_buf;
1759 struct amdgpu_bo *smu_buf;
1760 /* asic priv smu data */
1761 void *priv;
1762 spinlock_t smu_lock;
1763 /* smumgr functions */
1764 const struct amdgpu_smumgr_funcs *smumgr_funcs;
1765 /* ucode loading complete flag */
1766 uint32_t fw_flags;
1767};
1768
1769/*
1770 * ASIC specific register table accessible by UMD
1771 */
1772struct amdgpu_allowed_register_entry {
1773 uint32_t reg_offset;
1774 bool untouched;
1775 bool grbm_indexed;
1776};
1777
1778struct amdgpu_cu_info {
1779 uint32_t number; /* total active CU number */
1780 uint32_t ao_cu_mask;
1781 uint32_t bitmap[4][4];
1782};
1783
1784
1785/*
1786 * ASIC specific functions.
1787 */
1788struct amdgpu_asic_funcs {
1789 bool (*read_disabled_bios)(struct amdgpu_device *adev);
1790 int (*read_register)(struct amdgpu_device *adev, u32 se_num,
1791 u32 sh_num, u32 reg_offset, u32 *value);
1792 void (*set_vga_state)(struct amdgpu_device *adev, bool state);
1793 int (*reset)(struct amdgpu_device *adev);
1794 /* wait for mc_idle */
1795 int (*wait_for_mc_idle)(struct amdgpu_device *adev);
1796 /* get the reference clock */
1797 u32 (*get_xclk)(struct amdgpu_device *adev);
1798 /* get the gpu clock counter */
1799 uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
1800 int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
1801 /* MM block clocks */
1802 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
1803 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
1804};
1805
1806/*
1807 * IOCTL.
1808 */
1809int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
1810 struct drm_file *filp);
1811int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
1812 struct drm_file *filp);
1813
1814int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
1815 struct drm_file *filp);
1816int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
1817 struct drm_file *filp);
1818int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
1819 struct drm_file *filp);
1820int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
1821 struct drm_file *filp);
1822int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
1823 struct drm_file *filp);
1824int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
1825 struct drm_file *filp);
1826int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1827int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1828
1829int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
1830 struct drm_file *filp);
1831
1832/* VRAM scratch page for HDP bug, default vram page */
1833struct amdgpu_vram_scratch {
1834 struct amdgpu_bo *robj;
1835 volatile uint32_t *ptr;
1836 u64 gpu_addr;
1837};
1838
1839/*
1840 * ACPI
1841 */
1842struct amdgpu_atif_notification_cfg {
1843 bool enabled;
1844 int command_code;
1845};
1846
1847struct amdgpu_atif_notifications {
1848 bool display_switch;
1849 bool expansion_mode_change;
1850 bool thermal_state;
1851 bool forced_power_state;
1852 bool system_power_state;
1853 bool display_conf_change;
1854 bool px_gfx_switch;
1855 bool brightness_change;
1856 bool dgpu_display_event;
1857};
1858
1859struct amdgpu_atif_functions {
1860 bool system_params;
1861 bool sbios_requests;
1862 bool select_active_disp;
1863 bool lid_state;
1864 bool get_tv_standard;
1865 bool set_tv_standard;
1866 bool get_panel_expansion_mode;
1867 bool set_panel_expansion_mode;
1868 bool temperature_change;
1869 bool graphics_device_types;
1870};
1871
1872struct amdgpu_atif {
1873 struct amdgpu_atif_notifications notifications;
1874 struct amdgpu_atif_functions functions;
1875 struct amdgpu_atif_notification_cfg notification_cfg;
1876 struct amdgpu_encoder *encoder_for_bl;
1877};
1878
1879struct amdgpu_atcs_functions {
1880 bool get_ext_state;
1881 bool pcie_perf_req;
1882 bool pcie_dev_rdy;
1883 bool pcie_bus_width;
1884};
1885
1886struct amdgpu_atcs {
1887 struct amdgpu_atcs_functions functions;
1888};
1889
1890/*
1891 * CGS
1892 */
1893void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
1894void amdgpu_cgs_destroy_device(void *cgs_device);
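/*
 * Usage sketch (illustrative only): a CGS client handle would typically be
 * created once per device and destroyed on driver teardown.  The surrounding
 * init/fini helpers shown here are hypothetical.
 */
#if 0 /* example only */
static int example_cgs_init(struct amdgpu_device *adev, void **cgs_device)
{
	*cgs_device = amdgpu_cgs_create_device(adev);
	return *cgs_device ? 0 : -ENOMEM;
}

static void example_cgs_fini(void *cgs_device)
{
	amdgpu_cgs_destroy_device(cgs_device);
}
#endif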
1895
1896
1897/*
1898 * Core structure, functions and helpers.
1899 */
1900typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
1901typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1902
1903typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1904typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
1905
1906struct amdgpu_ip_block_status {
1907 bool valid;
1908 bool sw;
1909 bool hw;
1910};
1911
1912struct amdgpu_device {
1913 struct device *dev;
1914 struct drm_device *ddev;
1915 struct pci_dev *pdev;
1916 struct rw_semaphore exclusive_lock;
1917
1918 /* ASIC */
2f7d10b3 1919 enum amd_asic_type asic_type;
1920 uint32_t family;
1921 uint32_t rev_id;
1922 uint32_t external_rev_id;
1923 unsigned long flags;
1924 int usec_timeout;
1925 const struct amdgpu_asic_funcs *asic_funcs;
1926 bool shutdown;
1927 bool suspend;
1928 bool need_dma32;
1929 bool accel_working;
1930 bool needs_reset;
1931 struct work_struct reset_work;
1932 struct notifier_block acpi_nb;
1933 struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
1934 struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
1935 unsigned debugfs_count;
1936#if defined(CONFIG_DEBUG_FS)
1937 struct dentry *debugfs_regs;
1938#endif
1939 struct amdgpu_atif atif;
1940 struct amdgpu_atcs atcs;
1941 struct mutex srbm_mutex;
1942 /* GRBM index mutex. Protects concurrent access to GRBM index */
1943 struct mutex grbm_idx_mutex;
1944 struct dev_pm_domain vga_pm_domain;
1945 bool have_disp_power_ref;
1946
1947 /* BIOS */
1948 uint8_t *bios;
1949 bool is_atom_bios;
1950 uint16_t bios_header_start;
1951 struct amdgpu_bo *stollen_vga_memory;
1952 uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
1953
1954 /* Register/doorbell mmio */
1955 resource_size_t rmmio_base;
1956 resource_size_t rmmio_size;
1957 void __iomem *rmmio;
1958 /* protects concurrent MM_INDEX/DATA based register access */
1959 spinlock_t mmio_idx_lock;
1960 /* protects concurrent SMC based register access */
1961 spinlock_t smc_idx_lock;
1962 amdgpu_rreg_t smc_rreg;
1963 amdgpu_wreg_t smc_wreg;
1964 /* protects concurrent PCIE register access */
1965 spinlock_t pcie_idx_lock;
1966 amdgpu_rreg_t pcie_rreg;
1967 amdgpu_wreg_t pcie_wreg;
1968 /* protects concurrent UVD register access */
1969 spinlock_t uvd_ctx_idx_lock;
1970 amdgpu_rreg_t uvd_ctx_rreg;
1971 amdgpu_wreg_t uvd_ctx_wreg;
1972 /* protects concurrent DIDT register access */
1973 spinlock_t didt_idx_lock;
1974 amdgpu_rreg_t didt_rreg;
1975 amdgpu_wreg_t didt_wreg;
1976 /* protects concurrent ENDPOINT (audio) register access */
1977 spinlock_t audio_endpt_idx_lock;
1978 amdgpu_block_rreg_t audio_endpt_rreg;
1979 amdgpu_block_wreg_t audio_endpt_wreg;
1980 void __iomem *rio_mem;
1981 resource_size_t rio_mem_size;
1982 struct amdgpu_doorbell doorbell;
1983
1984 /* clock/pll info */
1985 struct amdgpu_clock clock;
1986
1987 /* MC */
1988 struct amdgpu_mc mc;
1989 struct amdgpu_gart gart;
1990 struct amdgpu_dummy_page dummy_page;
1991 struct amdgpu_vm_manager vm_manager;
1992
1993 /* memory management */
1994 struct amdgpu_mman mman;
1995 struct amdgpu_gem gem;
1996 struct amdgpu_vram_scratch vram_scratch;
1997 struct amdgpu_wb wb;
1998 atomic64_t vram_usage;
1999 atomic64_t vram_vis_usage;
2000 atomic64_t gtt_usage;
2001 atomic64_t num_bytes_moved;
d94aed5a 2002 atomic_t gpu_reset_counter;
2003
2004 /* display */
2005 struct amdgpu_mode_info mode_info;
2006 struct work_struct hotplug_work;
2007 struct amdgpu_irq_src crtc_irq;
2008 struct amdgpu_irq_src pageflip_irq;
2009 struct amdgpu_irq_src hpd_irq;
2010
2011 /* rings */
2012 wait_queue_head_t fence_queue;
2013 unsigned fence_context;
2014 struct mutex ring_lock;
2015 unsigned num_rings;
2016 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
2017 bool ib_pool_ready;
2018 struct amdgpu_sa_manager ring_tmp_bo;
2019
2020 /* interrupts */
2021 struct amdgpu_irq irq;
2022
2023 /* dpm */
2024 struct amdgpu_pm pm;
2025 u32 cg_flags;
2026 u32 pg_flags;
2027
2028 /* amdgpu smumgr */
2029 struct amdgpu_smumgr smu;
2030
2031 /* gfx */
2032 struct amdgpu_gfx gfx;
2033
2034 /* sdma */
2035 struct amdgpu_sdma sdma[2];
2036 struct amdgpu_irq_src sdma_trap_irq;
2037 struct amdgpu_irq_src sdma_illegal_inst_irq;
2038
2039 /* uvd */
2040 bool has_uvd;
2041 struct amdgpu_uvd uvd;
2042
2043 /* vce */
2044 struct amdgpu_vce vce;
2045
2046 /* firmwares */
2047 struct amdgpu_firmware firmware;
2048
2049 /* GDS */
2050 struct amdgpu_gds gds;
2051
2052 const struct amdgpu_ip_block_version *ip_blocks;
2053 int num_ip_blocks;
8faf0e08 2054 struct amdgpu_ip_block_status *ip_block_status;
2055 struct mutex mn_lock;
2056 DECLARE_HASHTABLE(mn_hash, 7);
2057
2058 /* tracking pinned memory */
2059 u64 vram_pin_size;
2060 u64 gart_pin_size;
2061
2062 /* amdkfd interface */
2063 struct kfd_dev *kfd;
2064};
2065
2066bool amdgpu_device_is_px(struct drm_device *dev);
2067int amdgpu_device_init(struct amdgpu_device *adev,
2068 struct drm_device *ddev,
2069 struct pci_dev *pdev,
2070 uint32_t flags);
2071void amdgpu_device_fini(struct amdgpu_device *adev);
2072int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
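/*
 * Illustrative sketch of the expected init/teardown pairing for the core
 * device functions above.  The caller and the flags value are hypothetical;
 * the real call sites live elsewhere in the driver's load/unload paths.
 */
#if 0 /* example only */
static int example_load(struct amdgpu_device *adev, struct drm_device *ddev,
			struct pci_dev *pdev, uint32_t flags)
{
	int r;

	r = amdgpu_device_init(adev, ddev, pdev, flags);
	if (r)
		return r;

	/* ... bring up KMS state, rings, etc. ... */
	return 0;
}

static void example_unload(struct amdgpu_device *adev)
{
	amdgpu_device_fini(adev);
}
#endif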
2073
2074uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
2075 bool always_indirect);
2076void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
2077 bool always_indirect);
2078u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
2079void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
2080
2081u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
2082void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
2083
2084/*
2085 * Cast helper
2086 */
2087extern const struct fence_ops amdgpu_fence_ops;
2088static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
2089{
2090 struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
2091
2092 if (__f->base.ops == &amdgpu_fence_ops)
2093 return __f;
2094
2095 return NULL;
2096}
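/*
 * Usage sketch: the cast helper returns NULL when a fence does not belong to
 * amdgpu, so callers must handle foreign fences explicitly.
 */
#if 0 /* example only */
static bool example_fence_is_ours(struct fence *f)
{
	/* to_amdgpu_fence() returns NULL for fences owned by other drivers */
	return to_amdgpu_fence(f) != NULL;
}
#endif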
2097
2098/*
2099 * Registers read & write functions.
2100 */
2101#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
2102#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
2103#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
2104#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
2105#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
2106#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
2107#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
2108#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
2109#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
2110#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
2111#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
2112#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
2113#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
2114#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
2115#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
2116#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
2117#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
2118#define WREG32_P(reg, val, mask) \
2119 do { \
2120 uint32_t tmp_ = RREG32(reg); \
2121 tmp_ &= (mask); \
2122 tmp_ |= ((val) & ~(mask)); \
2123 WREG32(reg, tmp_); \
2124 } while (0)
2125#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
2126#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
2127#define WREG32_PLL_P(reg, val, mask) \
2128 do { \
2129 uint32_t tmp_ = RREG32_PLL(reg); \
2130 tmp_ &= (mask); \
2131 tmp_ |= ((val) & ~(mask)); \
2132 WREG32_PLL(reg, tmp_); \
2133 } while (0)
2134#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
2135#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
2136#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
2137
2138#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
2139#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
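/*
 * Illustrative sketch for the masked-write helpers above.  Note the polarity
 * of WREG32_P(): bits set in 'mask' are preserved from the current register
 * value, while bits cleared in 'mask' are taken from 'val'; WREG32_OR() and
 * WREG32_AND() wrap that accordingly.  The register offset and bit below are
 * hypothetical placeholders.
 */
#if 0 /* example only */
#define mmEXAMPLE_CNTL		0x1234		/* hypothetical register */
#define EXAMPLE_CNTL_ENABLE	(1 << 0)	/* hypothetical bit */

static void example_set_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_OR(mmEXAMPLE_CNTL, EXAMPLE_CNTL_ENABLE);
	else
		WREG32_AND(mmEXAMPLE_CNTL, ~EXAMPLE_CNTL_ENABLE);
}
#endif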
2140
2141#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
2142#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
2143
2144#define REG_SET_FIELD(orig_val, reg, field, field_val) \
2145 (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \
2146 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))
2147
2148#define REG_GET_FIELD(value, reg, field) \
2149 (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
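/*
 * Illustrative sketch for REG_SET_FIELD()/REG_GET_FIELD().  They rely on the
 * <REG>__<FIELD>__SHIFT and <REG>__<FIELD>_MASK macros generated for each
 * ASIC; the definitions below are hypothetical stand-ins.
 */
#if 0 /* example only */
#define EXAMPLE_REG__EXAMPLE_FIELD__SHIFT	0x4
#define EXAMPLE_REG__EXAMPLE_FIELD_MASK		0x000000f0

static uint32_t example_update_field(uint32_t reg_val, uint32_t field_val)
{
	/* replace the 4-bit field while leaving all other bits untouched */
	reg_val = REG_SET_FIELD(reg_val, EXAMPLE_REG, EXAMPLE_FIELD, field_val);

	/* read it back: yields field_val masked to the field width */
	return REG_GET_FIELD(reg_val, EXAMPLE_REG, EXAMPLE_FIELD);
}
#endif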
2150
2151/*
2152 * BIOS helpers.
2153 */
2154#define RBIOS8(i) (adev->bios[i])
2155#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
2156#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
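/*
 * Usage sketch for the BIOS helpers: reading the 16-bit ATOM ROM header
 * pointer out of the VBIOS image.  0x48 is the conventional location of that
 * pointer; treat the offset as an assumption here.
 */
#if 0 /* example only */
static uint16_t example_bios_header_start(struct amdgpu_device *adev)
{
	/* RBIOS16() assembles two little-endian bytes from adev->bios */
	return RBIOS16(0x48);
}
#endif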
2157
2158/*
2159 * RING helpers.
2160 */
2161static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
2162{
2163 if (ring->count_dw <= 0)
86c2b790 2164 DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
2165 ring->ring[ring->wptr++] = v;
2166 ring->wptr &= ring->ptr_mask;
2167 ring->count_dw--;
2168 ring->ring_free_dw--;
2169}
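/*
 * Usage sketch: callers are expected to reserve space on the ring first (via
 * the ring alloc/lock helpers declared elsewhere) and then emit dwords with
 * amdgpu_ring_write().  The packet encoding here is left to the caller and is
 * purely hypothetical.
 */
#if 0 /* example only */
static void example_emit_two_dwords(struct amdgpu_ring *ring,
				    uint32_t header, uint32_t payload)
{
	/* assumes at least two dwords were reserved on this ring */
	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring, payload);
}
#endif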
2170
2171/*
2172 * ASICs macro.
2173 */
2174#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
2175#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
2176#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
2177#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
2178#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
2179#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
2180#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
2181#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
2182#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
2183#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
2184#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
2185#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
2186#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
2187#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
2188#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
2189#define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
2190#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
2191#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
2192#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
2193#define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r))
2194#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
2195#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
2196#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
2197#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
2198#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
890ee23f 2199#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
2200#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
2201#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
d2edb07b 2202#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
2203#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
2204#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
2205#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
2206#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
2207#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
2208#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
2209#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
2210#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
2211#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
2212#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
2213#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
2214#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
2215#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
2216#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
2217#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
2218#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
2219#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
2220#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
2221#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
2222#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
2223#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
2224#define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
2225#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
2226#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
2227#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
2228#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
2229#define amdgpu_dpm_get_sclk(adev, l) (adev)->pm.funcs->get_sclk((adev), (l))
2230#define amdgpu_dpm_get_mclk(adev, l) (adev)->pm.funcs->get_mclk((adev), (l))
2231#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
2232#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
2233#define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l))
2234#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
2235#define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g))
b7a07769 2236#define amdgpu_dpm_powergate_vce(adev, g) (adev)->pm.funcs->powergate_vce((adev), (g))
2237#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
2238#define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m))
2239#define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev))
2240#define amdgpu_dpm_set_fan_speed_percent(adev, s) (adev)->pm.funcs->set_fan_speed_percent((adev), (s))
2241#define amdgpu_dpm_get_fan_speed_percent(adev, s) (adev)->pm.funcs->get_fan_speed_percent((adev), (s))
2242
2243#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
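/*
 * Illustrative sketch: the macros above simply indirect through the per-IP
 * function tables, so callers use them exactly like ordinary functions.
 * Looping over adev->rings and skipping NULL entries mirrors how the driver
 * walks its rings; the error handling shown is hypothetical.
 */
#if 0 /* example only */
static int example_test_all_rings(struct amdgpu_device *adev)
{
	unsigned i;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;

		r = amdgpu_ring_test_ring(ring);
		if (r)
			return r;
	}
	return 0;
}
#endif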
2244
2245/* Common functions */
2246int amdgpu_gpu_reset(struct amdgpu_device *adev);
2247void amdgpu_pci_config_reset(struct amdgpu_device *adev);
2248bool amdgpu_card_posted(struct amdgpu_device *adev);
2249void amdgpu_update_display_priority(struct amdgpu_device *adev);
2250bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
2251int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
2252int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
2253 u32 ip_instance, u32 ring,
2254 struct amdgpu_ring **out_ring);
2255void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
2256bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
2257int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2258 uint32_t flags);
2259bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
2260bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
2261uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2262 struct ttm_mem_reg *mem);
2263void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
2264void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
2265void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
2266void amdgpu_program_register_sequence(struct amdgpu_device *adev,
2267 const u32 *registers,
2268 const u32 array_size);
2269
2270bool amdgpu_device_is_px(struct drm_device *dev);
2271/* atpx handler */
2272#if defined(CONFIG_VGA_SWITCHEROO)
2273void amdgpu_register_atpx_handler(void);
2274void amdgpu_unregister_atpx_handler(void);
2275#else
2276static inline void amdgpu_register_atpx_handler(void) {}
2277static inline void amdgpu_unregister_atpx_handler(void) {}
2278#endif
2279
2280/*
2281 * KMS
2282 */
2283extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
2284extern int amdgpu_max_kms_ioctl;
2285
2286int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
2287int amdgpu_driver_unload_kms(struct drm_device *dev);
2288void amdgpu_driver_lastclose_kms(struct drm_device *dev);
2289int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
2290void amdgpu_driver_postclose_kms(struct drm_device *dev,
2291 struct drm_file *file_priv);
2292void amdgpu_driver_preclose_kms(struct drm_device *dev,
2293 struct drm_file *file_priv);
2294int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
2295int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
2296u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc);
2297int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc);
2298void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc);
2299int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
2300 int *max_error,
2301 struct timeval *vblank_time,
2302 unsigned flags);
2303long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
2304 unsigned long arg);
2305
2306/*
2307 * vm
2308 */
2309int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
2310void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
2311struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
2312 struct amdgpu_vm *vm,
2313 struct list_head *head);
2314int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
2315 struct amdgpu_sync *sync);
2316void amdgpu_vm_flush(struct amdgpu_ring *ring,
2317 struct amdgpu_vm *vm,
2318 struct amdgpu_fence *updates);
2319void amdgpu_vm_fence(struct amdgpu_device *adev,
2320 struct amdgpu_vm *vm,
2321 struct amdgpu_fence *fence);
2322uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
2323int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
2324 struct amdgpu_vm *vm);
2325int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2326 struct amdgpu_vm *vm);
2327int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
cfe2c978 2328 struct amdgpu_vm *vm, struct amdgpu_sync *sync);
2329int amdgpu_vm_bo_update(struct amdgpu_device *adev,
2330 struct amdgpu_bo_va *bo_va,
2331 struct ttm_mem_reg *mem);
2332void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2333 struct amdgpu_bo *bo);
2334struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
2335 struct amdgpu_bo *bo);
2336struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2337 struct amdgpu_vm *vm,
2338 struct amdgpu_bo *bo);
2339int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2340 struct amdgpu_bo_va *bo_va,
2341 uint64_t addr, uint64_t offset,
2342 uint64_t size, uint32_t flags);
2343int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2344 struct amdgpu_bo_va *bo_va,
2345 uint64_t addr);
2346void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2347 struct amdgpu_bo_va *bo_va);
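/*
 * Illustrative sketch of the typical BO-to-VM mapping flow built from the
 * declarations above: create a bo_va handle, add a mapping, and tear it down
 * again on failure.  The GPU address, size and flags are hypothetical, and
 * reservation locking is omitted.
 */
#if 0 /* example only */
static int example_map_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo *bo, uint64_t gpu_addr, uint64_t size)
{
	struct amdgpu_bo_va *bo_va;
	int r;

	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va)
		return -ENOMEM;

	r = amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, size, 0 /* flags: hypothetical */);
	if (r)
		amdgpu_vm_bo_rmv(adev, bo_va);

	return r;
}
#endif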
2348
2349/*
2350 * functions used by amdgpu_encoder.c
2351 */
2352struct amdgpu_afmt_acr {
2353 u32 clock;
2354
2355 int n_32khz;
2356 int cts_32khz;
2357
2358 int n_44_1khz;
2359 int cts_44_1khz;
2360
2361 int n_48khz;
2362 int cts_48khz;
2363
2364};
2365
2366struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
2367
2368/* amdgpu_acpi.c */
2369#if defined(CONFIG_ACPI)
2370int amdgpu_acpi_init(struct amdgpu_device *adev);
2371void amdgpu_acpi_fini(struct amdgpu_device *adev);
2372bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
2373int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
2374 u8 perf_req, bool advertise);
2375int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
2376#else
2377static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
2378static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
2379#endif
2380
2381struct amdgpu_bo_va_mapping *
2382amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
2383 uint64_t addr, struct amdgpu_bo **bo);
2384
2385#include "amdgpu_object.h"
2386
2387#endif