drm/amdgpu: add sched isr to fence process
[deliverable/linux.git] drivers/gpu/drm/amd/amdgpu/amdgpu.h
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"

#include "gpu_scheduler.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern int amdgpu_hard_reset;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_enable_scheduler;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000

#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8

/* max number of rings */
#define AMDGPU_MAX_RINGS 16
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 2

/* number of hw syncs before falling back on blocking */
#define AMDGPU_NUM_SYNCS 4

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX (1 << 0)
#define AMDGPU_RESET_COMPUTE (1 << 1)
#define AMDGPU_RESET_DMA (1 << 2)
#define AMDGPU_RESET_CP (1 << 3)
#define AMDGPU_RESET_GRBM (1 << 4)
#define AMDGPU_RESET_DMA1 (1 << 5)
#define AMDGPU_RESET_RLC (1 << 6)
#define AMDGPU_RESET_SEM (1 << 7)
#define AMDGPU_RESET_IH (1 << 8)
#define AMDGPU_RESET_VMC (1 << 9)
#define AMDGPU_RESET_MC (1 << 10)
#define AMDGPU_RESET_DISPLAY (1 << 11)
#define AMDGPU_RESET_UVD (1 << 12)
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)

/* CG block flags */
#define AMDGPU_CG_BLOCK_GFX (1 << 0)
#define AMDGPU_CG_BLOCK_MC (1 << 1)
#define AMDGPU_CG_BLOCK_SDMA (1 << 2)
#define AMDGPU_CG_BLOCK_UVD (1 << 3)
#define AMDGPU_CG_BLOCK_VCE (1 << 4)
#define AMDGPU_CG_BLOCK_HDP (1 << 5)
#define AMDGPU_CG_BLOCK_BIF (1 << 6)

/* CG flags */
#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1)
#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2)
#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3)
#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4)
#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6)
#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7)
#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8)
#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9)
#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10)
#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11)
#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12)
#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13)
#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14)
#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15)
#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16)

/* PG flags */
#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0)
#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1)
#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2)
#define AMDGPU_PG_SUPPORT_UVD (1 << 3)
#define AMDGPU_PG_SUPPORT_VCE (1 << 4)
#define AMDGPU_PG_SUPPORT_CP (1 << 5)
#define AMDGPU_PG_SUPPORT_GDS (1 << 6)
#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7)
#define AMDGPU_PG_SUPPORT_SDMA (1 << 8)
#define AMDGPU_PG_SUPPORT_ACP (1 << 9)
#define AMDGPU_PG_SUPPORT_SAMU (1 << 10)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
#define AMDGPU_GFX_SAFE_MODE 0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_fence;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_semaphore;
struct amdgpu_cs_parser;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type);

/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ring *ring,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ring *ring,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ib *ib);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct amdgpu_ring *ring);
	u32 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	bool (*emit_semaphore)(struct amdgpu_ring *ring,
			       struct amdgpu_semaphore *semaphore,
			       bool emit_wait);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring);
	bool (*is_lockup)(struct amdgpu_ring *ring);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page *page;
	dma_addr_t addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);


/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	struct amdgpu_ring *ring;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint64_t sync_seq[AMDGPU_MAX_RINGS];
	atomic64_t last_seq;
	bool initialized;
	struct amdgpu_irq_src *irq_src;
	unsigned irq_type;
	struct delayed_work lockup_work;
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
#define AMDGPU_FENCE_OWNER_MOVE ((void*)2ul)

#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)

struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring *ring;
	uint64_t seq;

	/* filp or special value for fence creator */
	void *owner;

	wait_queue_t fence_wake;
};

struct amdgpu_user_fence {
	/* write-back bo */
	struct amdgpu_bo *bo;
	/* write-back address offset to bo start */
	uint32_t offset;
	uint64_t sequence;
};

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
int amdgpu_fence_wait_any(struct amdgpu_device *adev,
			  struct amdgpu_fence **fences,
			  bool intr);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);

bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);
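
/*
 * Illustrative sketch only (not part of this header): the basic fence
 * lifecycle using the declarations above.  Ring locking and full error
 * handling are omitted; "ring" is assumed to be an initialized ring that
 * is currently accepting commands.
 *
 *	struct amdgpu_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
 *	if (!r) {
 *		r = amdgpu_fence_wait(fence, false);
 *		amdgpu_fence_unref(&fence);
 *	}
 */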

static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
						      struct amdgpu_fence *b)
{
	if (!a) {
		return b;
	}

	if (!b) {
		return a;
	}

	BUG_ON(a->ring != b->ring);

	if (a->seq > b->seq) {
		return a;
	} else {
		return b;
	}
}

static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
					   struct amdgpu_fence *b)
{
	if (!a) {
		return false;
	}

	if (!b) {
		return true;
	}

	BUG_ON(a->ring != b->ring);

	return a->seq < b->seq;
}

int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
			   void *owner, struct amdgpu_fence **fence);

/*
 * TTM.
 */
struct amdgpu_mman {
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;
	struct ttm_bo_device bdev;
	bool mem_global_referenced;
	bool initialized;

#if defined(CONFIG_DEBUG_FS)
	struct dentry *vram;
	struct dentry *gtt;
#endif

	/* buffer handling */
	const struct amdgpu_buffer_funcs *buffer_funcs;
	struct amdgpu_ring *buffer_funcs_ring;
};

int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct amdgpu_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);

struct amdgpu_bo_list_entry {
	struct amdgpu_bo *robj;
	struct ttm_validate_buffer tv;
	struct amdgpu_bo_va *bo_va;
	unsigned prefered_domains;
	unsigned allowed_domains;
	uint32_t priority;
};

struct amdgpu_bo_va_mapping {
	struct list_head list;
	struct interval_tree_node it;
	uint64_t offset;
	uint32_t flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	/* protected by bo being reserved */
	struct list_head bo_list;
	uint64_t addr;
	struct amdgpu_fence *last_pt_update;
	unsigned ref_count;

	/* protected by vm mutex */
	struct list_head mappings;
	struct list_head vm_status;

	/* constant after initialization */
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;
};

#define AMDGPU_GEM_DOMAIN_MAX 0x3

struct amdgpu_bo {
	/* Protected by gem.mutex */
	struct list_head list;
	/* Protected by tbo.reserved */
	u32 initial_domain;
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	u64 flags;
	unsigned pin_count;
	void *kptr;
	u64 tiling_flags;
	u64 metadata_flags;
	void *metadata;
	u32 metadata_size;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
	struct list_head va;
	/* Constant after initialization */
	struct amdgpu_device *adev;
	struct drm_gem_object gem_base;

	struct ttm_bo_kmap_obj dma_buf_vmap;
	pid_t pid;
	struct amdgpu_mn *mn;
	struct list_head mn_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their
 * own locking.
 *
 * The principle is simple: we keep a list of sub allocations in offset
 * order (first entry has offset == 0, last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset+object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects are on the
 * same alignment).
 */
struct amdgpu_sa_manager {
	wait_queue_head_t wq;
	struct amdgpu_bo *bo;
	struct list_head *hole;
	struct list_head flist[AMDGPU_MAX_RINGS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
	uint32_t align;
};
629struct amdgpu_sa_manager {
630 wait_queue_head_t wq;
631 struct amdgpu_bo *bo;
632 struct list_head *hole;
633 struct list_head flist[AMDGPU_MAX_RINGS];
634 struct list_head olist;
635 unsigned size;
636 uint64_t gpu_addr;
637 void *cpu_ptr;
638 uint32_t domain;
639 uint32_t align;
640};
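
/*
 * Worked example (illustrative, not taken from the driver): with
 * size = 64K and live sub-allocations at [0,16K) and [16K,40K), a 24K
 * request fits at the end because 64K - 40K >= 24K.  A following 32K
 * request does not fit at the end, so the allocator waits on the fences
 * of the sub objects in offset order until 16K + 24K... i.e. until the
 * freed prefix satisfies object_offset + object_size >= 32K, and then
 * returns a sub object starting at offset 0.
 */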

struct amdgpu_sa_bo;

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct amdgpu_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct amdgpu_fence *fence;
};

/*
 * GEM objects.
 */
struct amdgpu_gem {
	struct mutex mutex;
	struct list_head objects;
};

int amdgpu_gem_init(struct amdgpu_device *adev);
void amdgpu_gem_fini(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);

/*
 * Semaphores.
 */
struct amdgpu_semaphore {
	struct amdgpu_sa_bo *sa_bo;
	signed waiters;
	uint64_t gpu_addr;
};

int amdgpu_semaphore_create(struct amdgpu_device *adev,
			    struct amdgpu_semaphore **semaphore);
bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
				  struct amdgpu_semaphore *semaphore);
bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
				struct amdgpu_semaphore *semaphore);
void amdgpu_semaphore_free(struct amdgpu_device *adev,
			   struct amdgpu_semaphore **semaphore,
			   struct amdgpu_fence *fence);

/*
 * Synchronization
 */
struct amdgpu_sync {
	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
	struct amdgpu_fence *sync_to[AMDGPU_MAX_RINGS];
	struct amdgpu_fence *last_vm_update;
};

void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring);
void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct amdgpu_fence *fence);

/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
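
/*
 * Example (illustrative): AMDGPU_GPU_PAGE_ALIGN(5000) rounds up to the
 * next 4KB GPU page boundary, i.e. (5000 + 4095) & ~4095 == 8192.
 */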

struct amdgpu_gart {
	dma_addr_t table_addr;
	struct amdgpu_bo *robj;
	void *ptr;
	unsigned num_gpu_pages;
	unsigned num_cpu_pages;
	unsigned table_size;
	struct page **pages;
	dma_addr_t *pages_addr;
	bool ready;
	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64 mc_vram_size;
	u64 visible_vram_size;
	u64 gtt_size;
	u64 gtt_start;
	u64 gtt_end;
	u64 vram_start;
	u64 vram_end;
	unsigned vram_width;
	u64 real_vram_size;
	int vram_mtrr;
	u64 gtt_base_align;
	u64 mc_mask;
	const struct firmware *fw; /* MC firmware */
	uint32_t fw_version;
	struct amdgpu_irq_src vm_fault;
	uint32_t vram_type;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ = 0x000,
	AMDGPU_DOORBELL_HIQ = 0x001,
	AMDGPU_DOORBELL_DIQ = 0x002,
	AMDGPU_DOORBELL_MEC_RING0 = 0x010,
	AMDGPU_DOORBELL_MEC_RING1 = 0x011,
	AMDGPU_DOORBELL_MEC_RING2 = 0x012,
	AMDGPU_DOORBELL_MEC_RING3 = 0x013,
	AMDGPU_DOORBELL_MEC_RING4 = 0x014,
	AMDGPU_DOORBELL_MEC_RING5 = 0x015,
	AMDGPU_DOORBELL_MEC_RING6 = 0x016,
	AMDGPU_DOORBELL_MEC_RING7 = 0x017,
	AMDGPU_DOORBELL_GFX_RING0 = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
	AMDGPU_DOORBELL_IH = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
	AMDGPU_DOORBELL_INVALID = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t base;
	resource_size_t size;
	u32 __iomem *ptr;
	u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
};

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct work_struct flip_work;
	struct work_struct unpin_work;
	struct amdgpu_device *adev;
	int crtc_id;
	uint64_t base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo *old_rbo;
	struct fence *fence;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	struct amdgpu_ring *ring;
	struct amdgpu_fence *fence;
	struct amdgpu_user_fence *user;
	struct amdgpu_vm *vm;
	struct amdgpu_ctx *ctx;
	struct amdgpu_sync sync;
	uint32_t gds_base, gds_size;
	uint32_t gws_base, gws_size;
	uint32_t oa_base, oa_size;
	uint32_t flags;
	/* resulting sequence number */
	uint64_t sequence;
};

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE
};

extern struct amd_sched_backend_ops amdgpu_sched_ops;

struct amdgpu_ring {
	struct amdgpu_device *adev;
	const struct amdgpu_ring_funcs *funcs;
	struct amdgpu_fence_driver fence_drv;
	struct amd_gpu_scheduler *scheduler;

	struct mutex *ring_lock;
	struct amdgpu_bo *ring_obj;
	volatile uint32_t *ring;
	unsigned rptr_offs;
	u64 next_rptr_gpu_addr;
	volatile u32 *next_rptr_cpu_addr;
	unsigned wptr;
	unsigned wptr_old;
	unsigned ring_size;
	unsigned ring_free_dw;
	int count_dw;
	atomic_t last_rptr;
	atomic64_t last_activity;
	uint64_t gpu_addr;
	uint32_t align_mask;
	uint32_t ptr_mask;
	bool ready;
	u32 nop;
	u32 idx;
	u64 last_semaphore_signal_addr;
	u64 last_semaphore_wait_addr;
	u32 me;
	u32 pipe;
	u32 queue;
	struct amdgpu_bo *mqd_obj;
	u32 doorbell_index;
	bool use_doorbell;
	unsigned wptr_offs;
	unsigned next_rptr_offs;
	unsigned fence_offs;
	struct amdgpu_ctx *current_ctx;
	enum amdgpu_ring_type type;
	char name[16];
	bool is_pte_ring;
};

/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM 16

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)

#define AMDGPU_PTE_VALID (1 << 0)
#define AMDGPU_PTE_SYSTEM (1 << 1)
#define AMDGPU_PTE_SNOOPED (1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1 << 4)

#define AMDGPU_PTE_READABLE (1 << 5)
#define AMDGPU_PTE_WRITEABLE (1 << 6)

/* PTE (Page Table Entry) fragment field for different page sizes */
#define AMDGPU_PTE_FRAG_4KB (0 << 7)
#define AMDGPU_PTE_FRAG_64KB (4 << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
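
/*
 * Illustrative only: a valid, CPU-snooped, read/write system page mapped
 * with the 4KB fragment size would combine the flags above as
 *
 *	AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *	AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_FRAG_4KB
 */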

struct amdgpu_vm_pt {
	struct amdgpu_bo *bo;
	uint64_t addr;
};

struct amdgpu_vm_id {
	unsigned id;
	uint64_t pd_gpu_addr;
	/* last flushed PD/PT update */
	struct amdgpu_fence *flushed_updates;
	/* last use of vmid */
	struct amdgpu_fence *last_id_use;
};

struct amdgpu_vm {
	struct mutex mutex;

	struct rb_root va;

	/* protecting invalidated and freed */
	spinlock_t status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head invalidated;

	/* BOs freed, but not yet updated in the PT */
	struct list_head freed;

	/* contains the page directory */
	struct amdgpu_bo *page_directory;
	unsigned max_pde_used;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt *page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
};

struct amdgpu_vm_manager {
	struct amdgpu_fence *active[AMDGPU_NUM_VM];
	uint32_t max_pfn;
	/* number of VMIDs */
	unsigned nvm;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* is vm enabled? */
	bool enabled;
	/* for hw to save the PD addr on suspend/resume */
	uint32_t saved_table_addr[AMDGPU_NUM_VM];
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
	struct amdgpu_ring *vm_pte_funcs_ring;
};

/*
 * context related structures
 */

#define AMDGPU_CTX_MAX_CS_PENDING 16

struct amdgpu_ctx_ring {
	uint64_t sequence;
	struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING];
	struct amd_context_entity c_entity;
};

struct amdgpu_ctx {
	struct kref refcount;
	struct amdgpu_device *adev;
	unsigned reset_counter;
	spinlock_t ring_lock;
	struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device *adev;
	struct mutex lock;
	/* protected by lock */
	struct idr ctx_handles;
};

int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		     uint32_t *id);
int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		    uint32_t id);

void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq);

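/*
 * Illustrative only: amdgpu_ctx_add_fence() stores a submission fence in
 * the per-ring window of AMDGPU_CTX_MAX_CS_PENDING entries and returns a
 * sequence number that can later be passed back to amdgpu_ctx_get_fence()
 * to look the fence up again, e.g.:
 *
 *	uint64_t seq = amdgpu_ctx_add_fence(ctx, ring, fence);
 *	...
 *	struct fence *f = amdgpu_ctx_get_fence(ctx, ring, seq);
 */
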
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);


/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm vm;
	struct mutex bo_list_lock;
	struct idr bo_list_handles;
	struct amdgpu_ctx_mgr ctx_mgr;
};

/*
 * residency list
 */

struct amdgpu_bo_list {
	struct mutex lock;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	bool has_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_copy(struct amdgpu_device *adev,
			 struct amdgpu_bo_list *dst,
			 struct amdgpu_bo_list *src);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	volatile uint32_t *sr_ptr;
	const u32 *reg_list;
	u32 reg_list_size;
	/* for clear state */
	struct amdgpu_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
	volatile uint32_t *cs_ptr;
	const struct cs_section_def *cs_data;
	u32 clear_state_size;
	/* for cp tables */
	struct amdgpu_bo *cp_table_obj;
	uint64_t cp_table_gpu_addr;
	volatile uint32_t *cp_table_ptr;
	u32 cp_table_size;
};

struct amdgpu_mec {
	struct amdgpu_bo *hpd_eop_obj;
	u64 hpd_eop_gpu_addr;
	u32 num_pipe;
	u32 num_mec;
	u32 num_queue;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned num_reg;
	uint32_t reg_base;
	bool free[32];
	uint32_t reg[32];
};

/*
 * GFX configurations
 */
struct amdgpu_gca_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];
};

struct amdgpu_gfx {
	struct mutex gpu_clock_mutex;
	struct amdgpu_gca_config config;
	struct amdgpu_rlc rlc;
	struct amdgpu_mec mec;
	struct amdgpu_scratch scratch;
	const struct firmware *me_fw; /* ME firmware */
	uint32_t me_fw_version;
	const struct firmware *pfp_fw; /* PFP firmware */
	uint32_t pfp_fw_version;
	const struct firmware *ce_fw; /* CE firmware */
	uint32_t ce_fw_version;
	const struct firmware *rlc_fw; /* RLC firmware */
	uint32_t rlc_fw_version;
	const struct firmware *mec_fw; /* MEC firmware */
	uint32_t mec_fw_version;
	const struct firmware *mec2_fw; /* MEC2 firmware */
	uint32_t mec2_fw_version;
	uint32_t me_feature_version;
	uint32_t ce_feature_version;
	uint32_t pfp_feature_version;
	uint32_t rlc_feature_version;
	uint32_t mec_feature_version;
	uint32_t mec2_feature_version;
	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned num_gfx_rings;
	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned num_compute_rings;
	struct amdgpu_irq_src eop_irq;
	struct amdgpu_irq_src priv_reg_irq;
	struct amdgpu_irq_src priv_inst_irq;
	/* gfx status */
	uint32_t gfx_current_status;
	/* sync signal for const engine */
	unsigned ce_sync_offs;
	/* ce ram size */
	unsigned ce_ram_size;
};

int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		       struct amdgpu_ib *ib, void *owner);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
/* Ring access between begin & end cannot sleep */
void amdgpu_ring_free_size(struct amdgpu_ring *ring);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data);
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
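
/*
 * Illustrative sketch only: typical command emission brackets ring writes
 * between lock and unlock, reserving the dword budget "ndw" up front
 * (error handling kept minimal):
 *
 *	r = amdgpu_ring_lock(ring, ndw);
 *	if (r)
 *		return r;
 *	... write up to ndw dwords ...
 *	amdgpu_ring_unlock_commit(ring);
 *
 * On failure amdgpu_ring_unlock_undo() drops the reserved space instead.
 */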

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	uint32_t *kdata;
	void __user *user_ptr;
};

union amdgpu_sched_job_param {
	struct {
		struct amdgpu_vm *vm;
		uint64_t start;
		uint64_t last;
		struct amdgpu_fence **fence;
	} vm_mapping;
	struct {
		struct amdgpu_bo *bo;
	} vm;
};

struct amdgpu_cs_parser {
	struct amdgpu_device *adev;
	struct drm_file *filp;
	struct amdgpu_ctx *ctx;
	struct amdgpu_bo_list *bo_list;
	/* chunks */
	unsigned nchunks;
	struct amdgpu_cs_chunk *chunks;
	/* relocations */
	struct amdgpu_bo_list_entry *vm_bos;
	struct list_head validated;

	struct amdgpu_ib *ibs;
	uint32_t num_ibs;

	struct ww_acquire_ctx ticket;

	/* user fence */
	struct amdgpu_user_fence uf;

	struct amdgpu_ring *ring;
	struct mutex job_lock;
	struct work_struct job_work;
	int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
	union amdgpu_sched_job_param job_param;
	int (*run_job)(struct amdgpu_cs_parser *sched_job);
	int (*free_job)(struct amdgpu_cs_parser *sched_job);
};

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
{
	return p->ibs[ib_idx].ptr[idx];
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
	u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
	unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
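
/*
 * Illustrative sketch only: a writeback slot is allocated from the "used"
 * bitmap with amdgpu_wb_get(); the returned index is assumed to address
 * one 32-bit word at adev->wb.wb[wb] (GPU address adev->wb.gpu_addr +
 * wb * 4) and is returned with amdgpu_wb_free(), e.g.:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_wb_get(adev, &wb)) {
 *		volatile uint32_t *cpu = &adev->wb.wb[wb];
 *		...
 *		amdgpu_wb_free(adev, wb);
 *	}
 */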
1286
1287/**
1288 * struct amdgpu_pm - power management datas
1289 * It keeps track of various data needed to take powermanagement decision.
1290 */
1291
1292enum amdgpu_pm_state_type {
1293 /* not used for dpm */
1294 POWER_STATE_TYPE_DEFAULT,
1295 POWER_STATE_TYPE_POWERSAVE,
1296 /* user selectable states */
1297 POWER_STATE_TYPE_BATTERY,
1298 POWER_STATE_TYPE_BALANCED,
1299 POWER_STATE_TYPE_PERFORMANCE,
1300 /* internal states */
1301 POWER_STATE_TYPE_INTERNAL_UVD,
1302 POWER_STATE_TYPE_INTERNAL_UVD_SD,
1303 POWER_STATE_TYPE_INTERNAL_UVD_HD,
1304 POWER_STATE_TYPE_INTERNAL_UVD_HD2,
1305 POWER_STATE_TYPE_INTERNAL_UVD_MVC,
1306 POWER_STATE_TYPE_INTERNAL_BOOT,
1307 POWER_STATE_TYPE_INTERNAL_THERMAL,
1308 POWER_STATE_TYPE_INTERNAL_ACPI,
1309 POWER_STATE_TYPE_INTERNAL_ULV,
1310 POWER_STATE_TYPE_INTERNAL_3DPERF,
1311};
1312
1313enum amdgpu_int_thermal_type {
1314 THERMAL_TYPE_NONE,
1315 THERMAL_TYPE_EXTERNAL,
1316 THERMAL_TYPE_EXTERNAL_GPIO,
1317 THERMAL_TYPE_RV6XX,
1318 THERMAL_TYPE_RV770,
1319 THERMAL_TYPE_ADT7473_WITH_INTERNAL,
1320 THERMAL_TYPE_EVERGREEN,
1321 THERMAL_TYPE_SUMO,
1322 THERMAL_TYPE_NI,
1323 THERMAL_TYPE_SI,
1324 THERMAL_TYPE_EMC2103_WITH_INTERNAL,
1325 THERMAL_TYPE_CI,
1326 THERMAL_TYPE_KV,
1327};
1328
1329enum amdgpu_dpm_auto_throttle_src {
1330 AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
1331 AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
1332};
1333
1334enum amdgpu_dpm_event_src {
1335 AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
1336 AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
1337 AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
1338 AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
1339 AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
1340};
1341
1342#define AMDGPU_MAX_VCE_LEVELS 6
1343
1344enum amdgpu_vce_level {
1345 AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
1346 AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
1347 AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
1348 AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
1349 AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
1350 AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
1351};
1352
1353struct amdgpu_ps {
1354 u32 caps; /* vbios flags */
1355 u32 class; /* vbios flags */
1356 u32 class2; /* vbios flags */
1357 /* UVD clocks */
1358 u32 vclk;
1359 u32 dclk;
1360 /* VCE clocks */
1361 u32 evclk;
1362 u32 ecclk;
1363 bool vce_active;
1364 enum amdgpu_vce_level vce_level;
1365 /* asic priv */
1366 void *ps_priv;
1367};
1368
1369struct amdgpu_dpm_thermal {
1370 /* thermal interrupt work */
1371 struct work_struct work;
1372 /* low temperature threshold */
1373 int min_temp;
1374 /* high temperature threshold */
1375 int max_temp;
1376 /* was last interrupt low to high or high to low */
1377 bool high_to_low;
1378 /* interrupt source */
1379 struct amdgpu_irq_src irq;
1380};
1381
1382enum amdgpu_clk_action
1383{
1384 AMDGPU_SCLK_UP = 1,
1385 AMDGPU_SCLK_DOWN
1386};
1387
1388struct amdgpu_blacklist_clocks
1389{
1390 u32 sclk;
1391 u32 mclk;
1392 enum amdgpu_clk_action action;
1393};
1394
1395struct amdgpu_clock_and_voltage_limits {
1396 u32 sclk;
1397 u32 mclk;
1398 u16 vddc;
1399 u16 vddci;
1400};
1401
1402struct amdgpu_clock_array {
1403 u32 count;
1404 u32 *values;
1405};
1406
1407struct amdgpu_clock_voltage_dependency_entry {
1408 u32 clk;
1409 u16 v;
1410};
1411
1412struct amdgpu_clock_voltage_dependency_table {
1413 u32 count;
1414 struct amdgpu_clock_voltage_dependency_entry *entries;
1415};
1416
1417union amdgpu_cac_leakage_entry {
1418 struct {
1419 u16 vddc;
1420 u32 leakage;
1421 };
1422 struct {
1423 u16 vddc1;
1424 u16 vddc2;
1425 u16 vddc3;
1426 };
1427};
1428
1429struct amdgpu_cac_leakage_table {
1430 u32 count;
1431 union amdgpu_cac_leakage_entry *entries;
1432};
1433
1434struct amdgpu_phase_shedding_limits_entry {
1435 u16 voltage;
1436 u32 sclk;
1437 u32 mclk;
1438};
1439
1440struct amdgpu_phase_shedding_limits_table {
1441 u32 count;
1442 struct amdgpu_phase_shedding_limits_entry *entries;
1443};
1444
1445struct amdgpu_uvd_clock_voltage_dependency_entry {
1446 u32 vclk;
1447 u32 dclk;
1448 u16 v;
1449};
1450
1451struct amdgpu_uvd_clock_voltage_dependency_table {
1452 u8 count;
1453 struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
1454};
1455
1456struct amdgpu_vce_clock_voltage_dependency_entry {
1457 u32 ecclk;
1458 u32 evclk;
1459 u16 v;
1460};
1461
1462struct amdgpu_vce_clock_voltage_dependency_table {
1463 u8 count;
1464 struct amdgpu_vce_clock_voltage_dependency_entry *entries;
1465};
1466
1467struct amdgpu_ppm_table {
1468 u8 ppm_design;
1469 u16 cpu_core_number;
1470 u32 platform_tdp;
1471 u32 small_ac_platform_tdp;
1472 u32 platform_tdc;
1473 u32 small_ac_platform_tdc;
1474 u32 apu_tdp;
1475 u32 dgpu_tdp;
1476 u32 dgpu_ulv_power;
1477 u32 tj_max;
1478};
1479
1480struct amdgpu_cac_tdp_table {
1481 u16 tdp;
1482 u16 configurable_tdp;
1483 u16 tdc;
1484 u16 battery_power_limit;
1485 u16 small_power_limit;
1486 u16 low_cac_leakage;
1487 u16 high_cac_leakage;
1488 u16 maximum_power_delivery_limit;
1489};
1490
1491struct amdgpu_dpm_dynamic_state {
1492 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
1493 struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
1494 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
1495 struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
1496 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
1497 struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
1498 struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
1499 struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
1500 struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
1501 struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
1502 struct amdgpu_clock_array valid_sclk_values;
1503 struct amdgpu_clock_array valid_mclk_values;
1504 struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
1505 struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
1506 u32 mclk_sclk_ratio;
1507 u32 sclk_mclk_delta;
1508 u16 vddc_vddci_delta;
1509 u16 min_vddc_for_pcie_gen2;
1510 struct amdgpu_cac_leakage_table cac_leakage_table;
1511 struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
1512 struct amdgpu_ppm_table *ppm_table;
1513 struct amdgpu_cac_tdp_table *cac_tdp_table;
1514};
1515
1516struct amdgpu_dpm_fan {
1517 u16 t_min;
1518 u16 t_med;
1519 u16 t_high;
1520 u16 pwm_min;
1521 u16 pwm_med;
1522 u16 pwm_high;
1523 u8 t_hyst;
1524 u32 cycle_delay;
1525 u16 t_max;
1526 u8 control_mode;
1527 u16 default_max_fan_pwm;
1528 u16 default_fan_output_sensitivity;
1529 u16 fan_output_sensitivity;
1530 bool ucode_fan_control;
1531};
1532
1533enum amdgpu_pcie_gen {
1534 AMDGPU_PCIE_GEN1 = 0,
1535 AMDGPU_PCIE_GEN2 = 1,
1536 AMDGPU_PCIE_GEN3 = 2,
1537 AMDGPU_PCIE_GEN_INVALID = 0xffff
1538};
1539
1540enum amdgpu_dpm_forced_level {
1541 AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
1542 AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
1543 AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
1544};
1545
1546struct amdgpu_vce_state {
1547 /* vce clocks */
1548 u32 evclk;
1549 u32 ecclk;
1550 /* gpu clocks */
1551 u32 sclk;
1552 u32 mclk;
1553 u8 clk_idx;
1554 u8 pstate;
1555};
1556
1557struct amdgpu_dpm_funcs {
1558 int (*get_temperature)(struct amdgpu_device *adev);
1559 int (*pre_set_power_state)(struct amdgpu_device *adev);
1560 int (*set_power_state)(struct amdgpu_device *adev);
1561 void (*post_set_power_state)(struct amdgpu_device *adev);
1562 void (*display_configuration_changed)(struct amdgpu_device *adev);
1563 u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
1564 u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
1565 void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
1566 void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
1567 int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
1568 bool (*vblank_too_short)(struct amdgpu_device *adev);
1569 void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
b7a07769 1570 void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
97b2e202
AD
1571 void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
1572 void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
1573 u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
1574 int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
1575 int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
1576};
1577
1578struct amdgpu_dpm {
1579 struct amdgpu_ps *ps;
1580 /* number of valid power states */
1581 int num_ps;
1582 /* current power state that is active */
1583 struct amdgpu_ps *current_ps;
1584 /* requested power state */
1585 struct amdgpu_ps *requested_ps;
1586 /* boot up power state */
1587 struct amdgpu_ps *boot_ps;
1588 /* default uvd power state */
1589 struct amdgpu_ps *uvd_ps;
1590 /* vce requirements */
1591 struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
1592 enum amdgpu_vce_level vce_level;
1593 enum amdgpu_pm_state_type state;
1594 enum amdgpu_pm_state_type user_state;
1595 u32 platform_caps;
1596 u32 voltage_response_time;
1597 u32 backbias_response_time;
1598 void *priv;
1599 u32 new_active_crtcs;
1600 int new_active_crtc_count;
1601 u32 current_active_crtcs;
1602 int current_active_crtc_count;
1603 struct amdgpu_dpm_dynamic_state dyn_state;
1604 struct amdgpu_dpm_fan fan;
1605 u32 tdp_limit;
1606 u32 near_tdp_limit;
1607 u32 near_tdp_limit_adjusted;
1608 u32 sq_ramping_threshold;
1609 u32 cac_leakage;
1610 u16 tdp_od_limit;
1611 u32 tdp_adjustment;
1612 u16 load_line_slope;
1613 bool power_control;
1614 bool ac_power;
1615 /* special states active */
1616 bool thermal_active;
1617 bool uvd_active;
1618 bool vce_active;
1619 /* thermal handling */
1620 struct amdgpu_dpm_thermal thermal;
1621 /* forced levels */
1622 enum amdgpu_dpm_forced_level forced_level;
1623};
1624
1625struct amdgpu_pm {
1626 struct mutex mutex;
97b2e202
AD
1627 u32 current_sclk;
1628 u32 current_mclk;
1629 u32 default_sclk;
1630 u32 default_mclk;
1631 struct amdgpu_i2c_chan *i2c_bus;
1632 /* internal thermal controller on rv6xx+ */
1633 enum amdgpu_int_thermal_type int_thermal_type;
1634 struct device *int_hwmon_dev;
1635 /* fan control parameters */
1636 bool no_fan;
1637 u8 fan_pulses_per_revolution;
1638 u8 fan_min_rpm;
1639 u8 fan_max_rpm;
1640 /* dpm */
1641 bool dpm_enabled;
1642 struct amdgpu_dpm dpm;
1643 const struct firmware *fw; /* SMC firmware */
1644 uint32_t fw_version;
1645 const struct amdgpu_dpm_funcs *funcs;
1646};
1647
1648/*
1649 * UVD
1650 */
1651#define AMDGPU_MAX_UVD_HANDLES 10
1652#define AMDGPU_UVD_STACK_SIZE (1024*1024)
1653#define AMDGPU_UVD_HEAP_SIZE (1024*1024)
1654#define AMDGPU_UVD_FIRMWARE_OFFSET 256
1655
1656struct amdgpu_uvd {
1657 struct amdgpu_bo *vcpu_bo;
1658 void *cpu_addr;
1659 uint64_t gpu_addr;
1660 void *saved_bo;
1661 atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
1662 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
1663 struct delayed_work idle_work;
1664 const struct firmware *fw; /* UVD firmware */
1665 struct amdgpu_ring ring;
1666 struct amdgpu_irq_src irq;
1667 bool address_64_bit;
1668};
1669
1670/*
1671 * VCE
1672 */
1673#define AMDGPU_MAX_VCE_HANDLES 16
97b2e202
AD
1674#define AMDGPU_VCE_FIRMWARE_OFFSET 256
1675
6a585777
AD
1676#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
1677#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
1678
97b2e202
AD
1679struct amdgpu_vce {
1680 struct amdgpu_bo *vcpu_bo;
1681 uint64_t gpu_addr;
1682 unsigned fw_version;
1683 unsigned fb_version;
1684 atomic_t handles[AMDGPU_MAX_VCE_HANDLES];
1685 struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
f1689ec1 1686 uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
97b2e202
AD
1687 struct delayed_work idle_work;
1688 const struct firmware *fw; /* VCE firmware */
1689 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
1690 struct amdgpu_irq_src irq;
6a585777 1691 unsigned harvest_config;
97b2e202
AD
1692};
1693
1694/*
1695 * SDMA
1696 */
1697struct amdgpu_sdma {
1698 /* SDMA firmware */
1699 const struct firmware *fw;
1700 uint32_t fw_version;
cfa2104f 1701 uint32_t feature_version;
97b2e202
AD
1702
1703 struct amdgpu_ring ring;
1704};
1705
1706/*
1707 * Firmware
1708 */
1709struct amdgpu_firmware {
1710 struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
1711 bool smu_load;
1712 struct amdgpu_bo *fw_buf;
1713 unsigned int fw_size;
1714};
1715
1716/*
1717 * Benchmarking
1718 */
1719void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
1720
1721
1722/*
1723 * Testing
1724 */
1725void amdgpu_test_moves(struct amdgpu_device *adev);
1726void amdgpu_test_ring_sync(struct amdgpu_device *adev,
1727 struct amdgpu_ring *cpA,
1728 struct amdgpu_ring *cpB);
1729void amdgpu_test_syncing(struct amdgpu_device *adev);
1730
1731/*
1732 * MMU Notifier
1733 */
1734#if defined(CONFIG_MMU_NOTIFIER)
1735int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
1736void amdgpu_mn_unregister(struct amdgpu_bo *bo);
1737#else
1738static int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
1739{
1740 return -ENODEV;
1741}
1742static void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
1743#endif
1744
1745/*
1746 * Debugfs
1747 */
1748struct amdgpu_debugfs {
1749 struct drm_info_list *files;
1750 unsigned num_files;
1751};
1752
1753int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
1754 struct drm_info_list *files,
1755 unsigned nfiles);
1756int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
1757
1758#if defined(CONFIG_DEBUG_FS)
1759int amdgpu_debugfs_init(struct drm_minor *minor);
1760void amdgpu_debugfs_cleanup(struct drm_minor *minor);
1761#endif
1762
1763/*
1764 * amdgpu smumgr functions
1765 */
1766struct amdgpu_smumgr_funcs {
1767 int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
1768 int (*request_smu_load_fw)(struct amdgpu_device *adev);
1769 int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
1770};
1771
1772/*
1773 * amdgpu smumgr
1774 */
1775struct amdgpu_smumgr {
1776 struct amdgpu_bo *toc_buf;
1777 struct amdgpu_bo *smu_buf;
1778 /* asic priv smu data */
1779 void *priv;
1780 spinlock_t smu_lock;
1781 /* smumgr functions */
1782 const struct amdgpu_smumgr_funcs *smumgr_funcs;
1783 /* ucode loading complete flag */
1784 uint32_t fw_flags;
1785};
1786
1787/*
1788 * ASIC specific register table accessible by UMD
1789 */
1790struct amdgpu_allowed_register_entry {
1791 uint32_t reg_offset;
1792 bool untouched;
1793 bool grbm_indexed;
1794};
1795
1796struct amdgpu_cu_info {
1797 uint32_t number; /* total active CU number */
1798 uint32_t ao_cu_mask;
1799 uint32_t bitmap[4][4];
1800};
1801
1802
1803/*
1804 * ASIC specific functions.
1805 */
1806struct amdgpu_asic_funcs {
1807 bool (*read_disabled_bios)(struct amdgpu_device *adev);
1808 int (*read_register)(struct amdgpu_device *adev, u32 se_num,
1809 u32 sh_num, u32 reg_offset, u32 *value);
1810 void (*set_vga_state)(struct amdgpu_device *adev, bool state);
1811 int (*reset)(struct amdgpu_device *adev);
1812 /* wait for mc_idle */
1813 int (*wait_for_mc_idle)(struct amdgpu_device *adev);
1814 /* get the reference clock */
1815 u32 (*get_xclk)(struct amdgpu_device *adev);
1816 /* get the gpu clock counter */
1817 uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
1818 int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
1819 /* MM block clocks */
1820 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
1821 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
1822};
1823
1824/*
1825 * IOCTL.
1826 */
1827int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
1828 struct drm_file *filp);
1829int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
1830 struct drm_file *filp);
1831
1832int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
1833 struct drm_file *filp);
1834int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
1835 struct drm_file *filp);
1836int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
1837 struct drm_file *filp);
1838int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
1839 struct drm_file *filp);
1840int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
1841 struct drm_file *filp);
1842int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
1843 struct drm_file *filp);
1844int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1845int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1846
1847int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
1848 struct drm_file *filp);
1849
1850/* VRAM scratch page for HDP bug, default vram page */
1851struct amdgpu_vram_scratch {
1852 struct amdgpu_bo *robj;
1853 volatile uint32_t *ptr;
1854 u64 gpu_addr;
1855};
1856
1857/*
1858 * ACPI
1859 */
1860struct amdgpu_atif_notification_cfg {
1861 bool enabled;
1862 int command_code;
1863};
1864
1865struct amdgpu_atif_notifications {
1866 bool display_switch;
1867 bool expansion_mode_change;
1868 bool thermal_state;
1869 bool forced_power_state;
1870 bool system_power_state;
1871 bool display_conf_change;
1872 bool px_gfx_switch;
1873 bool brightness_change;
1874 bool dgpu_display_event;
1875};
1876
1877struct amdgpu_atif_functions {
1878 bool system_params;
1879 bool sbios_requests;
1880 bool select_active_disp;
1881 bool lid_state;
1882 bool get_tv_standard;
1883 bool set_tv_standard;
1884 bool get_panel_expansion_mode;
1885 bool set_panel_expansion_mode;
1886 bool temperature_change;
1887 bool graphics_device_types;
1888};
1889
1890struct amdgpu_atif {
1891 struct amdgpu_atif_notifications notifications;
1892 struct amdgpu_atif_functions functions;
1893 struct amdgpu_atif_notification_cfg notification_cfg;
1894 struct amdgpu_encoder *encoder_for_bl;
1895};
1896
1897struct amdgpu_atcs_functions {
1898 bool get_ext_state;
1899 bool pcie_perf_req;
1900 bool pcie_dev_rdy;
1901 bool pcie_bus_width;
1902};
1903
1904struct amdgpu_atcs {
1905 struct amdgpu_atcs_functions functions;
1906};
1907
1908/*
1909 * CGS
1910 */
1911void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
1912void amdgpu_cgs_destroy_device(void *cgs_device);
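/*
 * Assumed usage (a sketch, not mandated by this header): the returned
 * handle is opaque to the common graphics services (CGS) consumer and
 * must be balanced with a destroy call.
 *
 *	void *cgs_device = amdgpu_cgs_create_device(adev);
 *	if (cgs_device) {
 *		...
 *		amdgpu_cgs_destroy_device(cgs_device);
 *	}
 */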
1913
1914
1915/*
1916 * Core structure, functions and helpers.
1917 */
1918typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
1919typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1920
1921typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1922typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
1923
1924struct amdgpu_ip_block_status {
1925 bool valid;
1926 bool sw;
1927 bool hw;
1928};
1929
1930struct amdgpu_device {
1931 struct device *dev;
1932 struct drm_device *ddev;
1933 struct pci_dev *pdev;
1934 struct rw_semaphore exclusive_lock;
1935
1936 /* ASIC */
1937 enum amd_asic_type asic_type;
1938 uint32_t family;
1939 uint32_t rev_id;
1940 uint32_t external_rev_id;
1941 unsigned long flags;
1942 int usec_timeout;
1943 const struct amdgpu_asic_funcs *asic_funcs;
1944 bool shutdown;
1945 bool suspend;
1946 bool need_dma32;
1947 bool accel_working;
1948 bool needs_reset;
1949 struct work_struct reset_work;
1950 struct notifier_block acpi_nb;
1951 struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
1952 struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
1953 unsigned debugfs_count;
1954#if defined(CONFIG_DEBUG_FS)
1955 struct dentry *debugfs_regs;
1956#endif
1957 struct amdgpu_atif atif;
1958 struct amdgpu_atcs atcs;
1959 struct mutex srbm_mutex;
1960 /* GRBM index mutex. Protects concurrent access to GRBM index */
1961 struct mutex grbm_idx_mutex;
1962 struct dev_pm_domain vga_pm_domain;
1963 bool have_disp_power_ref;
1964
1965 /* BIOS */
1966 uint8_t *bios;
1967 bool is_atom_bios;
1968 uint16_t bios_header_start;
1969 struct amdgpu_bo *stollen_vga_memory;
1970 uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
1971
1972 /* Register/doorbell mmio */
1973 resource_size_t rmmio_base;
1974 resource_size_t rmmio_size;
1975 void __iomem *rmmio;
1976 /* protects concurrent MM_INDEX/DATA based register access */
1977 spinlock_t mmio_idx_lock;
1978 /* protects concurrent SMC based register access */
1979 spinlock_t smc_idx_lock;
1980 amdgpu_rreg_t smc_rreg;
1981 amdgpu_wreg_t smc_wreg;
1982 /* protects concurrent PCIE register access */
1983 spinlock_t pcie_idx_lock;
1984 amdgpu_rreg_t pcie_rreg;
1985 amdgpu_wreg_t pcie_wreg;
1986 /* protects concurrent UVD register access */
1987 spinlock_t uvd_ctx_idx_lock;
1988 amdgpu_rreg_t uvd_ctx_rreg;
1989 amdgpu_wreg_t uvd_ctx_wreg;
1990 /* protects concurrent DIDT register access */
1991 spinlock_t didt_idx_lock;
1992 amdgpu_rreg_t didt_rreg;
1993 amdgpu_wreg_t didt_wreg;
1994 /* protects concurrent ENDPOINT (audio) register access */
1995 spinlock_t audio_endpt_idx_lock;
1996 amdgpu_block_rreg_t audio_endpt_rreg;
1997 amdgpu_block_wreg_t audio_endpt_wreg;
1998 void __iomem *rio_mem;
1999 resource_size_t rio_mem_size;
2000 struct amdgpu_doorbell doorbell;
2001
2002 /* clock/pll info */
2003 struct amdgpu_clock clock;
2004
2005 /* MC */
2006 struct amdgpu_mc mc;
2007 struct amdgpu_gart gart;
2008 struct amdgpu_dummy_page dummy_page;
2009 struct amdgpu_vm_manager vm_manager;
2010
2011 /* memory management */
2012 struct amdgpu_mman mman;
2013 struct amdgpu_gem gem;
2014 struct amdgpu_vram_scratch vram_scratch;
2015 struct amdgpu_wb wb;
2016 atomic64_t vram_usage;
2017 atomic64_t vram_vis_usage;
2018 atomic64_t gtt_usage;
2019 atomic64_t num_bytes_moved;
2020 atomic_t gpu_reset_counter;
2021
2022 /* display */
2023 struct amdgpu_mode_info mode_info;
2024 struct work_struct hotplug_work;
2025 struct amdgpu_irq_src crtc_irq;
2026 struct amdgpu_irq_src pageflip_irq;
2027 struct amdgpu_irq_src hpd_irq;
2028
2029 /* rings */
2030 wait_queue_head_t fence_queue;
2031 unsigned fence_context;
2032 struct mutex ring_lock;
2033 unsigned num_rings;
2034 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
2035 bool ib_pool_ready;
2036 struct amdgpu_sa_manager ring_tmp_bo;
2037
2038 /* interrupts */
2039 struct amdgpu_irq irq;
2040
2041 /* dpm */
2042 struct amdgpu_pm pm;
2043 u32 cg_flags;
2044 u32 pg_flags;
2045
2046 /* amdgpu smumgr */
2047 struct amdgpu_smumgr smu;
2048
2049 /* gfx */
2050 struct amdgpu_gfx gfx;
2051
2052 /* sdma */
2053 struct amdgpu_sdma sdma[2];
2054 struct amdgpu_irq_src sdma_trap_irq;
2055 struct amdgpu_irq_src sdma_illegal_inst_irq;
2056
2057 /* uvd */
2058 bool has_uvd;
2059 struct amdgpu_uvd uvd;
2060
2061 /* vce */
2062 struct amdgpu_vce vce;
2063
2064 /* firmwares */
2065 struct amdgpu_firmware firmware;
2066
2067 /* GDS */
2068 struct amdgpu_gds gds;
2069
2070 const struct amdgpu_ip_block_version *ip_blocks;
2071 int num_ip_blocks;
2072 struct amdgpu_ip_block_status *ip_block_status;
2073 struct mutex mn_lock;
2074 DECLARE_HASHTABLE(mn_hash, 7);
2075
2076 /* tracking pinned memory */
2077 u64 vram_pin_size;
2078 u64 gart_pin_size;
2079
2080 /* amdkfd interface */
2081 struct kfd_dev *kfd;
2082
2083 /* kernel context for IB submission */
2084 struct amdgpu_ctx *kernel_ctx;
2085};
2086
2087bool amdgpu_device_is_px(struct drm_device *dev);
2088int amdgpu_device_init(struct amdgpu_device *adev,
2089 struct drm_device *ddev,
2090 struct pci_dev *pdev,
2091 uint32_t flags);
2092void amdgpu_device_fini(struct amdgpu_device *adev);
2093int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
2094
2095uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
2096 bool always_indirect);
2097void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
2098 bool always_indirect);
2099u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
2100void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
2101
2102u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
2103void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
2104
2105/*
2106 * Cast helper
2107 */
2108extern const struct fence_ops amdgpu_fence_ops;
2109static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
2110{
2111 struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
2112
2113 if (__f->base.ops == &amdgpu_fence_ops)
2114 return __f;
2115
2116 return NULL;
2117}
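/*
 * Example (illustrative, not part of the ABI): fences from other
 * drivers can arrive through the shared struct fence interfaces, so
 * callers must handle the NULL return.  Here 'f' is assumed to come
 * from a reservation object or a similar shared source.
 *
 *	struct amdgpu_fence *af = to_amdgpu_fence(f);
 *
 *	if (!af)
 *		return -EINVAL;
 */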
2118
2119/*
2120 * Registers read & write functions.
2121 */
2122#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
2123#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
2124#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
2125#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
2126#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
2127#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
2128#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
2129#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
2130#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
2131#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
2132#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
2133#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
2134#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
2135#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
2136#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
2137#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
2138#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
2139#define WREG32_P(reg, val, mask) \
2140 do { \
2141 uint32_t tmp_ = RREG32(reg); \
2142 tmp_ &= (mask); \
2143 tmp_ |= ((val) & ~(mask)); \
2144 WREG32(reg, tmp_); \
2145 } while (0)
2146#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
2147#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
2148#define WREG32_PLL_P(reg, val, mask) \
2149 do { \
2150 uint32_t tmp_ = RREG32_PLL(reg); \
2151 tmp_ &= (mask); \
2152 tmp_ |= ((val) & ~(mask)); \
2153 WREG32_PLL(reg, tmp_); \
2154 } while (0)
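/*
 * Note the mask convention, which is easy to misread: in WREG32_P()
 * and WREG32_PLL_P() the 'mask' selects the bits to preserve, while
 * 'val' supplies the remaining bits.  A hypothetical example that
 * rewrites only the low byte of a register (mmEXAMPLE_REG is a
 * placeholder, not a real offset):
 *
 *	WREG32_P(mmEXAMPLE_REG, 0x12, ~0xff);
 *
 * i.e. read, keep the upper 24 bits, OR in 0x12 and write back.
 */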
2155#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
2156#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
2157#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
2158
2159#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
2160#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
2161
2162#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
2163#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
2164
2165#define REG_SET_FIELD(orig_val, reg, field, field_val) \
2166 (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \
2167 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))
2168
2169#define REG_GET_FIELD(value, reg, field) \
2170 (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
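/*
 * Typical read-modify-write flow with the field helpers (a sketch;
 * EXAMPLE_CNTL and ENABLE are placeholders for names that follow the
 * reg__field__SHIFT / reg__field_MASK convention above):
 *
 *	u32 tmp = RREG32(mmEXAMPLE_CNTL);
 *	tmp = REG_SET_FIELD(tmp, EXAMPLE_CNTL, ENABLE, 1);
 *	WREG32(mmEXAMPLE_CNTL, tmp);
 *
 *	enabled = REG_GET_FIELD(RREG32(mmEXAMPLE_CNTL), EXAMPLE_CNTL, ENABLE);
 */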
2171
2172/*
2173 * BIOS helpers.
2174 */
2175#define RBIOS8(i) (adev->bios[i])
2176#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
2177#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
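/*
 * These helpers assemble multi-byte values from individual BIOS image
 * bytes, so reads are little-endian and alignment-safe regardless of
 * the host CPU.  Hypothetical example (0x48 is an arbitrary offset
 * here):
 *
 *	uint16_t word = RBIOS16(0x48);
 */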
2178
2179/*
2180 * RING helpers.
2181 */
2182static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
2183{
2184 if (ring->count_dw <= 0)
2185 DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
2186 ring->ring[ring->wptr++] = v;
2187 ring->wptr &= ring->ptr_mask;
2188 ring->count_dw--;
2189 ring->ring_free_dw--;
2190}
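/*
 * Sketch of the expected calling pattern, inferred from the count_dw
 * bookkeeping above: reserve space on the ring first, then write
 * exactly that many dwords.  The locking helpers and packet values are
 * placeholders for whatever the caller actually emits.
 *
 *	r = amdgpu_ring_lock(ring, 2);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, packet_header);
 *	amdgpu_ring_write(ring, packet_payload);
 *	amdgpu_ring_unlock_commit(ring);
 */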
2191
2192/*
2193 * ASICs macro.
2194 */
2195#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
2196#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
2197#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
2198#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
2199#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
2200#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
2201#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
2202#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
2203#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
2204#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
2205#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
2206#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
2207#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
2208#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
2209#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
2210#define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
2211#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
2212#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
2213#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
2214#define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r))
2215#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
2216#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
2217#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
2218#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
2219#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
2220#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
2221#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
2222#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
2223#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
2224#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
2225#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
2226#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
2227#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
2228#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
2229#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
2230#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
2231#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
2232#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
2233#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
2234#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
2235#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
2236#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
2237#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
2238#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
2239#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
2240#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
2241#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
2242#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
2243#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
2244#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
2245#define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
2246#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
2247#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
2248#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
2249#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
2250#define amdgpu_dpm_get_sclk(adev, l) (adev)->pm.funcs->get_sclk((adev), (l))
2251#define amdgpu_dpm_get_mclk(adev, l) (adev)->pm.funcs->get_mclk((adev), (l))
2252#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
2253#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
2254#define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l))
2255#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
2256#define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g))
2257#define amdgpu_dpm_powergate_vce(adev, g) (adev)->pm.funcs->powergate_vce((adev), (g))
2258#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
2259#define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m))
2260#define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev))
2261#define amdgpu_dpm_set_fan_speed_percent(adev, s) (adev)->pm.funcs->set_fan_speed_percent((adev), (s))
2262#define amdgpu_dpm_get_fan_speed_percent(adev, s) (adev)->pm.funcs->get_fan_speed_percent((adev), (s))
2263
2264#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
2265
2266/* Common functions */
2267int amdgpu_gpu_reset(struct amdgpu_device *adev);
2268void amdgpu_pci_config_reset(struct amdgpu_device *adev);
2269bool amdgpu_card_posted(struct amdgpu_device *adev);
2270void amdgpu_update_display_priority(struct amdgpu_device *adev);
2271bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
2272struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
2273 struct drm_file *filp,
2274 struct amdgpu_ctx *ctx,
2275 struct amdgpu_ib *ibs,
2276 uint32_t num_ibs);
2277
2278int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
2279int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
2280 u32 ip_instance, u32 ring,
2281 struct amdgpu_ring **out_ring);
2282void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
2283bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
2284int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2285 uint32_t flags);
2286bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
2287bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
2288uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2289 struct ttm_mem_reg *mem);
2290void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
2291void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
2292void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
2293void amdgpu_program_register_sequence(struct amdgpu_device *adev,
2294 const u32 *registers,
2295 const u32 array_size);
2296
2297bool amdgpu_device_is_px(struct drm_device *dev);
2298/* atpx handler */
2299#if defined(CONFIG_VGA_SWITCHEROO)
2300void amdgpu_register_atpx_handler(void);
2301void amdgpu_unregister_atpx_handler(void);
2302#else
2303static inline void amdgpu_register_atpx_handler(void) {}
2304static inline void amdgpu_unregister_atpx_handler(void) {}
2305#endif
2306
2307/*
2308 * KMS
2309 */
2310extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
2311extern int amdgpu_max_kms_ioctl;
2312
2313int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
2314int amdgpu_driver_unload_kms(struct drm_device *dev);
2315void amdgpu_driver_lastclose_kms(struct drm_device *dev);
2316int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
2317void amdgpu_driver_postclose_kms(struct drm_device *dev,
2318 struct drm_file *file_priv);
2319void amdgpu_driver_preclose_kms(struct drm_device *dev,
2320 struct drm_file *file_priv);
2321int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
2322int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
2323u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc);
2324int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc);
2325void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc);
2326int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
2327 int *max_error,
2328 struct timeval *vblank_time,
2329 unsigned flags);
2330long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
2331 unsigned long arg);
2332
2333/*
2334 * vm
2335 */
2336int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
2337void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
2338struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
2339 struct amdgpu_vm *vm,
2340 struct list_head *head);
2341int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
2342 struct amdgpu_sync *sync);
2343void amdgpu_vm_flush(struct amdgpu_ring *ring,
2344 struct amdgpu_vm *vm,
2345 struct amdgpu_fence *updates);
2346void amdgpu_vm_fence(struct amdgpu_device *adev,
2347 struct amdgpu_vm *vm,
2348 struct amdgpu_fence *fence);
2349uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
2350int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
2351 struct amdgpu_vm *vm);
2352int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2353 struct amdgpu_vm *vm);
2354int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
2355			     struct amdgpu_vm *vm, struct amdgpu_sync *sync);
2356int amdgpu_vm_bo_update(struct amdgpu_device *adev,
2357 struct amdgpu_bo_va *bo_va,
2358 struct ttm_mem_reg *mem);
2359void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2360 struct amdgpu_bo *bo);
2361struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
2362 struct amdgpu_bo *bo);
2363struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2364 struct amdgpu_vm *vm,
2365 struct amdgpu_bo *bo);
2366int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2367 struct amdgpu_bo_va *bo_va,
2368 uint64_t addr, uint64_t offset,
2369 uint64_t size, uint32_t flags);
2370int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2371 struct amdgpu_bo_va *bo_va,
2372 uint64_t addr);
2373void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2374 struct amdgpu_bo_va *bo_va);
2375
2376/*
2377 * functions used by amdgpu_encoder.c
2378 */
2379struct amdgpu_afmt_acr {
2380 u32 clock;
2381
2382 int n_32khz;
2383 int cts_32khz;
2384
2385 int n_44_1khz;
2386 int cts_44_1khz;
2387
2388 int n_48khz;
2389 int cts_48khz;
2390
2391};
2392
2393struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
2394
2395/* amdgpu_acpi.c */
2396#if defined(CONFIG_ACPI)
2397int amdgpu_acpi_init(struct amdgpu_device *adev);
2398void amdgpu_acpi_fini(struct amdgpu_device *adev);
2399bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
2400int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
2401 u8 perf_req, bool advertise);
2402int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
2403#else
2404static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
2405static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
2406#endif
2407
2408struct amdgpu_bo_va_mapping *
2409amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
2410 uint64_t addr, struct amdgpu_bo **bo);
2411
2412#include "amdgpu_object.h"
2413
2414#endif