drm/amdgpu: remove unused function
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 82edf95b7740d7fe9070cba62009374c08c34562..811353c3f1319e2d8b095ccb7c5228190545f6dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -53,6 +53,7 @@
 #include "amdgpu_ucode.h"
 #include "amdgpu_gds.h"
 #include "amd_powerplay.h"
+#include "amdgpu_acp.h"
 
 #include "gpu_scheduler.h"
 
@@ -74,7 +75,6 @@ extern int amdgpu_dpm;
 extern int amdgpu_smc_load_fw;
 extern int amdgpu_aspm;
 extern int amdgpu_runtime_pm;
-extern int amdgpu_hard_reset;
 extern unsigned amdgpu_ip_block_mask;
 extern int amdgpu_bapm;
 extern int amdgpu_deep_color;
@@ -82,10 +82,8 @@ extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
 extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
-extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
-extern int amdgpu_enable_semaphores;
 extern int amdgpu_powerplay;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
@@ -189,7 +187,6 @@ struct amdgpu_fence;
 struct amdgpu_ib;
 struct amdgpu_vm;
 struct amdgpu_ring;
-struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
 struct amdgpu_job;
 struct amdgpu_irq_src;
@@ -287,7 +284,7 @@ struct amdgpu_vm_pte_funcs {
                         unsigned count);
        /* write pte one entry at a time with addr mapping */
        void (*write_pte)(struct amdgpu_ib *ib,
-                         uint64_t pe,
+                         const dma_addr_t *pages_addr, uint64_t pe,
                          uint64_t addr, unsigned count,
                          uint32_t incr, uint32_t flags);
        /* for linear pte/pde updates without addr mapping */
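
Note: the extra pages_addr argument lets write_pte resolve scattered
system pages through a DMA address array passed in by the caller,
instead of looking them up via the device's GART (see the reworked
amdgpu_vm_map_gart() further down). A minimal sketch of an
implementation; packet headers and names are illustrative, not from
this patch:

	static void example_vm_write_pte(struct amdgpu_ib *ib,
					 const dma_addr_t *pages_addr,
					 uint64_t pe, uint64_t addr,
					 unsigned count, uint32_t incr,
					 uint32_t flags)
	{
		unsigned i;

		for (i = 0; i < count; ++i) {
			/* look the page up in the DMA address array */
			uint64_t value = amdgpu_vm_map_gart(pages_addr, addr);

			value |= flags;
			ib->ptr[ib->length_dw++] = lower_32_bits(value);
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
			addr += incr;
		}
	}
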
@@ -334,9 +331,6 @@ struct amdgpu_ring_funcs {
                        struct amdgpu_ib *ib);
        void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
                           uint64_t seq, unsigned flags);
-       bool (*emit_semaphore)(struct amdgpu_ring *ring,
-                              struct amdgpu_semaphore *semaphore,
-                              bool emit_wait);
        void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
                              uint64_t pd_addr);
        void (*emit_hdp_flush)(struct amdgpu_ring *ring);
@@ -394,7 +388,7 @@ struct amdgpu_fence_driver {
        uint64_t                        gpu_addr;
        volatile uint32_t               *cpu_addr;
        /* sync_seq is protected by ring emission lock */
-       uint64_t                        sync_seq[AMDGPU_MAX_RINGS];
+       uint64_t                        sync_seq;
        atomic64_t                      last_seq;
        bool                            initialized;
        struct amdgpu_irq_src           *irq_src;
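
Note: with inter-ring semaphores gone, a ring no longer tracks the
sequence numbers it has synced against every other ring, so the
per-ring array collapses to the ring's own last emitted sequence.
Emission then looks roughly like this (a sketch, not this patch's
code):

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq,
			       AMDGPU_FENCE_FLAG_INT);
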
@@ -447,11 +441,6 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
-                           struct amdgpu_ring *ring);
-void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
-                           struct amdgpu_ring *ring);
-
 /*
  * TTM.
  */
@@ -484,8 +473,6 @@ struct amdgpu_bo_list_entry {
        struct amdgpu_bo                *robj;
        struct ttm_validate_buffer      tv;
        struct amdgpu_bo_va             *bo_va;
-       unsigned                        prefered_domains;
-       unsigned                        allowed_domains;
        uint32_t                        priority;
 };
 
@@ -522,7 +509,8 @@ struct amdgpu_bo {
        /* Protected by gem.mutex */
        struct list_head                list;
        /* Protected by tbo.reserved */
-       u32                             initial_domain;
+       u32                             prefered_domains;
+       u32                             allowed_domains;
        struct ttm_place                placements[AMDGPU_GEM_DOMAIN_MAX + 1];
        struct ttm_placement            placement;
        struct ttm_buffer_object        tbo;
@@ -639,32 +627,10 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p);
-
-/*
- * Semaphores.
- */
-struct amdgpu_semaphore {
-       struct amdgpu_sa_bo     *sa_bo;
-       signed                  waiters;
-       uint64_t                gpu_addr;
-};
-
-int amdgpu_semaphore_create(struct amdgpu_device *adev,
-                           struct amdgpu_semaphore **semaphore);
-bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
-                                 struct amdgpu_semaphore *semaphore);
-bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
-                               struct amdgpu_semaphore *semaphore);
-void amdgpu_semaphore_free(struct amdgpu_device *adev,
-                          struct amdgpu_semaphore **semaphore,
-                          struct fence *fence);
-
 /*
  * Synchronization
  */
 struct amdgpu_sync {
-       struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
-       struct fence            *sync_to[AMDGPU_MAX_RINGS];
        DECLARE_HASHTABLE(fences, 4);
        struct fence            *last_vm_update;
 };
@@ -676,8 +642,6 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
                     void *owner);
-int amdgpu_sync_rings(struct amdgpu_sync *sync,
-                     struct amdgpu_ring *ring);
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
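
Note: amdgpu_sync now tracks dependencies purely as fences in the
small hashtable above; the semaphore slots and the per-ring sync_to
array are gone. Typical use stays fence-based throughout. A hedged
sketch (amdgpu_sync_create() is assumed from amdgpu_sync.c, it is not
part of this hunk):

	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner);
	if (!r)
		r = amdgpu_sync_wait(&sync);
	amdgpu_sync_free(adev, &sync, NULL);
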
@@ -814,6 +778,7 @@ struct amdgpu_ib {
        struct amdgpu_ring              *ring;
        struct amdgpu_fence             *fence;
        struct amdgpu_user_fence        *user;
+       bool                            grabbed_vmid;
        struct amdgpu_vm                *vm;
        struct amdgpu_ctx               *ctx;
        struct amdgpu_sync              sync;
@@ -850,7 +815,6 @@ struct amdgpu_ring {
        struct amd_gpu_scheduler        sched;
 
        spinlock_t              fence_lock;
-       struct mutex            *ring_lock;
        struct amdgpu_bo        *ring_obj;
        volatile uint32_t       *ring;
        unsigned                rptr_offs;
@@ -859,7 +823,7 @@ struct amdgpu_ring {
        unsigned                wptr;
        unsigned                wptr_old;
        unsigned                ring_size;
-       unsigned                ring_free_dw;
+       unsigned                max_dw;
        int                     count_dw;
        uint64_t                gpu_addr;
        uint32_t                align_mask;
@@ -867,8 +831,6 @@ struct amdgpu_ring {
        bool                    ready;
        u32                     nop;
        u32                     idx;
-       u64                     last_semaphore_signal_addr;
-       u64                     last_semaphore_wait_addr;
        u32                     me;
        u32                     pipe;
        u32                     queue;
@@ -932,6 +894,8 @@ struct amdgpu_vm_id {
 };
 
 struct amdgpu_vm {
+       /* tree of virtual addresses mapped */
+       spinlock_t              it_lock;
        struct rb_root          va;
 
        /* protecting invalidated */
@@ -956,21 +920,25 @@ struct amdgpu_vm {
 
        /* for id and flush management per ring */
        struct amdgpu_vm_id     ids[AMDGPU_MAX_RINGS];
-       /* for interval tree */
-       spinlock_t              it_lock;
+
        /* protecting freed */
        spinlock_t              freed_lock;
 };
 
+struct amdgpu_vm_manager_id {
+       struct list_head        list;
+       struct fence            *active;
+       atomic_long_t           owner;
+};
+
 struct amdgpu_vm_manager {
-       struct {
-               struct fence    *active;
-               atomic_long_t   owner;
-       } ids[AMDGPU_NUM_VM];
+       /* Handling of VMIDs */
+       struct mutex                            lock;
+       unsigned                                num_ids;
+       struct list_head                        ids_lru;
+       struct amdgpu_vm_manager_id             ids[AMDGPU_NUM_VM];
 
        uint32_t                                max_pfn;
-       /* number of VMIDs */
-       unsigned                                nvm;
        /* vram base address for page table entry  */
        u64                                     vram_base_offset;
        /* is vm enabled? */
@@ -980,6 +948,7 @@ struct amdgpu_vm_manager {
        struct amdgpu_ring                      *vm_pte_funcs_ring;
 };
 
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
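
Note: VMID handling moves from a flat owner/active array to an
LRU-ordered list under a mutex, so amdgpu_vm_grab_id() can simply
recycle the least recently used id. A sketch of the allocation path
(illustrative; the real logic would also recheck whether the VM still
owns an id before stealing one):

	struct amdgpu_vm_manager_id *id;

	mutex_lock(&adev->vm_manager.lock);
	id = list_first_entry(&adev->vm_manager.ids_lru,
			      struct amdgpu_vm_manager_id, list);
	/* oldest entry gets reused; keep the list in LRU order */
	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
	mutex_unlock(&adev->vm_manager.lock);
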
@@ -990,14 +959,11 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                     struct amdgpu_sync *sync);
+                     struct amdgpu_sync *sync, struct fence *fence);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
                     struct amdgpu_vm *vm,
                     struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-                    struct amdgpu_vm *vm,
-                    struct fence *fence);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -1096,6 +1062,8 @@ struct amdgpu_bo_list {
 
 struct amdgpu_bo_list *
 amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+                            struct list_head *validated);
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
 void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
 
@@ -1219,15 +1187,10 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-/* Ring access between begin & end cannot sleep */
-void amdgpu_ring_free_size(struct amdgpu_ring *ring);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
 unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
                            uint32_t **data);
 int amdgpu_ring_restore(struct amdgpu_ring *ring,
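
Note: with the GPU scheduler now the only submission path, the
lock/unlock variants are redundant and ring access reduces to plain
alloc/commit. The calling pattern becomes, roughly:

	r = amdgpu_ring_alloc(ring, ndw);
	if (r)
		return r;
	amdgpu_ring_write(ring, ring->nop);	/* ...payload dwords... */
	amdgpu_ring_commit(ring);
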
@@ -1246,26 +1209,29 @@ struct amdgpu_cs_chunk {
        uint32_t                chunk_id;
        uint32_t                length_dw;
        uint32_t                *kdata;
-       void __user             *user_ptr;
 };
 
 struct amdgpu_cs_parser {
        struct amdgpu_device    *adev;
        struct drm_file         *filp;
        struct amdgpu_ctx       *ctx;
-       struct amdgpu_bo_list *bo_list;
+
        /* chunks */
        unsigned                nchunks;
        struct amdgpu_cs_chunk  *chunks;
-       /* relocations */
-       struct amdgpu_bo_list_entry     vm_pd;
-       struct list_head        validated;
-       struct fence            *fence;
 
-       struct amdgpu_ib        *ibs;
+       /* indirect buffers */
        uint32_t                num_ibs;
+       struct amdgpu_ib        *ibs;
 
-       struct ww_acquire_ctx   ticket;
+       /* buffer objects */
+       struct ww_acquire_ctx           ticket;
+       struct amdgpu_bo_list           *bo_list;
+       struct amdgpu_bo_list_entry     vm_pd;
+       struct list_head                validated;
+       struct fence                    *fence;
+       uint64_t                        bytes_moved_threshold;
+       uint64_t                        bytes_moved;
 
        /* user fence */
        struct amdgpu_user_fence        uf;
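
Note: the two new counters let command submission throttle buffer
migration per submission. A sketch of how validation might use them
together with the per-BO domains that moved into amdgpu_bo above
(counter and helper names assumed, not part of this hunk):

	u64 initial = atomic64_read(&adev->num_bytes_moved);
	u32 domain;
	int r;

	domain = p->bytes_moved < p->bytes_moved_threshold ?
		 bo->prefered_domains : bo->allowed_domains;
	amdgpu_ttm_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - initial;
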
@@ -1538,6 +1504,7 @@ enum amdgpu_dpm_forced_level {
        AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
        AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
        AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
+       AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
 };
 
 struct amdgpu_vce_state {
@@ -1924,6 +1891,13 @@ void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
 void amdgpu_cgs_destroy_device(void *cgs_device);
 
 
+/*
+ * CGS
+ */
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(void *cgs_device);
+
+
 /*
  * Core structure, functions and helpers.
  */
@@ -1944,6 +1918,10 @@ struct amdgpu_device {
        struct drm_device               *ddev;
        struct pci_dev                  *pdev;
 
+#ifdef CONFIG_DRM_AMD_ACP
+       struct amdgpu_acp               acp;
+#endif
+
        /* ASIC */
        enum amd_asic_type              asic_type;
        uint32_t                        family;
@@ -2038,7 +2016,6 @@ struct amdgpu_device {
 
        /* rings */
        unsigned                        fence_context;
-       struct mutex                    ring_lock;
        unsigned                        num_rings;
        struct amdgpu_ring              *rings[AMDGPU_MAX_RINGS];
        bool                            ib_pool_ready;
@@ -2050,6 +2027,7 @@ struct amdgpu_device {
        /* powerplay */
        struct amd_powerplay            powerplay;
        bool                            pp_enabled;
+       bool                            pp_force_state_enabled;
 
        /* dpm */
        struct amdgpu_pm                pm;
@@ -2197,7 +2175,6 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
        ring->ring[ring->wptr++] = v;
        ring->wptr &= ring->ptr_mask;
        ring->count_dw--;
-       ring->ring_free_dw--;
 }
 
 static inline struct amdgpu_sdma_instance *
@@ -2233,7 +2210,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
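
Note: call sites of the macro pick up the pages_addr argument
accordingly, e.g. (sketch):

	amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
			    count, incr, flags);
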
@@ -2245,7 +2222,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
-#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
@@ -2339,6 +2315,21 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_dpm_get_performance_level(adev) \
        (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
 
+#define amdgpu_dpm_get_pp_num_states(adev, data) \
+       (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+
+#define amdgpu_dpm_get_pp_table(adev, table) \
+       (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+
+#define amdgpu_dpm_set_pp_table(adev, buf, size) \
+       (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+
+#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
+       (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+
+#define amdgpu_dpm_force_clock_level(adev, type, level) \
+               (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+
 #define amdgpu_dpm_dispatch_task(adev, event_id, input, output)                \
        (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
 
@@ -2349,7 +2340,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev);
 void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
-bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@@ -2360,6 +2350,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
                                     uint32_t flags);
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+                                 unsigned long end);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
                                 struct ttm_mem_reg *mem);
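
Note: the new helper lets the MMU notifier test an invalidated address
range against a userptr TTM before doing any work, along the lines of
(sketch; bo, start and end come from the notifier callback):

	if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
		continue;	/* range does not touch this BO */
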