drm/radeon/kms: allocate vram scratch page on 6xx+
author	Alex Deucher <alexander.deucher@amd.com>
Fri, 28 Oct 2011 14:30:02 +0000 (10:30 -0400)
committer	Dave Airlie <airlied@redhat.com>
Tue, 1 Nov 2011 16:05:02 +0000 (16:05 +0000)
The vram scratch was originally only used on some 7xx asics
to work around a hw bug.  Allocate the scratch page on all 6xx+
radeons and set the MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR to point
to it.  We shouldn't ever hit it since we limit the system
aperture to vram or vram and AGP, but better safe than sorry.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
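
Condensed, the change amounts to roughly the sketch below. The wrapper function name is made up for illustration; the real code is split between r600_vram_scratch_init() and the *_mc_program() register writes in the hunks that follow, and the kmap of the page (kept for the original 7xx workaround) is left out here. The idea: allocate and pin one GPU page of VRAM, then point MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR at it; like the other aperture registers, it is written with the address shifted right by 12, i.e. in 4KB-page units.

	/* rough sketch only -- illustrative name, error paths trimmed;
	 * see r600_vram_scratch_init() and r600_mc_program()/rv770_mc_program()
	 * in the hunks below for the actual patch */
	static int vram_scratch_sketch(struct radeon_device *rdev)
	{
		int r;

		/* allocate one GPU page in VRAM and pin it there */
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.robj);
		if (r)
			return r;
		r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
		if (r)
			return r;
		r = radeon_bo_pin(rdev->vram_scratch.robj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->vram_scratch.gpu_addr);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		if (r)
			return r;

		/* the MC aperture registers take 4KB-page-granular addresses,
		 * matching the vram_start/vram_end >> 12 writes in *_mc_program() */
		WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
		       rdev->vram_scratch.gpu_addr >> 12);
		return 0;
	}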
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/rv770.c

diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index ed406e8404a3498870d517fd286b5824a8665e22..db9027d871e34d8f50a75004e1901331ffffb8fc 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3031,6 +3031,10 @@ static int evergreen_startup(struct radeon_device *rdev)
                }
        }
 
+       r = r600_vram_scratch_init(rdev);
+       if (r)
+               return r;
+
        evergreen_mc_program(rdev);
        if (rdev->flags & RADEON_IS_AGP) {
                evergreen_agp_enable(rdev);
@@ -3235,6 +3239,7 @@ void evergreen_fini(struct radeon_device *rdev)
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
+       r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 556b7bc3418b9da6d0aa5bb61bdf31e83ce89059..56afaff6299a9f0986b6f6fc5ed6ad324bac57ea 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1361,6 +1361,10 @@ static int cayman_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = r600_vram_scratch_init(rdev);
+       if (r)
+               return r;
+
        evergreen_mc_program(rdev);
        r = cayman_pcie_gart_enable(rdev);
        if (r)
@@ -1556,6 +1560,7 @@ void cayman_fini(struct radeon_device *rdev)
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        cayman_pcie_gart_fini(rdev);
+       r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 1f007adc272313e0f3b114d6884562947cb9b104..75b8e004ca80ddfc3efd63e0c0ba486874341163 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1137,7 +1137,7 @@ static void r600_mc_program(struct radeon_device *rdev)
                WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
                WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
        }
-       WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+       WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
        tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
        tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
        WREG32(MC_VM_FB_LOCATION, tmp);
@@ -1276,6 +1276,53 @@ int r600_mc_init(struct radeon_device *rdev)
        return 0;
 }
 
+int r600_vram_scratch_init(struct radeon_device *rdev)
+{
+       int r;
+
+       if (rdev->vram_scratch.robj == NULL) {
+               r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
+                                    PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+                                    &rdev->vram_scratch.robj);
+               if (r) {
+                       return r;
+               }
+       }
+
+       r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+       if (unlikely(r != 0))
+               return r;
+       r = radeon_bo_pin(rdev->vram_scratch.robj,
+                         RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
+       if (r) {
+               radeon_bo_unreserve(rdev->vram_scratch.robj);
+               return r;
+       }
+       r = radeon_bo_kmap(rdev->vram_scratch.robj,
+                               (void **)&rdev->vram_scratch.ptr);
+       if (r)
+               radeon_bo_unpin(rdev->vram_scratch.robj);
+       radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+       return r;
+}
+
+void r600_vram_scratch_fini(struct radeon_device *rdev)
+{
+       int r;
+
+       if (rdev->vram_scratch.robj == NULL) {
+               return;
+       }
+       r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+       if (likely(r == 0)) {
+               radeon_bo_kunmap(rdev->vram_scratch.robj);
+               radeon_bo_unpin(rdev->vram_scratch.robj);
+               radeon_bo_unreserve(rdev->vram_scratch.robj);
+       }
+       radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
 /* We doesn't check that the GPU really needs a reset we simply do the
  * reset, it's up to the caller to determine if the GPU needs one. We
  * might add an helper function to check that.
@@ -2436,6 +2483,10 @@ int r600_startup(struct radeon_device *rdev)
                }
        }
 
+       r = r600_vram_scratch_init(rdev);
+       if (r)
+               return r;
+
        r600_mc_program(rdev);
        if (rdev->flags & RADEON_IS_AGP) {
                r600_agp_enable(rdev);
@@ -2656,6 +2707,7 @@ void r600_fini(struct radeon_device *rdev)
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        r600_pcie_gart_fini(rdev);
+       r600_vram_scratch_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 00f6dc4973a9666eb62432429b8323ce6d726a18..83b76db7bcfddd104cc4674bc3208b725d4c1742 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1144,10 +1144,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp);
 
-/* VRAM scratch page for HDP bug */
-struct r700_vram_scratch {
+/* VRAM scratch page for HDP bug, default vram page */
+struct r600_vram_scratch {
        struct radeon_bo                *robj;
        volatile uint32_t               *ptr;
+       u64                             gpu_addr;
 };
 
 /*
@@ -1219,7 +1220,7 @@ struct radeon_device {
        const struct firmware *rlc_fw;  /* r6/700 RLC firmware */
        const struct firmware *mc_fw;   /* NI MC firmware */
        struct r600_blit r600_blit;
-       struct r700_vram_scratch vram_scratch;
+       struct r600_vram_scratch vram_scratch;
        int msi_enabled; /* msi enabled */
        struct r600_ih ih; /* r6/700 interrupt ring */
        struct work_struct hotplug_work;
@@ -1467,6 +1468,12 @@ extern int radeon_resume_kms(struct drm_device *dev);
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 
+/*
+ * R600 vram scratch functions
+ */
+int r600_vram_scratch_init(struct radeon_device *rdev);
+void r600_vram_scratch_fini(struct radeon_device *rdev);
+
 /*
  * r600 functions used by radeon_encoder.c
  */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 87cc1feee3ac9e29d10a52cd33d4062342b5cc95..a09049d1590180b56c50f4e2cb6ee9600b79a8c1 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -282,7 +282,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
                WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                        rdev->mc.vram_end >> 12);
        }
-       WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+       WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
        tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
        tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
        WREG32(MC_VM_FB_LOCATION, tmp);
@@ -959,54 +959,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
 }
 
-static int rv770_vram_scratch_init(struct radeon_device *rdev)
-{
-       int r;
-       u64 gpu_addr;
-
-       if (rdev->vram_scratch.robj == NULL) {
-               r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
-                                    PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-                                    &rdev->vram_scratch.robj);
-               if (r) {
-                       return r;
-               }
-       }
-
-       r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
-       if (unlikely(r != 0))
-               return r;
-       r = radeon_bo_pin(rdev->vram_scratch.robj,
-                         RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
-       if (r) {
-               radeon_bo_unreserve(rdev->vram_scratch.robj);
-               return r;
-       }
-       r = radeon_bo_kmap(rdev->vram_scratch.robj,
-                               (void **)&rdev->vram_scratch.ptr);
-       if (r)
-               radeon_bo_unpin(rdev->vram_scratch.robj);
-       radeon_bo_unreserve(rdev->vram_scratch.robj);
-
-       return r;
-}
-
-static void rv770_vram_scratch_fini(struct radeon_device *rdev)
-{
-       int r;
-
-       if (rdev->vram_scratch.robj == NULL) {
-               return;
-       }
-       r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
-       if (likely(r == 0)) {
-               radeon_bo_kunmap(rdev->vram_scratch.robj);
-               radeon_bo_unpin(rdev->vram_scratch.robj);
-               radeon_bo_unreserve(rdev->vram_scratch.robj);
-       }
-       radeon_bo_unref(&rdev->vram_scratch.robj);
-}
-
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 {
        u64 size_bf, size_af;
@@ -1106,6 +1058,10 @@ static int rv770_startup(struct radeon_device *rdev)
                }
        }
 
+       r = r600_vram_scratch_init(rdev);
+       if (r)
+               return r;
+
        rv770_mc_program(rdev);
        if (rdev->flags & RADEON_IS_AGP) {
                rv770_agp_enable(rdev);
@@ -1114,9 +1070,7 @@ static int rv770_startup(struct radeon_device *rdev)
                if (r)
                        return r;
        }
-       r = rv770_vram_scratch_init(rdev);
-       if (r)
-               return r;
+
        rv770_gpu_init(rdev);
        r = r600_blit_init(rdev);
        if (r) {
@@ -1316,7 +1270,7 @@ void rv770_fini(struct radeon_device *rdev)
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        rv770_pcie_gart_fini(rdev);
-       rv770_vram_scratch_fini(rdev);
+       r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_agp_fini(rdev);