/*
 * Copyright (C) 2007 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
30 #include "nouveau_drv.h"
32 struct nv50_instmem_priv
{
33 uint32_t save1700
[5]; /* 0x1700->0x1710 */
35 struct nouveau_gpuobj
*pramin_pt
;
36 struct nouveau_gpuobj
*pramin_bar
;
37 struct nouveau_gpuobj
*fb_bar
;
41 nv50_channel_del(struct nouveau_channel
**pchan
)
43 struct nouveau_channel
*chan
;
50 nouveau_gpuobj_ref(NULL
, &chan
->ramfc
);
51 nouveau_gpuobj_ref(NULL
, &chan
->vm_pd
);
52 if (chan
->ramin_heap
.free_stack
.next
)
53 drm_mm_takedown(&chan
->ramin_heap
);
54 nouveau_gpuobj_ref(NULL
, &chan
->ramin
);
59 nv50_channel_new(struct drm_device
*dev
, u32 size
,
60 struct nouveau_channel
**pchan
)
62 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
63 u32 pgd
= (dev_priv
->chipset
== 0x50) ? 0x1400 : 0x0200;
64 u32 fc
= (dev_priv
->chipset
== 0x50) ? 0x0000 : 0x4200;
65 struct nouveau_channel
*chan
;
68 chan
= kzalloc(sizeof(*chan
), GFP_KERNEL
);
73 ret
= nouveau_gpuobj_new(dev
, NULL
, size
, 0x1000, 0, &chan
->ramin
);
75 nv50_channel_del(&chan
);
79 ret
= drm_mm_init(&chan
->ramin_heap
, 0x6000, chan
->ramin
->size
);
81 nv50_channel_del(&chan
);
85 ret
= nouveau_gpuobj_new_fake(dev
, chan
->ramin
->pinst
== ~0 ? ~0 :
86 chan
->ramin
->pinst
+ pgd
,
87 chan
->ramin
->vinst
+ pgd
,
88 0x4000, NVOBJ_FLAG_ZERO_ALLOC
,
91 nv50_channel_del(&chan
);
95 ret
= nouveau_gpuobj_new_fake(dev
, chan
->ramin
->pinst
== ~0 ? ~0 :
96 chan
->ramin
->pinst
+ fc
,
97 chan
->ramin
->vinst
+ fc
, 0x100,
98 NVOBJ_FLAG_ZERO_ALLOC
, &chan
->ramfc
);
100 nv50_channel_del(&chan
);
109 nv50_instmem_init(struct drm_device
*dev
)
111 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
112 struct nv50_instmem_priv
*priv
;
113 struct nouveau_channel
*chan
;
117 priv
= kzalloc(sizeof(*priv
), GFP_KERNEL
);
120 dev_priv
->engine
.instmem
.priv
= priv
;
122 /* Save state, will restore at takedown. */
123 for (i
= 0x1700; i
<= 0x1710; i
+= 4)
124 priv
->save1700
[(i
-0x1700)/4] = nv_rd32(dev
, i
);
126 /* Global PRAMIN heap */
127 ret
= drm_mm_init(&dev_priv
->ramin_heap
, 0, dev_priv
->ramin_size
);
129 NV_ERROR(dev
, "Failed to init RAMIN heap\n");
133 /* we need a channel to plug into the hw to control the BARs */
134 ret
= nv50_channel_new(dev
, 128*1024, &dev_priv
->fifos
[0]);
137 chan
= dev_priv
->fifos
[127] = dev_priv
->fifos
[0];
139 /* allocate page table for PRAMIN BAR */
140 ret
= nouveau_gpuobj_new(dev
, chan
, (dev_priv
->ramin_size
>> 12) * 8,
141 0x1000, NVOBJ_FLAG_ZERO_ALLOC
,
146 nv_wo32(chan
->vm_pd
, 0x0000, priv
->pramin_pt
->vinst
| 0x63);
147 nv_wo32(chan
->vm_pd
, 0x0004, 0);
149 /* DMA object for PRAMIN BAR */
150 ret
= nouveau_gpuobj_new(dev
, chan
, 6*4, 16, 0, &priv
->pramin_bar
);
153 nv_wo32(priv
->pramin_bar
, 0x00, 0x7fc00000);
154 nv_wo32(priv
->pramin_bar
, 0x04, dev_priv
->ramin_size
- 1);
155 nv_wo32(priv
->pramin_bar
, 0x08, 0x00000000);
156 nv_wo32(priv
->pramin_bar
, 0x0c, 0x00000000);
157 nv_wo32(priv
->pramin_bar
, 0x10, 0x00000000);
158 nv_wo32(priv
->pramin_bar
, 0x14, 0x00000000);
160 /* map channel into PRAMIN, gpuobj didn't do it for us */
161 ret
= nv50_instmem_bind(dev
, chan
->ramin
);
166 nv_wr32(dev
, 0x001704, 0x00000000 | (chan
->ramin
->vinst
>> 12));
167 nv_wr32(dev
, 0x001704, 0x40000000 | (chan
->ramin
->vinst
>> 12));
168 nv_wr32(dev
, 0x00170c, 0x80000000 | (priv
->pramin_bar
->cinst
>> 4));
170 tmp
= nv_ri32(dev
, 0);
171 nv_wi32(dev
, 0, ~tmp
);
172 if (nv_ri32(dev
, 0) != ~tmp
) {
173 NV_ERROR(dev
, "PRAMIN readback failed\n");
176 nv_wi32(dev
, 0, tmp
);
178 dev_priv
->ramin_available
= true;
180 /* Determine VM layout */
181 dev_priv
->vm_gart_base
= roundup(NV50_VM_BLOCK
, NV50_VM_BLOCK
);
182 dev_priv
->vm_gart_size
= NV50_VM_BLOCK
;
184 dev_priv
->vm_vram_base
= dev_priv
->vm_gart_base
+ dev_priv
->vm_gart_size
;
185 dev_priv
->vm_vram_size
= dev_priv
->vram_size
;
186 if (dev_priv
->vm_vram_size
> NV50_VM_MAX_VRAM
)
187 dev_priv
->vm_vram_size
= NV50_VM_MAX_VRAM
;
188 dev_priv
->vm_vram_size
= roundup(dev_priv
->vm_vram_size
, NV50_VM_BLOCK
);
189 dev_priv
->vm_vram_pt_nr
= dev_priv
->vm_vram_size
/ NV50_VM_BLOCK
;
191 dev_priv
->vm_end
= dev_priv
->vm_vram_base
+ dev_priv
->vm_vram_size
;
193 NV_DEBUG(dev
, "NV50VM: GART 0x%016llx-0x%016llx\n",
194 dev_priv
->vm_gart_base
,
195 dev_priv
->vm_gart_base
+ dev_priv
->vm_gart_size
- 1);
196 NV_DEBUG(dev
, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
197 dev_priv
->vm_vram_base
,
198 dev_priv
->vm_vram_base
+ dev_priv
->vm_vram_size
- 1);
200 /* VRAM page table(s), mapped into VM at +1GiB */
201 for (i
= 0; i
< dev_priv
->vm_vram_pt_nr
; i
++) {
202 ret
= nouveau_gpuobj_new(dev
, NULL
, NV50_VM_BLOCK
/ 0x10000 * 8,
203 0, NVOBJ_FLAG_ZERO_ALLOC
,
204 &chan
->vm_vram_pt
[i
]);
206 NV_ERROR(dev
, "Error creating VRAM PGT: %d\n", ret
);
207 dev_priv
->vm_vram_pt_nr
= i
;
210 dev_priv
->vm_vram_pt
[i
] = chan
->vm_vram_pt
[i
];
212 nv_wo32(chan
->vm_pd
, 0x10 + (i
*8),
213 chan
->vm_vram_pt
[i
]->vinst
| 0x61);
214 nv_wo32(chan
->vm_pd
, 0x14 + (i
*8), 0);
217 /* DMA object for FB BAR */
218 ret
= nouveau_gpuobj_new(dev
, chan
, 6*4, 16, 0, &priv
->fb_bar
);
221 nv_wo32(priv
->fb_bar
, 0x00, 0x7fc00000);
222 nv_wo32(priv
->fb_bar
, 0x04, 0x40000000 +
223 pci_resource_len(dev
->pdev
, 1) - 1);
224 nv_wo32(priv
->fb_bar
, 0x08, 0x40000000);
225 nv_wo32(priv
->fb_bar
, 0x0c, 0x00000000);
226 nv_wo32(priv
->fb_bar
, 0x10, 0x00000000);
227 nv_wo32(priv
->fb_bar
, 0x14, 0x00000000);
229 dev_priv
->engine
.instmem
.flush(dev
);
231 nv_wr32(dev
, 0x001708, 0x80000000 | (priv
->fb_bar
->cinst
>> 4));
232 for (i
= 0; i
< 8; i
++)
233 nv_wr32(dev
, 0x1900 + (i
*4), 0);
239 nv50_instmem_takedown(struct drm_device
*dev
)
241 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
242 struct nv50_instmem_priv
*priv
= dev_priv
->engine
.instmem
.priv
;
243 struct nouveau_channel
*chan
= dev_priv
->fifos
[0];
251 dev_priv
->ramin_available
= false;
253 /* Restore state from before init */
254 for (i
= 0x1700; i
<= 0x1710; i
+= 4)
255 nv_wr32(dev
, i
, priv
->save1700
[(i
- 0x1700) / 4]);
257 nouveau_gpuobj_ref(NULL
, &priv
->fb_bar
);
258 nouveau_gpuobj_ref(NULL
, &priv
->pramin_bar
);
259 nouveau_gpuobj_ref(NULL
, &priv
->pramin_pt
);
261 /* Destroy dummy channel */
263 for (i
= 0; i
< dev_priv
->vm_vram_pt_nr
; i
++)
264 nouveau_gpuobj_ref(NULL
, &chan
->vm_vram_pt
[i
]);
265 dev_priv
->vm_vram_pt_nr
= 0;
267 nv50_channel_del(&dev_priv
->fifos
[0]);
268 dev_priv
->fifos
[127] = NULL
;
271 dev_priv
->engine
.instmem
.priv
= NULL
;
276 nv50_instmem_suspend(struct drm_device
*dev
)
278 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
279 struct nouveau_channel
*chan
= dev_priv
->fifos
[0];
280 struct nouveau_gpuobj
*ramin
= chan
->ramin
;
283 ramin
->im_backing_suspend
= vmalloc(ramin
->size
);
284 if (!ramin
->im_backing_suspend
)
287 for (i
= 0; i
< ramin
->size
; i
+= 4)
288 ramin
->im_backing_suspend
[i
/4] = nv_ri32(dev
, i
);
293 nv50_instmem_resume(struct drm_device
*dev
)
295 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
296 struct nv50_instmem_priv
*priv
= dev_priv
->engine
.instmem
.priv
;
297 struct nouveau_channel
*chan
= dev_priv
->fifos
[0];
298 struct nouveau_gpuobj
*ramin
= chan
->ramin
;
301 dev_priv
->ramin_available
= false;
302 dev_priv
->ramin_base
= ~0;
303 for (i
= 0; i
< ramin
->size
; i
+= 4)
304 nv_wo32(ramin
, i
, ramin
->im_backing_suspend
[i
/4]);
305 dev_priv
->ramin_available
= true;
306 vfree(ramin
->im_backing_suspend
);
307 ramin
->im_backing_suspend
= NULL
;
309 /* Poke the relevant regs, and pray it works :) */
310 nv_wr32(dev
, NV50_PUNK_BAR_CFG_BASE
, (chan
->ramin
->vinst
>> 12));
311 nv_wr32(dev
, NV50_PUNK_UNK1710
, 0);
312 nv_wr32(dev
, NV50_PUNK_BAR_CFG_BASE
, (chan
->ramin
->vinst
>> 12) |
313 NV50_PUNK_BAR_CFG_BASE_VALID
);
314 nv_wr32(dev
, NV50_PUNK_BAR1_CTXDMA
, (priv
->fb_bar
->cinst
>> 4) |
315 NV50_PUNK_BAR1_CTXDMA_VALID
);
316 nv_wr32(dev
, NV50_PUNK_BAR3_CTXDMA
, (priv
->pramin_bar
->cinst
>> 4) |
317 NV50_PUNK_BAR3_CTXDMA_VALID
);
319 for (i
= 0; i
< 8; i
++)
320 nv_wr32(dev
, 0x1900 + (i
*4), 0);
324 nv50_instmem_populate(struct drm_device
*dev
, struct nouveau_gpuobj
*gpuobj
,
329 if (gpuobj
->im_backing
)
332 *sz
= ALIGN(*sz
, 4096);
336 ret
= nouveau_bo_new(dev
, NULL
, *sz
, 0, TTM_PL_FLAG_VRAM
, 0, 0x0000,
337 true, false, &gpuobj
->im_backing
);
339 NV_ERROR(dev
, "error getting PRAMIN backing pages: %d\n", ret
);
343 ret
= nouveau_bo_pin(gpuobj
->im_backing
, TTM_PL_FLAG_VRAM
);
345 NV_ERROR(dev
, "error pinning PRAMIN backing VRAM: %d\n", ret
);
346 nouveau_bo_ref(NULL
, &gpuobj
->im_backing
);
350 gpuobj
->vinst
= gpuobj
->im_backing
->bo
.mem
.start
<< PAGE_SHIFT
;
355 nv50_instmem_clear(struct drm_device
*dev
, struct nouveau_gpuobj
*gpuobj
)
357 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
359 if (gpuobj
&& gpuobj
->im_backing
) {
360 if (gpuobj
->im_bound
)
361 dev_priv
->engine
.instmem
.unbind(dev
, gpuobj
);
362 nouveau_bo_unpin(gpuobj
->im_backing
);
363 nouveau_bo_ref(NULL
, &gpuobj
->im_backing
);
364 gpuobj
->im_backing
= NULL
;
369 nv50_instmem_bind(struct drm_device
*dev
, struct nouveau_gpuobj
*gpuobj
)
371 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
372 struct nv50_instmem_priv
*priv
= dev_priv
->engine
.instmem
.priv
;
373 struct nouveau_gpuobj
*pramin_pt
= priv
->pramin_pt
;
374 uint32_t pte
, pte_end
;
377 if (!gpuobj
->im_backing
|| !gpuobj
->im_pramin
|| gpuobj
->im_bound
)
380 NV_DEBUG(dev
, "st=0x%lx sz=0x%lx\n",
381 gpuobj
->im_pramin
->start
, gpuobj
->im_pramin
->size
);
383 pte
= (gpuobj
->im_pramin
->start
>> 12) << 1;
384 pte_end
= ((gpuobj
->im_pramin
->size
>> 12) << 1) + pte
;
385 vram
= gpuobj
->vinst
;
387 NV_DEBUG(dev
, "pramin=0x%lx, pte=%d, pte_end=%d\n",
388 gpuobj
->im_pramin
->start
, pte
, pte_end
);
389 NV_DEBUG(dev
, "first vram page: 0x%010llx\n", gpuobj
->vinst
);
392 if (dev_priv
->vram_sys_base
) {
393 vram
+= dev_priv
->vram_sys_base
;
397 while (pte
< pte_end
) {
398 nv_wo32(pramin_pt
, (pte
* 4) + 0, lower_32_bits(vram
));
399 nv_wo32(pramin_pt
, (pte
* 4) + 4, upper_32_bits(vram
));
403 dev_priv
->engine
.instmem
.flush(dev
);
405 nv50_vm_flush(dev
, 4);
406 nv50_vm_flush(dev
, 6);
408 gpuobj
->im_bound
= 1;
413 nv50_instmem_unbind(struct drm_device
*dev
, struct nouveau_gpuobj
*gpuobj
)
415 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
416 struct nv50_instmem_priv
*priv
= dev_priv
->engine
.instmem
.priv
;
417 uint32_t pte
, pte_end
;
419 if (gpuobj
->im_bound
== 0)
422 /* can happen during late takedown */
423 if (unlikely(!dev_priv
->ramin_available
))
426 pte
= (gpuobj
->im_pramin
->start
>> 12) << 1;
427 pte_end
= ((gpuobj
->im_pramin
->size
>> 12) << 1) + pte
;
429 while (pte
< pte_end
) {
430 nv_wo32(priv
->pramin_pt
, (pte
* 4) + 0, 0x00000000);
431 nv_wo32(priv
->pramin_pt
, (pte
* 4) + 4, 0x00000000);
434 dev_priv
->engine
.instmem
.flush(dev
);
436 gpuobj
->im_bound
= 0;
/* Flush pending PRAMIN writes on NV50 and wait for completion, logging an
 * error on timeout. */
void
nv50_instmem_flush(struct drm_device *dev)
{
	nv_wr32(dev, 0x00330c, 0x00000001);
	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
}
/* Flush pending PRAMIN writes on NV84+ (different register than NV50) and
 * wait for completion, logging an error on timeout. */
void
nv84_instmem_flush(struct drm_device *dev)
{
	nv_wr32(dev, 0x070000, 0x00000001);
	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
}
/* Flush the VM TLBs for the given engine id and wait for the flush to
 * complete, logging an error on timeout. */
void
nv50_vm_flush(struct drm_device *dev, int engine)
{
	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
}