/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
37 | ||
38 | /* NVidia uses context objects to drive drawing operations. | |
39 | ||
40 | Context objects can be selected into 8 subchannels in the FIFO, | |
41 | and then used via DMA command buffers. | |
42 | ||
43 | A context object is referenced by a user defined handle (CARD32). The HW | |
44 | looks up graphics objects in a hash table in the instance RAM. | |
45 | ||
46 | An entry in the hash table consists of 2 CARD32. The first CARD32 contains | |
47 | the handle, the second one a bitfield, that contains the address of the | |
48 | object in instance RAM. | |
49 | ||
50 | The format of the second CARD32 seems to be: | |
51 | ||
52 | NV4 to NV30: | |
53 | ||
54 | 15: 0 instance_addr >> 4 | |
55 | 17:16 engine (here uses 1 = graphics) | |
56 | 28:24 channel id (here uses 0) | |
57 | 31 valid (use 1) | |
58 | ||
59 | NV40: | |
60 | ||
61 | 15: 0 instance_addr >> 4 (maybe 19-0) | |
62 | 21:20 engine (here uses 1 = graphics) | |
63 | I'm unsure about the other bits, but using 0 seems to work. | |
64 | ||
65 | The key into the hash table depends on the object handle and channel id and | |
66 | is given as: | |
67 | */ | |
static uint32_t
nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t hash = 0;
	int i;

	NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);

	for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
		hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
		handle >>= dev_priv->ramht_bits;
	}

	if (dev_priv->card_type < NV_50)
		hash ^= channel << (dev_priv->ramht_bits - 4);
	hash <<= 3;

	NV_DEBUG(dev, "hash=0x%08x\n", hash);
	return hash;
}
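
/* Worked example (illustrative only; the handle and channel are made up).
 * Assuming a 4KiB RAMHT, i.e. ramht_bits == 9, handle 0xbeef0201 on
 * channel 1 is folded 9 bits at a time and XOR'd with the channel id:
 *
 *	hash  = 0xbeef0201 & 0x1ff;		hash == 0x001
 *	hash ^= (0xbeef0201 >>  9) & 0x1ff;	hash == 0x180
 *	hash ^= (0xbeef0201 >> 18) & 0x1ff;	hash == 0x03b
 *	hash ^= (0xbeef0201 >> 27) & 0x1ff;	hash == 0x02c
 *	hash ^= 1 << (9 - 4);			hash == 0x00c
 *	hash <<= 3;				hash == 0x060
 *
 * giving the byte offset of the first candidate 8-byte entry in RAMHT.
 */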
89 | ||
90 | static int | |
91 | nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, | |
92 | uint32_t offset) | |
93 | { | |
94 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
95 | uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4); | |
96 | ||
97 | if (dev_priv->card_type < NV_40) | |
98 | return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); | |
99 | return (ctx != 0); | |
100 | } | |
101 | ||
102 | static int | |
103 | nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) | |
104 | { | |
105 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
106 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | |
107 | struct nouveau_channel *chan = ref->channel; | |
108 | struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; | |
109 | uint32_t ctx, co, ho; | |
110 | ||
111 | if (!ramht) { | |
112 | NV_ERROR(dev, "No hash table!\n"); | |
113 | return -EINVAL; | |
114 | } | |
115 | ||
116 | if (dev_priv->card_type < NV_40) { | |
117 | ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | | |
118 | (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | | |
119 | (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); | |
120 | } else | |
121 | if (dev_priv->card_type < NV_50) { | |
122 | ctx = (ref->instance >> 4) | | |
123 | (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | | |
124 | (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); | |
125 | } else { | |
126 | if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { | |
127 | ctx = (ref->instance << 10) | 2; | |
128 | } else { | |
129 | ctx = (ref->instance >> 4) | | |
130 | ((ref->gpuobj->engine << | |
131 | NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); | |
132 | } | |
133 | } | |
134 | ||
135 | instmem->prepare_access(dev, true); | |
136 | co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); | |
137 | do { | |
138 | if (!nouveau_ramht_entry_valid(dev, ramht, co)) { | |
139 | NV_DEBUG(dev, | |
140 | "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", | |
141 | chan->id, co, ref->handle, ctx); | |
142 | nv_wo32(dev, ramht, (co + 0)/4, ref->handle); | |
143 | nv_wo32(dev, ramht, (co + 4)/4, ctx); | |
144 | ||
145 | list_add_tail(&ref->list, &chan->ramht_refs); | |
146 | instmem->finish_access(dev); | |
147 | return 0; | |
148 | } | |
149 | NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", | |
150 | chan->id, co, nv_ro32(dev, ramht, co/4)); | |
151 | ||
152 | co += 8; | |
153 | if (co >= dev_priv->ramht_size) | |
154 | co = 0; | |
155 | } while (co != ho); | |
156 | instmem->finish_access(dev); | |
157 | ||
158 | NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); | |
159 | return -ENOMEM; | |
160 | } | |
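
/* Collision handling above is plain linear probing: on a full slot the
 * search advances one 8-byte entry at a time and wraps at ramht_size.
 * For example (sizes assumed), in a 4KiB RAMHT a collision at offset
 * 0xff8 continues at 0x000, and insertion fails only once the probe
 * walks all the way back to the home slot.
 */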
161 | ||
162 | static void | |
163 | nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) | |
164 | { | |
165 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
166 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | |
167 | struct nouveau_channel *chan = ref->channel; | |
168 | struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; | |
169 | uint32_t co, ho; | |
170 | ||
171 | if (!ramht) { | |
172 | NV_ERROR(dev, "No hash table!\n"); | |
173 | return; | |
174 | } | |
175 | ||
176 | instmem->prepare_access(dev, true); | |
177 | co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); | |
178 | do { | |
179 | if (nouveau_ramht_entry_valid(dev, ramht, co) && | |
180 | (ref->handle == nv_ro32(dev, ramht, (co/4)))) { | |
181 | NV_DEBUG(dev, | |
182 | "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", | |
183 | chan->id, co, ref->handle, | |
184 | nv_ro32(dev, ramht, (co + 4))); | |
185 | nv_wo32(dev, ramht, (co + 0)/4, 0x00000000); | |
186 | nv_wo32(dev, ramht, (co + 4)/4, 0x00000000); | |
187 | ||
188 | list_del(&ref->list); | |
189 | instmem->finish_access(dev); | |
190 | return; | |
191 | } | |
192 | ||
193 | co += 8; | |
194 | if (co >= dev_priv->ramht_size) | |
195 | co = 0; | |
196 | } while (co != ho); | |
197 | list_del(&ref->list); | |
198 | instmem->finish_access(dev); | |
199 | ||
200 | NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", | |
201 | chan->id, ref->handle); | |
202 | } | |
203 | ||
204 | int | |
205 | nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | |
206 | uint32_t size, int align, uint32_t flags, | |
207 | struct nouveau_gpuobj **gpuobj_ret) | |
208 | { | |
209 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
210 | struct nouveau_engine *engine = &dev_priv->engine; | |
211 | struct nouveau_gpuobj *gpuobj; | |
212 | struct mem_block *pramin = NULL; | |
213 | int ret; | |
214 | ||
215 | NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", | |
216 | chan ? chan->id : -1, size, align, flags); | |
217 | ||
218 | if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) | |
219 | return -EINVAL; | |
220 | ||
221 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | |
222 | if (!gpuobj) | |
223 | return -ENOMEM; | |
224 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); | |
225 | gpuobj->flags = flags; | |
226 | gpuobj->im_channel = chan; | |
227 | ||
228 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | |
229 | ||
230 | /* Choose between global instmem heap, and per-channel private | |
231 | * instmem heap. On <NV50 allow requests for private instmem | |
232 | * to be satisfied from global heap if no per-channel area | |
233 | * available. | |
234 | */ | |
235 | if (chan) { | |
236 | if (chan->ramin_heap) { | |
237 | NV_DEBUG(dev, "private heap\n"); | |
238 | pramin = chan->ramin_heap; | |
239 | } else | |
240 | if (dev_priv->card_type < NV_50) { | |
241 | NV_DEBUG(dev, "global heap fallback\n"); | |
242 | pramin = dev_priv->ramin_heap; | |
243 | } | |
244 | } else { | |
245 | NV_DEBUG(dev, "global heap\n"); | |
246 | pramin = dev_priv->ramin_heap; | |
247 | } | |
248 | ||
249 | if (!pramin) { | |
250 | NV_ERROR(dev, "No PRAMIN heap!\n"); | |
251 | return -EINVAL; | |
252 | } | |
253 | ||
254 | if (!chan) { | |
255 | ret = engine->instmem.populate(dev, gpuobj, &size); | |
256 | if (ret) { | |
257 | nouveau_gpuobj_del(dev, &gpuobj); | |
258 | return ret; | |
259 | } | |
260 | } | |
261 | ||
262 | /* Allocate a chunk of the PRAMIN aperture */ | |
263 | gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, | |
264 | drm_order(align), | |
265 | (struct drm_file *)-2, 0); | |
266 | if (!gpuobj->im_pramin) { | |
267 | nouveau_gpuobj_del(dev, &gpuobj); | |
268 | return -ENOMEM; | |
269 | } | |
270 | ||
271 | if (!chan) { | |
272 | ret = engine->instmem.bind(dev, gpuobj); | |
273 | if (ret) { | |
274 | nouveau_gpuobj_del(dev, &gpuobj); | |
275 | return ret; | |
276 | } | |
277 | } | |
278 | ||
279 | if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { | |
280 | int i; | |
281 | ||
282 | engine->instmem.prepare_access(dev, true); | |
283 | for (i = 0; i < gpuobj->im_pramin->size; i += 4) | |
284 | nv_wo32(dev, gpuobj, i/4, 0); | |
285 | engine->instmem.finish_access(dev); | |
286 | } | |
287 | ||
288 | *gpuobj_ret = gpuobj; | |
289 | return 0; | |
290 | } | |
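
/* Typical usage (a sketch, not taken verbatim from this driver; the size,
 * alignment and flags are illustrative): allocate a zeroed, 16-byte
 * aligned object in a channel's instance memory, then release it again.
 *
 *	struct nouveau_gpuobj *obj = NULL;
 *	int ret;
 *
 *	ret = nouveau_gpuobj_new(dev, chan, 16, 16,
 *				 NVOBJ_FLAG_ZERO_ALLOC |
 *				 NVOBJ_FLAG_ZERO_FREE, &obj);
 *	if (ret)
 *		return ret;
 *	...
 *	nouveau_gpuobj_del(dev, &obj);
 */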
291 | ||
292 | int | |
293 | nouveau_gpuobj_early_init(struct drm_device *dev) | |
294 | { | |
295 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
296 | ||
297 | NV_DEBUG(dev, "\n"); | |
298 | ||
299 | INIT_LIST_HEAD(&dev_priv->gpuobj_list); | |
300 | ||
301 | return 0; | |
302 | } | |
303 | ||
304 | int | |
305 | nouveau_gpuobj_init(struct drm_device *dev) | |
306 | { | |
307 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
308 | int ret; | |
309 | ||
310 | NV_DEBUG(dev, "\n"); | |
311 | ||
312 | if (dev_priv->card_type < NV_50) { | |
313 | ret = nouveau_gpuobj_new_fake(dev, | |
314 | dev_priv->ramht_offset, ~0, dev_priv->ramht_size, | |
315 | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS, | |
316 | &dev_priv->ramht, NULL); | |
317 | if (ret) | |
318 | return ret; | |
319 | } | |
320 | ||
321 | return 0; | |
322 | } | |
323 | ||
324 | void | |
325 | nouveau_gpuobj_takedown(struct drm_device *dev) | |
326 | { | |
327 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
328 | ||
329 | NV_DEBUG(dev, "\n"); | |
330 | ||
331 | nouveau_gpuobj_del(dev, &dev_priv->ramht); | |
332 | } | |
333 | ||
334 | void | |
335 | nouveau_gpuobj_late_takedown(struct drm_device *dev) | |
336 | { | |
337 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
338 | struct nouveau_gpuobj *gpuobj = NULL; | |
339 | struct list_head *entry, *tmp; | |
340 | ||
341 | NV_DEBUG(dev, "\n"); | |
342 | ||
343 | list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) { | |
344 | gpuobj = list_entry(entry, struct nouveau_gpuobj, list); | |
345 | ||
346 | NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n", | |
347 | gpuobj, gpuobj->refcount); | |
348 | gpuobj->refcount = 0; | |
349 | nouveau_gpuobj_del(dev, &gpuobj); | |
350 | } | |
351 | } | |
352 | ||
353 | int | |
354 | nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) | |
355 | { | |
356 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
357 | struct nouveau_engine *engine = &dev_priv->engine; | |
358 | struct nouveau_gpuobj *gpuobj; | |
359 | int i; | |
360 | ||
361 | NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL); | |
362 | ||
363 | if (!dev_priv || !pgpuobj || !(*pgpuobj)) | |
364 | return -EINVAL; | |
365 | gpuobj = *pgpuobj; | |
366 | ||
367 | if (gpuobj->refcount != 0) { | |
368 | NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount); | |
369 | return -EINVAL; | |
370 | } | |
371 | ||
372 | if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { | |
373 | engine->instmem.prepare_access(dev, true); | |
374 | for (i = 0; i < gpuobj->im_pramin->size; i += 4) | |
375 | nv_wo32(dev, gpuobj, i/4, 0); | |
376 | engine->instmem.finish_access(dev); | |
377 | } | |
378 | ||
379 | if (gpuobj->dtor) | |
380 | gpuobj->dtor(dev, gpuobj); | |
381 | ||
382 | if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE)) | |
383 | engine->instmem.clear(dev, gpuobj); | |
384 | ||
385 | if (gpuobj->im_pramin) { | |
386 | if (gpuobj->flags & NVOBJ_FLAG_FAKE) | |
387 | kfree(gpuobj->im_pramin); | |
388 | else | |
389 | nouveau_mem_free_block(gpuobj->im_pramin); | |
390 | } | |
391 | ||
392 | list_del(&gpuobj->list); | |
393 | ||
394 | *pgpuobj = NULL; | |
395 | kfree(gpuobj); | |
396 | return 0; | |
397 | } | |
398 | ||
399 | static int | |
400 | nouveau_gpuobj_instance_get(struct drm_device *dev, | |
401 | struct nouveau_channel *chan, | |
402 | struct nouveau_gpuobj *gpuobj, uint32_t *inst) | |
403 | { | |
404 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
405 | struct nouveau_gpuobj *cpramin; | |
406 | ||
407 | /* <NV50 use PRAMIN address everywhere */ | |
408 | if (dev_priv->card_type < NV_50) { | |
409 | *inst = gpuobj->im_pramin->start; | |
410 | return 0; | |
411 | } | |
412 | ||
413 | if (chan && gpuobj->im_channel != chan) { | |
414 | NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n", | |
415 | gpuobj->im_channel->id, chan->id); | |
416 | return -EINVAL; | |
417 | } | |
418 | ||
419 | /* NV50 channel-local instance */ | |
420 | if (chan) { | |
421 | cpramin = chan->ramin->gpuobj; | |
422 | *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; | |
423 | return 0; | |
424 | } | |
425 | ||
426 | /* NV50 global (VRAM) instance */ | |
427 | if (!gpuobj->im_channel) { | |
428 | /* ...from global heap */ | |
429 | if (!gpuobj->im_backing) { | |
430 | NV_ERROR(dev, "AII, no VRAM backing gpuobj\n"); | |
431 | return -EINVAL; | |
432 | } | |
433 | *inst = gpuobj->im_backing_start; | |
434 | return 0; | |
435 | } else { | |
436 | /* ...from local heap */ | |
437 | cpramin = gpuobj->im_channel->ramin->gpuobj; | |
438 | *inst = cpramin->im_backing_start + | |
439 | (gpuobj->im_pramin->start - cpramin->im_pramin->start); | |
440 | return 0; | |
441 | } | |
442 | ||
443 | return -EINVAL; | |
444 | } | |
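
/* Example (offsets assumed for illustration): on NV50, an object whose
 * PRAMIN block starts at 0x10a000 inside a channel whose own PRAMIN block
 * starts at 0x100000 gets the channel-local instance address
 *
 *	*inst = 0x10a000 - 0x100000 = 0xa000
 *
 * whereas on <NV50 the absolute PRAMIN offset 0x10a000 is used directly.
 */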
445 | ||
446 | int | |
447 | nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan, | |
448 | uint32_t handle, struct nouveau_gpuobj *gpuobj, | |
449 | struct nouveau_gpuobj_ref **ref_ret) | |
450 | { | |
451 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
452 | struct nouveau_gpuobj_ref *ref; | |
453 | uint32_t instance; | |
454 | int ret; | |
455 | ||
456 | NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n", | |
457 | chan ? chan->id : -1, handle, gpuobj); | |
458 | ||
459 | if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) | |
460 | return -EINVAL; | |
461 | ||
462 | if (!chan && !ref_ret) | |
463 | return -EINVAL; | |
464 | ||
465 | if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) { | |
466 | /* sw object */ | |
467 | instance = 0x40; | |
468 | } else { | |
469 | ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance); | |
470 | if (ret) | |
471 | return ret; | |
472 | } | |
473 | ||
474 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); | |
475 | if (!ref) | |
476 | return -ENOMEM; | |
477 | INIT_LIST_HEAD(&ref->list); | |
478 | ref->gpuobj = gpuobj; | |
479 | ref->channel = chan; | |
480 | ref->instance = instance; | |
481 | ||
482 | if (!ref_ret) { | |
483 | ref->handle = handle; | |
484 | ||
485 | ret = nouveau_ramht_insert(dev, ref); | |
486 | if (ret) { | |
487 | kfree(ref); | |
488 | return ret; | |
489 | } | |
490 | } else { | |
491 | ref->handle = ~0; | |
492 | *ref_ret = ref; | |
493 | } | |
494 | ||
495 | ref->gpuobj->refcount++; | |
496 | return 0; | |
497 | } | |
498 | ||
499 | int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref) | |
500 | { | |
501 | struct nouveau_gpuobj_ref *ref; | |
502 | ||
503 | NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL); | |
504 | ||
505 | if (!dev || !pref || *pref == NULL) | |
506 | return -EINVAL; | |
507 | ref = *pref; | |
508 | ||
509 | if (ref->handle != ~0) | |
510 | nouveau_ramht_remove(dev, ref); | |
511 | ||
512 | if (ref->gpuobj) { | |
513 | ref->gpuobj->refcount--; | |
514 | ||
515 | if (ref->gpuobj->refcount == 0) { | |
516 | if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS)) | |
517 | nouveau_gpuobj_del(dev, &ref->gpuobj); | |
518 | } | |
519 | } | |
520 | ||
521 | *pref = NULL; | |
522 | kfree(ref); | |
523 | return 0; | |
524 | } | |
525 | ||
526 | int | |
527 | nouveau_gpuobj_new_ref(struct drm_device *dev, | |
528 | struct nouveau_channel *oc, struct nouveau_channel *rc, | |
529 | uint32_t handle, uint32_t size, int align, | |
530 | uint32_t flags, struct nouveau_gpuobj_ref **ref) | |
531 | { | |
532 | struct nouveau_gpuobj *gpuobj = NULL; | |
533 | int ret; | |
534 | ||
535 | ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj); | |
536 | if (ret) | |
537 | return ret; | |
538 | ||
539 | ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref); | |
540 | if (ret) { | |
541 | nouveau_gpuobj_del(dev, &gpuobj); | |
542 | return ret; | |
543 | } | |
544 | ||
545 | return 0; | |
546 | } | |
547 | ||
548 | int | |
549 | nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, | |
550 | struct nouveau_gpuobj_ref **ref_ret) | |
551 | { | |
552 | struct nouveau_gpuobj_ref *ref; | |
553 | struct list_head *entry, *tmp; | |
554 | ||
555 | list_for_each_safe(entry, tmp, &chan->ramht_refs) { | |
556 | ref = list_entry(entry, struct nouveau_gpuobj_ref, list); | |
557 | ||
558 | if (ref->handle == handle) { | |
559 | if (ref_ret) | |
560 | *ref_ret = ref; | |
561 | return 0; | |
562 | } | |
563 | } | |
564 | ||
565 | return -EINVAL; | |
566 | } | |
567 | ||
568 | int | |
569 | nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, | |
570 | uint32_t b_offset, uint32_t size, | |
571 | uint32_t flags, struct nouveau_gpuobj **pgpuobj, | |
572 | struct nouveau_gpuobj_ref **pref) | |
573 | { | |
574 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
575 | struct nouveau_gpuobj *gpuobj = NULL; | |
576 | int i; | |
577 | ||
578 | NV_DEBUG(dev, | |
579 | "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n", | |
580 | p_offset, b_offset, size, flags); | |
581 | ||
582 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | |
583 | if (!gpuobj) | |
584 | return -ENOMEM; | |
585 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); | |
586 | gpuobj->im_channel = NULL; | |
587 | gpuobj->flags = flags | NVOBJ_FLAG_FAKE; | |
588 | ||
589 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | |
590 | ||
591 | if (p_offset != ~0) { | |
592 | gpuobj->im_pramin = kzalloc(sizeof(struct mem_block), | |
593 | GFP_KERNEL); | |
594 | if (!gpuobj->im_pramin) { | |
595 | nouveau_gpuobj_del(dev, &gpuobj); | |
596 | return -ENOMEM; | |
597 | } | |
598 | gpuobj->im_pramin->start = p_offset; | |
599 | gpuobj->im_pramin->size = size; | |
600 | } | |
601 | ||
602 | if (b_offset != ~0) { | |
603 | gpuobj->im_backing = (struct nouveau_bo *)-1; | |
604 | gpuobj->im_backing_start = b_offset; | |
605 | } | |
606 | ||
607 | if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { | |
608 | dev_priv->engine.instmem.prepare_access(dev, true); | |
609 | for (i = 0; i < gpuobj->im_pramin->size; i += 4) | |
610 | nv_wo32(dev, gpuobj, i/4, 0); | |
611 | dev_priv->engine.instmem.finish_access(dev); | |
612 | } | |
613 | ||
614 | if (pref) { | |
615 | i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref); | |
616 | if (i) { | |
617 | nouveau_gpuobj_del(dev, &gpuobj); | |
618 | return i; | |
619 | } | |
620 | } | |
621 | ||
622 | if (pgpuobj) | |
623 | *pgpuobj = gpuobj; | |
624 | return 0; | |
625 | } | |
626 | ||
627 | ||
628 | static uint32_t | |
629 | nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) | |
630 | { | |
631 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
632 | ||
633 | /*XXX: dodgy hack for now */ | |
634 | if (dev_priv->card_type >= NV_50) | |
635 | return 24; | |
636 | if (dev_priv->card_type >= NV_40) | |
637 | return 32; | |
638 | return 16; | |
639 | } | |
640 | ||
641 | /* | |
642 | DMA objects are used to reference a piece of memory in the | |
643 | framebuffer, PCI or AGP address space. Each object is 16 bytes big | |
644 | and looks as follows: | |
645 | ||
646 | entry[0] | |
647 | 11:0 class (seems like I can always use 0 here) | |
648 | 12 page table present? | |
649 | 13 page entry linear? | |
650 | 15:14 access: 0 rw, 1 ro, 2 wo | |
651 | 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP | |
652 | 31:20 dma adjust (bits 0-11 of the address) | |
653 | entry[1] | |
654 | dma limit (size of transfer) | |
655 | entry[X] | |
656 | 1 0 readonly, 1 readwrite | |
657 | 31:12 dma frame address of the page (bits 12-31 of the address) | |
658 | entry[N] | |
659 | page table terminator, same value as the first pte, as does nvidia | |
660 | rivatv uses 0xffffffff | |
661 | ||
662 | Non linear page tables need a list of frame addresses afterwards, | |
663 | the rivatv project has some info on this. | |
664 | ||
665 | The method below creates a DMA object in instance RAM and returns a handle | |
666 | to it that can be used to set up context objects. | |
667 | */ | |
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	instmem->prepare_access(dev, true);

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset & 0x00000fff;
		frame = offset & ~0x00000fff;

		nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
					  (adjust << 20) |
					  (access << 14) |
					  (target << 16) |
					  class));
		nv_wo32(dev, *gpuobj, 1, size - 1);
		nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
		nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(dev, *gpuobj, 0, flags0 | class);
		nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
		nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
		nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
					  (upper_32_bits(offset) & 0xff));
		nv_wo32(dev, *gpuobj, 5, flags5);
	}

	instmem->finish_access(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class = class;
	return 0;
}
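
/* Worked example for the <NV50 layout above, using the bit encodings from
 * the comment (the class value 0x003d for NV_CLASS_DMA_IN_MEMORY is an
 * assumption here): a read/write (access == 0) PCI (target == 2) object
 * with a page-aligned offset, so adjust == 0, packs entry[0] as
 *
 *	(1<<12) | (1<<13)	0x00003000  page table present + linear
 *	| (0 << 20)		dma adjust
 *	| (0 << 14)		access rw
 *	| (2 << 16)		0x00020000  target PCI
 *	| 0x003d		----------
 *				0x0002303d
 *
 * entry[1] holds size - 1 (the DMA limit), and entry[2]/entry[3] hold the
 * page frame address OR'd with the read/write PTE flag.
 */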
743 | ||
744 | int | |
745 | nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, | |
746 | uint64_t offset, uint64_t size, int access, | |
747 | struct nouveau_gpuobj **gpuobj, | |
748 | uint32_t *o_ret) | |
749 | { | |
750 | struct drm_device *dev = chan->dev; | |
751 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
752 | int ret; | |
753 | ||
754 | if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || | |
755 | (dev_priv->card_type >= NV_50 && | |
756 | dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { | |
757 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | |
758 | offset + dev_priv->vm_gart_base, | |
759 | size, access, NV_DMA_TARGET_AGP, | |
760 | gpuobj); | |
761 | if (o_ret) | |
762 | *o_ret = 0; | |
763 | } else | |
764 | if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { | |
765 | *gpuobj = dev_priv->gart_info.sg_ctxdma; | |
766 | if (offset & ~0xffffffffULL) { | |
767 | NV_ERROR(dev, "obj offset exceeds 32-bits\n"); | |
768 | return -EINVAL; | |
769 | } | |
770 | if (o_ret) | |
771 | *o_ret = (uint32_t)offset; | |
772 | ret = (*gpuobj != NULL) ? 0 : -EINVAL; | |
773 | } else { | |
774 | NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); | |
775 | return -EINVAL; | |
776 | } | |
777 | ||
778 | return ret; | |
779 | } | |
780 | ||
781 | /* Context objects in the instance RAM have the following structure. | |
782 | * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes. | |
783 | ||
784 | NV4 - NV30: | |
785 | ||
786 | entry[0] | |
787 | 11:0 class | |
788 | 12 chroma key enable | |
789 | 13 user clip enable | |
790 | 14 swizzle enable | |
791 | 17:15 patch config: | |
792 | scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre | |
793 | 18 synchronize enable | |
794 | 19 endian: 1 big, 0 little | |
795 | 21:20 dither mode | |
796 | 23 single step enable | |
797 | 24 patch status: 0 invalid, 1 valid | |
798 | 25 context_surface 0: 1 valid | |
799 | 26 context surface 1: 1 valid | |
800 | 27 context pattern: 1 valid | |
801 | 28 context rop: 1 valid | |
802 | 29,30 context beta, beta4 | |
803 | entry[1] | |
804 | 7:0 mono format | |
805 | 15:8 color format | |
806 | 31:16 notify instance address | |
807 | entry[2] | |
808 | 15:0 dma 0 instance address | |
809 | 31:16 dma 1 instance address | |
810 | entry[3] | |
811 | dma method traps | |
812 | ||
813 | NV40: | |
814 | No idea what the exact format is. Here's what can be deducted: | |
815 | ||
816 | entry[0]: | |
817 | 11:0 class (maybe uses more bits here?) | |
818 | 17 user clip enable | |
819 | 21:19 patch config | |
820 | 25 patch status valid ? | |
821 | entry[1]: | |
822 | 15:0 DMA notifier (maybe 20:0) | |
823 | entry[2]: | |
824 | 15:0 DMA 0 instance (maybe 20:0) | |
825 | 24 big endian | |
826 | entry[3]: | |
827 | 15:0 DMA 1 instance (maybe 20:0) | |
828 | entry[4]: | |
829 | entry[5]: | |
830 | set to 0? | |
831 | */ | |
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (dev_priv->card_type >= NV_50) {
		nv_wo32(dev, *gpuobj, 0, class);
		nv_wo32(dev, *gpuobj, 5, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(dev, *gpuobj, 0, 0x00001030);
			nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(dev, *gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(dev, *gpuobj, 2, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(dev, *gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class = class;
	return 0;
}
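
/* Typical pairing (a sketch mirroring nouveau_ioctl_grobj_alloc below;
 * the class and handle values are illustrative): create the object, then
 * reference it under a handle so the FIFO can find it via RAMHT.
 *
 *	struct nouveau_gpuobj *gr = NULL;
 *	int ret;
 *
 *	ret = nouveau_gpuobj_gr_new(chan, 0x5039, &gr);
 *	if (ret == 0)
 *		ret = nouveau_gpuobj_ref_add(dev, chan, handle, gr, NULL);
 */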
883 | ||
f03a314b | 884 | int |
6ee73861 BS |
885 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, |
886 | struct nouveau_gpuobj **gpuobj_ret) | |
887 | { | |
dd19e44b | 888 | struct drm_nouveau_private *dev_priv; |
6ee73861 BS |
889 | struct nouveau_gpuobj *gpuobj; |
890 | ||
891 | if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) | |
892 | return -EINVAL; | |
dd19e44b | 893 | dev_priv = chan->dev->dev_private; |
6ee73861 BS |
894 | |
895 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | |
896 | if (!gpuobj) | |
897 | return -ENOMEM; | |
898 | gpuobj->engine = NVOBJ_ENGINE_SW; | |
899 | gpuobj->class = class; | |
900 | ||
901 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | |
902 | *gpuobj_ret = gpuobj; | |
903 | return 0; | |
904 | } | |
905 | ||
906 | static int | |
907 | nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) | |
908 | { | |
909 | struct drm_device *dev = chan->dev; | |
910 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
911 | struct nouveau_gpuobj *pramin = NULL; | |
912 | uint32_t size; | |
913 | uint32_t base; | |
914 | int ret; | |
915 | ||
916 | NV_DEBUG(dev, "ch%d\n", chan->id); | |
917 | ||
918 | /* Base amount for object storage (4KiB enough?) */ | |
919 | size = 0x1000; | |
920 | base = 0; | |
921 | ||
922 | /* PGRAPH context */ | |
923 | ||
924 | if (dev_priv->card_type == NV_50) { | |
925 | /* Various fixed table thingos */ | |
926 | size += 0x1400; /* mostly unknown stuff */ | |
927 | size += 0x4000; /* vm pd */ | |
928 | base = 0x6000; | |
929 | /* RAMHT, not sure about setting size yet, 32KiB to be safe */ | |
930 | size += 0x8000; | |
931 | /* RAMFC */ | |
932 | size += 0x1000; | |
933 | /* PGRAPH context */ | |
934 | size += 0x70000; | |
935 | } | |
936 | ||
937 | NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", | |
938 | chan->id, size, base); | |
939 | ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, | |
940 | &chan->ramin); | |
941 | if (ret) { | |
942 | NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); | |
943 | return ret; | |
944 | } | |
945 | pramin = chan->ramin->gpuobj; | |
946 | ||
947 | ret = nouveau_mem_init_heap(&chan->ramin_heap, | |
948 | pramin->im_pramin->start + base, size); | |
949 | if (ret) { | |
950 | NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); | |
951 | nouveau_gpuobj_ref_del(dev, &chan->ramin); | |
952 | return ret; | |
953 | } | |
954 | ||
955 | return 0; | |
956 | } | |
957 | ||
958 | int | |
959 | nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | |
960 | uint32_t vram_h, uint32_t tt_h) | |
961 | { | |
962 | struct drm_device *dev = chan->dev; | |
963 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
964 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | |
965 | struct nouveau_gpuobj *vram = NULL, *tt = NULL; | |
966 | int ret, i; | |
967 | ||
968 | INIT_LIST_HEAD(&chan->ramht_refs); | |
969 | ||
970 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); | |
971 | ||
972 | /* Reserve a block of PRAMIN for the channel | |
973 | *XXX: maybe on <NV50 too at some point | |
974 | */ | |
975 | if (0 || dev_priv->card_type == NV_50) { | |
976 | ret = nouveau_gpuobj_channel_init_pramin(chan); | |
977 | if (ret) { | |
978 | NV_ERROR(dev, "init pramin\n"); | |
979 | return ret; | |
980 | } | |
981 | } | |
982 | ||
983 | /* NV50 VM | |
984 | * - Allocate per-channel page-directory | |
985 | * - Map GART and VRAM into the channel's address space at the | |
986 | * locations determined during init. | |
987 | */ | |
988 | if (dev_priv->card_type >= NV_50) { | |
989 | uint32_t vm_offset, pde; | |
990 | ||
991 | instmem->prepare_access(dev, true); | |
992 | ||
993 | vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; | |
994 | vm_offset += chan->ramin->gpuobj->im_pramin->start; | |
995 | ||
996 | ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, | |
997 | 0, &chan->vm_pd, NULL); | |
998 | if (ret) { | |
999 | instmem->finish_access(dev); | |
1000 | return ret; | |
1001 | } | |
1002 | for (i = 0; i < 0x4000; i += 8) { | |
1003 | nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000); | |
1004 | nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe); | |
1005 | } | |
1006 | ||
1007 | pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2; | |
1008 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, | |
1009 | dev_priv->gart_info.sg_ctxdma, | |
1010 | &chan->vm_gart_pt); | |
1011 | if (ret) { | |
1012 | instmem->finish_access(dev); | |
1013 | return ret; | |
1014 | } | |
1015 | nv_wo32(dev, chan->vm_pd, pde++, | |
1016 | chan->vm_gart_pt->instance | 0x03); | |
1017 | nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); | |
1018 | ||
1019 | pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2; | |
1020 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { | |
1021 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, | |
1022 | dev_priv->vm_vram_pt[i], | |
1023 | &chan->vm_vram_pt[i]); | |
1024 | if (ret) { | |
1025 | instmem->finish_access(dev); | |
1026 | return ret; | |
1027 | } | |
1028 | ||
1029 | nv_wo32(dev, chan->vm_pd, pde++, | |
1030 | chan->vm_vram_pt[i]->instance | 0x61); | |
1031 | nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); | |
1032 | } | |
1033 | ||
1034 | instmem->finish_access(dev); | |
1035 | } | |
1036 | ||
1037 | /* RAMHT */ | |
1038 | if (dev_priv->card_type < NV_50) { | |
1039 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, | |
1040 | &chan->ramht); | |
1041 | if (ret) | |
1042 | return ret; | |
1043 | } else { | |
1044 | ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, | |
1045 | 0x8000, 16, | |
1046 | NVOBJ_FLAG_ZERO_ALLOC, | |
1047 | &chan->ramht); | |
1048 | if (ret) | |
1049 | return ret; | |
1050 | } | |
1051 | ||
1052 | /* VRAM ctxdma */ | |
1053 | if (dev_priv->card_type >= NV_50) { | |
1054 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | |
1055 | 0, dev_priv->vm_end, | |
1056 | NV_DMA_ACCESS_RW, | |
1057 | NV_DMA_TARGET_AGP, &vram); | |
1058 | if (ret) { | |
1059 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); | |
1060 | return ret; | |
1061 | } | |
1062 | } else { | |
1063 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | |
1064 | 0, dev_priv->fb_available_size, | |
1065 | NV_DMA_ACCESS_RW, | |
1066 | NV_DMA_TARGET_VIDMEM, &vram); | |
1067 | if (ret) { | |
1068 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); | |
1069 | return ret; | |
1070 | } | |
1071 | } | |
1072 | ||
1073 | ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL); | |
1074 | if (ret) { | |
1075 | NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret); | |
1076 | return ret; | |
1077 | } | |
1078 | ||
1079 | /* TT memory ctxdma */ | |
1080 | if (dev_priv->card_type >= NV_50) { | |
1081 | tt = vram; | |
1082 | } else | |
1083 | if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { | |
1084 | ret = nouveau_gpuobj_gart_dma_new(chan, 0, | |
1085 | dev_priv->gart_info.aper_size, | |
1086 | NV_DMA_ACCESS_RW, &tt, NULL); | |
1087 | } else { | |
1088 | NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); | |
1089 | ret = -EINVAL; | |
1090 | } | |
1091 | ||
1092 | if (ret) { | |
1093 | NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret); | |
1094 | return ret; | |
1095 | } | |
1096 | ||
1097 | ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); | |
1098 | if (ret) { | |
1099 | NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret); | |
1100 | return ret; | |
1101 | } | |
1102 | ||
1103 | return 0; | |
1104 | } | |
1105 | ||
1106 | void | |
1107 | nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) | |
1108 | { | |
1109 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | |
1110 | struct drm_device *dev = chan->dev; | |
1111 | struct list_head *entry, *tmp; | |
1112 | struct nouveau_gpuobj_ref *ref; | |
1113 | int i; | |
1114 | ||
1115 | NV_DEBUG(dev, "ch%d\n", chan->id); | |
1116 | ||
1117 | if (!chan->ramht_refs.next) | |
1118 | return; | |
1119 | ||
1120 | list_for_each_safe(entry, tmp, &chan->ramht_refs) { | |
1121 | ref = list_entry(entry, struct nouveau_gpuobj_ref, list); | |
1122 | ||
1123 | nouveau_gpuobj_ref_del(dev, &ref); | |
1124 | } | |
1125 | ||
1126 | nouveau_gpuobj_ref_del(dev, &chan->ramht); | |
1127 | ||
1128 | nouveau_gpuobj_del(dev, &chan->vm_pd); | |
1129 | nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); | |
1130 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) | |
1131 | nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); | |
1132 | ||
1133 | if (chan->ramin_heap) | |
1134 | nouveau_mem_takedown(&chan->ramin_heap); | |
1135 | if (chan->ramin) | |
1136 | nouveau_gpuobj_ref_del(dev, &chan->ramin); | |
1137 | ||
1138 | } | |
1139 | ||
1140 | int | |
1141 | nouveau_gpuobj_suspend(struct drm_device *dev) | |
1142 | { | |
1143 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
1144 | struct nouveau_gpuobj *gpuobj; | |
1145 | int i; | |
1146 | ||
1147 | if (dev_priv->card_type < NV_50) { | |
1148 | dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram); | |
1149 | if (!dev_priv->susres.ramin_copy) | |
1150 | return -ENOMEM; | |
1151 | ||
1152 | for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) | |
1153 | dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i); | |
1154 | return 0; | |
1155 | } | |
1156 | ||
1157 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { | |
1158 | if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE)) | |
1159 | continue; | |
1160 | ||
1161 | gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size); | |
1162 | if (!gpuobj->im_backing_suspend) { | |
1163 | nouveau_gpuobj_resume(dev); | |
1164 | return -ENOMEM; | |
1165 | } | |
1166 | ||
1167 | dev_priv->engine.instmem.prepare_access(dev, false); | |
1168 | for (i = 0; i < gpuobj->im_pramin->size / 4; i++) | |
1169 | gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i); | |
1170 | dev_priv->engine.instmem.finish_access(dev); | |
1171 | } | |
1172 | ||
1173 | return 0; | |
1174 | } | |
1175 | ||
1176 | void | |
1177 | nouveau_gpuobj_suspend_cleanup(struct drm_device *dev) | |
1178 | { | |
1179 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
1180 | struct nouveau_gpuobj *gpuobj; | |
1181 | ||
1182 | if (dev_priv->card_type < NV_50) { | |
1183 | vfree(dev_priv->susres.ramin_copy); | |
1184 | dev_priv->susres.ramin_copy = NULL; | |
1185 | return; | |
1186 | } | |
1187 | ||
1188 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { | |
1189 | if (!gpuobj->im_backing_suspend) | |
1190 | continue; | |
1191 | ||
1192 | vfree(gpuobj->im_backing_suspend); | |
1193 | gpuobj->im_backing_suspend = NULL; | |
1194 | } | |
1195 | } | |
1196 | ||
1197 | void | |
1198 | nouveau_gpuobj_resume(struct drm_device *dev) | |
1199 | { | |
1200 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
1201 | struct nouveau_gpuobj *gpuobj; | |
1202 | int i; | |
1203 | ||
1204 | if (dev_priv->card_type < NV_50) { | |
1205 | for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) | |
1206 | nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]); | |
1207 | nouveau_gpuobj_suspend_cleanup(dev); | |
1208 | return; | |
1209 | } | |
1210 | ||
1211 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { | |
1212 | if (!gpuobj->im_backing_suspend) | |
1213 | continue; | |
1214 | ||
1215 | dev_priv->engine.instmem.prepare_access(dev, true); | |
1216 | for (i = 0; i < gpuobj->im_pramin->size / 4; i++) | |
1217 | nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]); | |
1218 | dev_priv->engine.instmem.finish_access(dev); | |
1219 | } | |
1220 | ||
1221 | nouveau_gpuobj_suspend_cleanup(dev); | |
1222 | } | |
1223 | ||
1224 | int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, | |
1225 | struct drm_file *file_priv) | |
1226 | { | |
1227 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
1228 | struct drm_nouveau_grobj_alloc *init = data; | |
1229 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | |
1230 | struct nouveau_pgraph_object_class *grc; | |
1231 | struct nouveau_gpuobj *gr = NULL; | |
1232 | struct nouveau_channel *chan; | |
1233 | int ret; | |
1234 | ||
1235 | NOUVEAU_CHECK_INITIALISED_WITH_RETURN; | |
1236 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); | |
1237 | ||
1238 | if (init->handle == ~0) | |
1239 | return -EINVAL; | |
1240 | ||
1241 | grc = pgraph->grclass; | |
1242 | while (grc->id) { | |
1243 | if (grc->id == init->class) | |
1244 | break; | |
1245 | grc++; | |
1246 | } | |
1247 | ||
1248 | if (!grc->id) { | |
1249 | NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class); | |
1250 | return -EPERM; | |
1251 | } | |
1252 | ||
1253 | if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) | |
1254 | return -EEXIST; | |
1255 | ||
1256 | if (!grc->software) | |
1257 | ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); | |
1258 | else | |
1259 | ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); | |
1260 | ||
1261 | if (ret) { | |
1262 | NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", | |
1263 | ret, init->channel, init->handle); | |
1264 | return ret; | |
1265 | } | |
1266 | ||
1267 | ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL); | |
1268 | if (ret) { | |
1269 | NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", | |
1270 | ret, init->channel, init->handle); | |
1271 | nouveau_gpuobj_del(dev, &gr); | |
1272 | return ret; | |
1273 | } | |
1274 | ||
1275 | return 0; | |
1276 | } | |
1277 | ||
1278 | int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, | |
1279 | struct drm_file *file_priv) | |
1280 | { | |
1281 | struct drm_nouveau_gpuobj_free *objfree = data; | |
1282 | struct nouveau_gpuobj_ref *ref; | |
1283 | struct nouveau_channel *chan; | |
1284 | int ret; | |
1285 | ||
1286 | NOUVEAU_CHECK_INITIALISED_WITH_RETURN; | |
1287 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); | |
1288 | ||
1289 | ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref); | |
1290 | if (ret) | |
1291 | return ret; | |
1292 | nouveau_gpuobj_ref_del(dev, &ref); | |
1293 | ||
1294 | return 0; | |
1295 | } |