/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
760285e7 | 25 | #include <drm/drmP.h> |
6ee73861 | 26 | #include "nouveau_drv.h" |
760285e7 | 27 | #include <drm/nouveau_drm.h> |
6ee73861 | 28 | #include "nouveau_dma.h" |
c420b2dc | 29 | #include "nouveau_fifo.h" |
b7cb6c01 | 30 | #include "nouveau_ramht.h" |
d375e7d5 | 31 | #include "nouveau_fence.h" |
20abd163 | 32 | #include "nouveau_software.h" |
6ee73861 BS |
33 | |
34 | static int | |
dd6a46cc | 35 | nouveau_channel_pushbuf_init(struct nouveau_channel *chan) |
6ee73861 | 36 | { |
dd6a46cc | 37 | u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT; |
6ee73861 BS |
38 | struct drm_device *dev = chan->dev; |
39 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
dd6a46cc BS |
40 | int ret; |
41 | ||
42 | /* allocate buffer object */ | |
22b33e8e | 43 | ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo); |
dd6a46cc BS |
44 | if (ret) |
45 | goto out; | |
46 | ||
47 | ret = nouveau_bo_pin(chan->pushbuf_bo, mem); | |
48 | if (ret) | |
49 | goto out; | |
6ee73861 | 50 | |
dd6a46cc BS |
51 | ret = nouveau_bo_map(chan->pushbuf_bo); |
52 | if (ret) | |
53 | goto out; | |
54 | ||
55 | /* create DMA object covering the entire memtype where the push | |
56 | * buffer resides, userspace can submit its own push buffers from | |
57 | * anywhere within the same memtype. | |
58 | */ | |
180cc306 | 59 | chan->pushbuf_base = chan->pushbuf_bo->bo.offset; |
d87897d4 | 60 | if (dev_priv->card_type >= NV_50) { |
ce163f69 BS |
61 | ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm, |
62 | &chan->pushbuf_vma); | |
63 | if (ret) | |
64 | goto out; | |
65 | ||
96545299 BS |
66 | if (dev_priv->card_type < NV_C0) { |
67 | ret = nouveau_gpuobj_dma_new(chan, | |
68 | NV_CLASS_DMA_IN_MEMORY, 0, | |
69 | (1ULL << 40), | |
70 | NV_MEM_ACCESS_RO, | |
71 | NV_MEM_TARGET_VM, | |
dd6a46cc | 72 | &chan->pushbuf); |
96545299 | 73 | } |
ce163f69 | 74 | chan->pushbuf_base = chan->pushbuf_vma.offset; |
d87897d4 | 75 | } else |
dd6a46cc | 76 | if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) { |
7f4a195f BS |
77 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, |
78 | dev_priv->gart_info.aper_size, | |
79 | NV_MEM_ACCESS_RO, | |
dd6a46cc BS |
80 | NV_MEM_TARGET_GART, |
81 | &chan->pushbuf); | |
6ee73861 BS |
82 | } else |
83 | if (dev_priv->card_type != NV_04) { | |
84 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, | |
85 | dev_priv->fb_available_size, | |
7f4a195f | 86 | NV_MEM_ACCESS_RO, |
dd6a46cc BS |
87 | NV_MEM_TARGET_VRAM, |
88 | &chan->pushbuf); | |
6ee73861 BS |
89 | } else { |
90 | /* NV04 cmdbuf hack, from original ddx.. not sure of it's | |
91 | * exact reason for existing :) PCI access to cmdbuf in | |
92 | * VRAM. | |
93 | */ | |
94 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | |
7f4a195f | 95 | pci_resource_start(dev->pdev, 1), |
6ee73861 | 96 | dev_priv->fb_available_size, |
7f4a195f | 97 | NV_MEM_ACCESS_RO, |
dd6a46cc BS |
98 | NV_MEM_TARGET_PCI, |
99 | &chan->pushbuf); | |
6ee73861 BS |
100 | } |
101 | ||
dd6a46cc | 102 | out: |
6ee73861 | 103 | if (ret) { |
dd6a46cc | 104 | NV_ERROR(dev, "error initialising pushbuf: %d\n", ret); |
ce163f69 | 105 | nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma); |
dd6a46cc BS |
106 | nouveau_gpuobj_ref(NULL, &chan->pushbuf); |
107 | if (chan->pushbuf_bo) { | |
108 | nouveau_bo_unmap(chan->pushbuf_bo); | |
109 | nouveau_bo_ref(NULL, &chan->pushbuf_bo); | |
110 | } | |
96545299 BS |
111 | } |
112 | ||
dd6a46cc | 113 | return 0; |
6ee73861 BS |
114 | } |
115 | ||
/* allocates and initializes a fifo for user space consumption
 *
 * On success *chan_ret holds a new channel with one user reference,
 * published in dev_priv->channels.ptr[] and (if fpriv exists) linked on
 * the owning file's channel list; the channel mutex is held by the
 * caller's context via the nouveau_channel_put() pairing.  Every failure
 * after the hw id is reserved is unwound through nouveau_channel_put(),
 * which drops the initial user reference and tears the channel down.
 *
 * Returns 0, -ENOMEM (allocation), -ENODEV (no free hw channel id), or
 * the error from whichever init step failed.
 */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret, i;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	/* one kref for the channels.ptr[] slot below, one user ref for
	 * the caller; the mutex is taken locked so nobody can use the
	 * half-initialised channel.
	 */
	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (chan->id == pfifo->channels) {
		/* no free slot: the channel was never published, so a
		 * plain kfree (not nouveau_channel_put) is correct here.
		 */
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);

	/* setup channel's memory and vm */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate DMA push buffer */
	ret = nouveau_channel_pushbuf_init(chan);
	if (ret) {
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* USER register offsets for the put/get pointers */
	nouveau_dma_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;
	if (dev_priv->card_type >= NV_50)
		chan->user_get_hi = 0x60;

	/* create fifo context */
	ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING  (chan, 0x00000000);

	/* software object used for fencing/flip methods */
	ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	if (dev_priv->card_type < NV_C0) {
		/* pre-Fermi: bind the software object to a subchannel */
		ret = RING_SPACE(chan, 2);
		if (ret) {
			nouveau_channel_put(&chan);
			return ret;
		}

		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
		OUT_RING  (chan, NvSw);
		FIRE_RING (chan);
	}

	FIRE_RING(chan);

	ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	if (fpriv) {
		/* make the channel visible to its owning file */
		spin_lock(&fpriv->lock);
		list_add(&chan->list, &fpriv->channels);
		spin_unlock(&fpriv->lock);
	}
	*chan_ret = chan;
	return 0;
}
246 | ||
feeb0aec FJ |
247 | struct nouveau_channel * |
248 | nouveau_channel_get_unlocked(struct nouveau_channel *ref) | |
249 | { | |
f091a3d4 | 250 | struct nouveau_channel *chan = NULL; |
feeb0aec | 251 | |
f091a3d4 FJ |
252 | if (likely(ref && atomic_inc_not_zero(&ref->users))) |
253 | nouveau_channel_ref(ref, &chan); | |
254 | ||
255 | return chan; | |
feeb0aec FJ |
256 | } |
257 | ||
cff5c133 | 258 | struct nouveau_channel * |
e8a863c1 | 259 | nouveau_channel_get(struct drm_file *file_priv, int id) |
cff5c133 | 260 | { |
e8a863c1 | 261 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); |
feeb0aec | 262 | struct nouveau_channel *chan; |
cff5c133 | 263 | |
e8a863c1 BS |
264 | spin_lock(&fpriv->lock); |
265 | list_for_each_entry(chan, &fpriv->channels, list) { | |
266 | if (chan->id == id) { | |
267 | chan = nouveau_channel_get_unlocked(chan); | |
268 | spin_unlock(&fpriv->lock); | |
269 | mutex_lock(&chan->mutex); | |
270 | return chan; | |
271 | } | |
cff5c133 | 272 | } |
e8a863c1 | 273 | spin_unlock(&fpriv->lock); |
cff5c133 | 274 | |
e8a863c1 | 275 | return ERR_PTR(-EINVAL); |
cff5c133 BS |
276 | } |
277 | ||
6ee73861 | 278 | void |
feeb0aec | 279 | nouveau_channel_put_unlocked(struct nouveau_channel **pchan) |
6ee73861 | 280 | { |
cff5c133 | 281 | struct nouveau_channel *chan = *pchan; |
6ee73861 BS |
282 | struct drm_device *dev = chan->dev; |
283 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
6ee73861 | 284 | unsigned long flags; |
6dfdd7a6 | 285 | int i; |
6ee73861 | 286 | |
cff5c133 | 287 | /* decrement the refcount, and we're done if there's still refs */ |
f091a3d4 FJ |
288 | if (likely(!atomic_dec_and_test(&chan->users))) { |
289 | nouveau_channel_ref(NULL, pchan); | |
cff5c133 BS |
290 | return; |
291 | } | |
6ee73861 | 292 | |
25985edc | 293 | /* no one wants the channel anymore */ |
cff5c133 | 294 | NV_DEBUG(dev, "freeing channel %d\n", chan->id); |
6ee73861 BS |
295 | nouveau_debugfs_channel_fini(chan); |
296 | ||
cff5c133 | 297 | /* give it chance to idle */ |
6dccd311 | 298 | nouveau_channel_idle(chan); |
6ee73861 | 299 | |
3945e475 | 300 | /* destroy the engine specific contexts */ |
f51ee65c | 301 | for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) { |
6dfdd7a6 BS |
302 | if (chan->engctx[i]) |
303 | dev_priv->eng[i]->context_del(chan, i); | |
304 | } | |
6ee73861 | 305 | |
cff5c133 BS |
306 | /* aside from its resources, the channel should now be dead, |
307 | * remove it from the channel list | |
308 | */ | |
309 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | |
f091a3d4 | 310 | nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]); |
cff5c133 BS |
311 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); |
312 | ||
313 | /* destroy any resources the channel owned */ | |
a8eaebc6 | 314 | nouveau_gpuobj_ref(NULL, &chan->pushbuf); |
6ee73861 | 315 | if (chan->pushbuf_bo) { |
ce163f69 | 316 | nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma); |
9d59e8a1 | 317 | nouveau_bo_unmap(chan->pushbuf_bo); |
6ee73861 BS |
318 | nouveau_bo_unpin(chan->pushbuf_bo); |
319 | nouveau_bo_ref(NULL, &chan->pushbuf_bo); | |
320 | } | |
b7cb6c01 | 321 | nouveau_ramht_ref(NULL, &chan->ramht, chan); |
6ee73861 | 322 | nouveau_notifier_takedown_channel(chan); |
b7cb6c01 | 323 | nouveau_gpuobj_channel_takedown(chan); |
6ee73861 | 324 | |
f091a3d4 | 325 | nouveau_channel_ref(NULL, pchan); |
6ee73861 BS |
326 | } |
327 | ||
feeb0aec FJ |
328 | void |
329 | nouveau_channel_put(struct nouveau_channel **pchan) | |
330 | { | |
331 | mutex_unlock(&(*pchan)->mutex); | |
332 | nouveau_channel_put_unlocked(pchan); | |
333 | } | |
334 | ||
f091a3d4 FJ |
335 | static void |
336 | nouveau_channel_del(struct kref *ref) | |
337 | { | |
338 | struct nouveau_channel *chan = | |
339 | container_of(ref, struct nouveau_channel, ref); | |
340 | ||
f091a3d4 FJ |
341 | kfree(chan); |
342 | } | |
343 | ||
344 | void | |
345 | nouveau_channel_ref(struct nouveau_channel *chan, | |
346 | struct nouveau_channel **pchan) | |
347 | { | |
348 | if (chan) | |
349 | kref_get(&chan->ref); | |
350 | ||
351 | if (*pchan) | |
352 | kref_put(&(*pchan)->ref, nouveau_channel_del); | |
353 | ||
354 | *pchan = chan; | |
355 | } | |
356 | ||
d1b167e1 | 357 | int |
6dccd311 FJ |
358 | nouveau_channel_idle(struct nouveau_channel *chan) |
359 | { | |
360 | struct drm_device *dev = chan->dev; | |
361 | struct nouveau_fence *fence = NULL; | |
362 | int ret; | |
363 | ||
5e120f6e BS |
364 | ret = nouveau_fence_new(chan, &fence); |
365 | if (!ret) { | |
366 | ret = nouveau_fence_wait(fence, false, false); | |
367 | nouveau_fence_unref(&fence); | |
6dccd311 | 368 | } |
5e120f6e BS |
369 | |
370 | if (ret) | |
371 | NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); | |
d1b167e1 | 372 | return ret; |
6dccd311 FJ |
373 | } |
374 | ||
6ee73861 BS |
/* cleans up all the fifos from file_priv
 *
 * Called on file close: for every hw channel id, take the channel if it
 * belongs to this file, unlink it from the file's list, drop the file's
 * user reference, and put it.  The last put triggers full teardown in
 * nouveau_channel_put_unlocked().
 */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_channel *chan;
	int i;

	/* no fifo engine means no channels were ever created */
	if (!pfifo)
		return;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < pfifo->channels; i++) {
		/* only succeeds for channels on this file's list */
		chan = nouveau_channel_get(file_priv, i);
		if (IS_ERR(chan))
			continue;

		/* NOTE(review): list_del runs without fpriv->lock here,
		 * unlike the list_add in nouveau_channel_alloc — verify no
		 * concurrent lookup can walk the list at this point.
		 */
		list_del(&chan->list);
		/* drop the file's user ref on top of the one from _get() */
		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}