/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs <darktama@iinet.net.au>
 *          Jeremy Kolb <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>

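/*
 * Make sure the GPU is done with a buffer before it is used on a different
 * channel: a no-op if the bo carries no fence or was last used on @chan,
 * otherwise wait for the outstanding work to complete.
 */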
int
nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
{
        struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
        int ret;

        if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
                return 0;

        spin_lock(&nvbo->bo.lock);
        ret = ttm_bo_wait(&nvbo->bo, false, false, false);
        spin_unlock(&nvbo->bo.lock);
        return ret;
}

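/*
 * TTM destroy callback: warn if the bo is still attached to a GEM object,
 * release any tiling region it holds, then free the nouveau_bo itself.
 */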
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        if (nvbo->tile)
                nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

        kfree(nvbo);
}

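/*
 * Adjust a requested buffer size and alignment to the constraints the
 * tiling hardware imposes for the given tile_mode/tile_flags on this
 * chipset.
 */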
static void
nouveau_bo_fixup_align(struct drm_device *dev,
                       uint32_t tile_mode, uint32_t tile_flags,
                       int *align, int *size)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*
         * Some of the tile_flags have a periodic structure of N*4096 bytes,
         * align to that as well as the page size.  Align the size to the
         * appropriate boundaries.  This does imply that sizes are rounded up
         * by 3-7 pages, so be aware of this and do not waste memory by
         * allocating many small buffers.
         */
        if (dev_priv->card_type == NV_50) {
                uint32_t block_size = dev_priv->vram_size >> 15;
                int i;

                switch (tile_flags) {
                case 0x1800:
                case 0x2800:
                case 0x4800:
                case 0x7a00:
                        if (is_power_of_2(block_size)) {
                                for (i = 1; i < 10; i++) {
                                        *align = 12 * i * block_size;
                                        if (!(*align % 65536))
                                                break;
                                }
                        } else {
                                for (i = 1; i < 10; i++) {
                                        *align = 8 * i * block_size;
                                        if (!(*align % 65536))
                                                break;
                                }
                        }
                        *size = roundup(*size, *align);
                        break;
                default:
                        break;
                }

        } else {
                if (tile_mode) {
                        if (dev_priv->chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * tile_mode);

                        } else if (dev_priv->chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * tile_mode);

                        } else if (dev_priv->chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * tile_mode);

                        } else if (dev_priv->chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * tile_mode);
                        }
                }
        }

        /* ALIGN works only on powers of two. */
        *size = roundup(*size, PAGE_SIZE);

        if (dev_priv->card_type == NV_50) {
                *size = roundup(*size, 65536);
                *align = max(65536, *align);
        }
}

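/*
 * Allocate and initialise a new buffer object.  Size and alignment are
 * fixed up for tiling first, then TTM places the buffer according to
 * @flags.  On failure TTM invokes nouveau_bo_del_ttm() for us.
 */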
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->mappable = mappable;
        nvbo->no_vm = no_vm;
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;

        nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
        align >>= PAGE_SHIFT;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
        nouveau_bo_placement_set(nvbo, flags, 0);

        nvbo->channel = chan;
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }
        nvbo->channel = NULL;

        *pnvbo = nvbo;
        return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++] = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

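/*
 * Build the TTM placement lists for a bo: @type gives the preferred memory
 * types, @busy any extra types acceptable when the preferred ones are
 * contended.  Pinned buffers are additionally marked no-evict.
 */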
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = TTM_PL_MASK_CACHING |
                (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);
}

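/*
 * Pin a bo into @memtype memory.  Pinning is refcounted; only the first
 * pin actually validates the buffer into place and marks it no-evict, and
 * pinning a buffer already pinned into a different memory type is
 * rejected.
 */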
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype, 0);

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}

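/*
 * Drop a pin reference.  When the last reference goes away the buffer is
 * re-validated without the no-evict flag and the aperture accounting is
 * given back.
 */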
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}

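/*
 * Map the whole buffer into kernel virtual address space via a TTM kmap;
 * nouveau_bo_unmap() releases the mapping again.
 */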
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (nvbo)
                ttm_bo_kunmap(&nvbo->kmap);
}

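/*
 * 16- and 32-bit accessors for a kmapped bo.  @index counts in units of
 * the access size; I/O memory is handled with the io{read,write}*_native
 * helpers, ordinary memory with plain loads and stores.
 */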
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

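/*
 * Create the TTM backend for GART placements: AGP when the GART is AGP
 * (and the kernel has AGP support), otherwise the SGDMA page table.
 */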
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

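/*
 * Describe each TTM memory type to the core: mappability, the caching
 * options it supports, and the GPU offset at which the aperture appears
 * in the card's address space.
 */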
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                if (dev_priv->card_type == NV_50)
                        man->gpu_offset = 0x40000000;
                else
                        man->gpu_offset = 0;
                break;
        case TTM_PL_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED;
                        man->default_caching = TTM_PL_FLAG_UNCACHED;
                        break;
                case NOUVEAU_GART_SGDMA:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
                man->gpu_offset = dev_priv->vm_gart_base;
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

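/*
 * Choose where an evicted buffer should go: VRAM contents are moved out
 * through GART (falling back to system memory), everything else straight
 * to system memory.
 */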
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

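/*
 * Emit a fence on @chan after a copy and hand it to TTM's accel-cleanup
 * helper, which defers release of the old backing store until the fence
 * signals.
 */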
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
                                        evict || (nvbo->channel &&
                                                  nvbo->channel != chan),
                                        no_wait_reserve, no_wait_gpu, new_mem);
        nouveau_fence_unref((void *)&fence);
        return ret;
}

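/*
 * Pick the DMA context object the copy engine should reference for @mem:
 * the global NvDmaGART/NvDmaVRAM objects for no_vm buffers, the channel's
 * own GART/VRAM handles otherwise.
 */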
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (nvbo->no_vm) {
                if (mem->mem_type == TTM_PL_TT)
                        return NvDmaGART;
                return NvDmaVRAM;
        }

        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}

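/*
 * NV50 M2MF copy.  The transfer is cut into chunks of at most 16MiB;
 * before each chunk, surface parameters are sent (methods 0x0200 and
 * 0x021c) describing whether the source and destination sides are linear
 * or tiled VRAM allocations.
 */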
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset, dst_offset;
        int ret;

        src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        if (!nvbo->no_vm) {
                if (old_mem->mem_type == TTM_PL_VRAM)
                        src_offset += dev_priv->vm_vram_base;
                else
                        src_offset += dev_priv->vm_gart_base;

                if (new_mem->mem_type == TTM_PL_VRAM)
                        dst_offset += dev_priv->vm_vram_base;
                else
                        dst_offset += dev_priv->vm_gart_base;
        }

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        while (length) {
                u32 amount, stride, height;

                amount = min(length, (u64)(16 * 1024 * 1024));
                stride = 64 * 4;
                height = amount / stride;

                if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0x20);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                        OUT_RING  (chan, 1);
                }
                if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0x20);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                        OUT_RING  (chan, 1);
                }

                ret = RING_SPACE(chan, 14);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, height);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}

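/*
 * Pre-NV50 M2MF copy: a simple linear transfer, PAGE_SIZE bytes per line
 * and at most 2047 lines per submission.
 */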
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        u32 src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        u32 dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        u32 page_count = new_mem->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (chan, src_offset);
                OUT_RING  (chan, dst_offset);
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

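/*
 * Dispatch a GPU copy on the bo's channel (or the kernel's channel for
 * no_vm buffers), using the NV04 or NV50 path as appropriate, then fence
 * it so TTM can safely release the old backing store.
 */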
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_channel *chan;
        int ret;

        chan = nvbo->channel;
        if (!chan || nvbo->no_vm)
                chan = dev_priv->channel;

        if (dev_priv->card_type < NV_50)
                ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
                ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        if (ret)
                return ret;

        return nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                             no_wait_reserve, no_wait_gpu,
                                             new_mem);
}

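/*
 * Move into system memory in two hops: GPU-copy the contents into a
 * temporary GART placement, then let TTM finish the move from GART to the
 * final placement.
 */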
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
                               no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve,
                                   no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

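/*
 * Move out of system memory, the mirror of the above: TTM first binds the
 * pages to a temporary GART placement, then the GPU copies them to the
 * destination.
 */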
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
                               no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu,
                              &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve,
                                   no_wait_gpu, new_mem);
out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

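/*
 * Update the GPU-side mapping state for a bo's new placement: set up the
 * NV50 linear VM mapping, or allocate an NV10+ tile region matching the
 * bo's tile_mode.  Nothing to do for no_vm buffers or non-VRAM placements.
 */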
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        uint64_t offset;
        int ret;

        if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
                /* Nothing to do. */
                *new_tile = NULL;
                return 0;
        }

        offset = new_mem->mm_node->start << PAGE_SHIFT;

        if (dev_priv->card_type == NV_50) {
                ret = nv50_mem_vm_bind_linear(dev,
                                              offset + dev_priv->vm_vram_base,
                                              new_mem->size, nvbo->tile_flags,
                                              offset);
                if (ret)
                        return ret;

        } else if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode);
        }

        return 0;
}

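/*
 * Retire the old tile region once the move's fence allows it, and record
 * the new one.  Only relevant on NV10-NV4x; NV50 mappings are handled by
 * the VM code.
 */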
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        if (dev_priv->card_type >= NV_10 &&
            dev_priv->card_type < NV_50) {
                if (*old_tile)
                        nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

                *old_tile = new_tile;
        }
}

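/*
 * Main TTM move callback.  Fresh system-memory buffers are "moved" by
 * simply adopting the new placement; otherwise try a hardware M2MF copy
 * and fall back to a CPU memcpy when no channel is available or the copy
 * fails.
 */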
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;
        int ret = 0;

        ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
        if (ret)
                return ret;

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* Software copy if the card isn't up and running yet. */
        if (!dev_priv->channel) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
                                         no_wait_gpu, new_mem);
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve,
                                            no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve,
                                            no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve,
                                           no_wait_gpu, new_mem);

        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu,
                                 new_mem);

out:
        if (ret)
                nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
        else
                nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

        return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

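/*
 * Tell TTM how to map each memory type for CPU access: system memory
 * needs nothing, while AGP and VRAM are iomem windows at their respective
 * aperture base addresses.
 */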
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if __OS_HAS_AGP
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;
                }
#endif
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

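/*
 * The TTM driver vtable for nouveau, wiring together the callbacks above
 * with the fence primitives used as TTM sync objects.
 */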
struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = nouveau_fence_signalled,
        .sync_obj_wait = nouveau_fence_wait,
        .sync_obj_flush = nouveau_fence_flush,
        .sync_obj_unref = nouveau_fence_unref,
        .sync_obj_ref = nouveau_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};