/*
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */


#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "nouveau_drv.h"

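/* Carve the requested range out of free block 'p' and mark it as owned by
 * 'file_priv'.  Up to two new blocks are created for the unused space
 * before and after the allocation; if a kmalloc fails we fall through and
 * hand out the unsplit block, trading wasted space for a successful
 * allocation.
 */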
static struct mem_block *
split_block(struct mem_block *p, uint64_t start, uint64_t size,
	    struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
			kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
			kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}

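/* First-fit allocator (last-fit when 'tail' is set): walk the heap for a
 * free block that can hold 'size' bytes aligned to a (1 << align2) byte
 * boundary, and split it.  Returns NULL if nothing fits.
 */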
struct mem_block *
nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
			int align2, struct drm_file *file_priv, int tail)
{
	struct mem_block *p;
	uint64_t mask = (1ULL << align2) - 1;

	if (!heap)
		return NULL;

	if (tail) {
		list_for_each_prev(p, heap) {
			uint64_t start = ((p->start + p->size) - size) & ~mask;

			if (p->file_priv == NULL && start >= p->start &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	} else {
		list_for_each(p, heap) {
			uint64_t start = (p->start + mask) & ~mask;

			if (p->file_priv == NULL &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	}

	return NULL;
}

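/* Return a block to the heap and merge it with any free neighbours. */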
void nouveau_mem_free_block(struct mem_block *p)
{
	p->file_priv = NULL;

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->file_priv == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		kfree(q);
	}

	if (p->prev->file_priv == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		kfree(p);
	}
}

/* Initialize.  How to check for an uninitialized heap?
 */
int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
			  uint64_t size)
{
	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);

	if (!blocks)
		return -ENOMEM;

	*heap = kmalloc(sizeof(**heap), GFP_KERNEL);
	if (!*heap) {
		kfree(blocks);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->file_priv = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	(*heap)->file_priv = (struct drm_file *) -1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}

/*
 * Free all blocks associated with the releasing file_priv
 */
void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	list_for_each(p, heap) {
		if (p->file_priv == file_priv)
			p->file_priv = NULL;
	}

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	list_for_each(p, heap) {
		while ((p->file_priv == NULL) &&
		       (p->next->file_priv == NULL) &&
		       (p->next != heap)) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			kfree(q);
		}
	}
}

/*
 * NV50 VM helpers
 */
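/* Map a linear range of physical addresses at 'virt' in the NV50 virtual
 * address space (or clear the mappings, when bit 31 of 'flags' is set).
 * Each PTE is a pair of 32-bit words, and pgt[] is indexed by bits 29 and
 * up of the offset, so one page table covers a 512MiB span.  GART mappings
 * use 4KiB (1 << 12) pages, VRAM mappings 64KiB (1 << 16) pages.
 */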
int
nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
			uint32_t flags, uint64_t phys)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj **pgt;
	unsigned psz, pfl, pages;

	if (virt >= dev_priv->vm_gart_base &&
	    (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
		psz = 12;
		pgt = &dev_priv->gart_info.sg_ctxdma;
		pfl = 0x21;
		virt -= dev_priv->vm_gart_base;
	} else
	if (virt >= dev_priv->vm_vram_base &&
	    (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
		psz = 16;
		pgt = dev_priv->vm_vram_pt;
		pfl = 0x01;
		virt -= dev_priv->vm_vram_base;
	} else {
		NV_ERROR(dev, "Invalid address: 0x%016llx-0x%016llx\n",
			 virt, virt + size - 1);
		return -EINVAL;
	}

	pages = size >> psz;

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (flags & 0x80000000) {
		while (pages--) {
			struct nouveau_gpuobj *pt = pgt[virt >> 29];
			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;

			nv_wo32(dev, pt, pte++, 0x00000000);
			nv_wo32(dev, pt, pte++, 0x00000000);

			virt += (1 << psz);
		}
	} else {
		while (pages--) {
			struct nouveau_gpuobj *pt = pgt[virt >> 29];
			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
			unsigned offset_h = upper_32_bits(phys) & 0xff;
			unsigned offset_l = lower_32_bits(phys);

			nv_wo32(dev, pt, pte++, offset_l | pfl);
			nv_wo32(dev, pt, pte++, offset_h | flags);

			phys += (1 << psz);
			virt += (1 << psz);
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	nv_wr32(dev, 0x100c80, 0x00050001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	nv_wr32(dev, 0x100c80, 0x00000001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	return 0;
}

void
nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
	nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
}

/*
 * Cleanup everything
 */
void nouveau_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		kfree(q);
	}

	kfree(*heap);
	*heap = NULL;
}

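/* Undo nouveau_mem_init(): tear down the TTM memory managers and device,
 * release AGP resources (leaving dev->agp itself for drv_cleanup), and
 * drop the framebuffer MTRR.
 */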
void nouveau_mem_close(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (drm_core_has_AGP(dev) && dev->agp &&
	    drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			kfree(entry);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}

	if (dev_priv->fb_mtrr) {
		drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
			     drm_get_resource_len(dev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = 0;
	}
}

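/* On nForce/nForce2 IGPs the framebuffer is carved out of system RAM, so
 * its size is read from the host bridge (device 0:0.1) rather than from
 * the card; the config-space offsets used below are chipset specific.
 */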
/*XXX won't work on BSD because of pci_read_config_dword */
static uint32_t
nouveau_mem_fb_amount_igp(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pci_dev *bridge;
	uint32_t mem;

	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
	if (!bridge) {
		NV_ERROR(dev, "no bridge device\n");
		return 0;
	}

	if (dev_priv->flags & NV_NFORCE) {
		pci_read_config_dword(bridge, 0x7C, &mem);
		pci_dev_put(bridge);
		return (((mem >> 6) & 31) + 1) * 1024 * 1024;
	} else
	if (dev_priv->flags & NV_NFORCE2) {
		pci_read_config_dword(bridge, 0x84, &mem);
		pci_dev_put(bridge);
		return (((mem >> 4) & 127) + 1) * 1024 * 1024;
	}

	pci_dev_put(bridge);
	NV_ERROR(dev, "impossible!\n");
	return 0;
}

/* returns the amount of FB ram in bytes */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t boot0;

	switch (dev_priv->card_type) {
	case NV_04:
		boot0 = nv_rd32(dev, NV03_BOOT_0);
		if (boot0 & 0x00000100)
			return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;

		switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
		case NV04_BOOT_0_RAM_AMOUNT_32MB:
			return 32 * 1024 * 1024;
		case NV04_BOOT_0_RAM_AMOUNT_16MB:
			return 16 * 1024 * 1024;
		case NV04_BOOT_0_RAM_AMOUNT_8MB:
			return 8 * 1024 * 1024;
		case NV04_BOOT_0_RAM_AMOUNT_4MB:
			return 4 * 1024 * 1024;
		}
		break;
	case NV_10:
	case NV_20:
	case NV_30:
	case NV_40:
	case NV_50:
	default:
		if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
			return nouveau_mem_fb_amount_igp(dev);
		} else {
			uint64_t mem;
			mem = (nv_rd32(dev, NV04_FIFO_DATA) &
			       NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
			      NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
			return mem * 1024 * 1024;
		}
		break;
	}

	NV_ERROR(dev,
		 "Unable to detect video ram size. Please report your setup to "
		 DRIVER_EMAIL "\n");
	return 0;
}

#if __OS_HAS_AGP
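/* Soft-reset the AGP link: drop the busmaster and SBA/AGP enable bits,
 * power cycle PGRAPH through PMC_ENABLE if it was running, then restore
 * the saved PCI config words so the link comes back up fresh.
 */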
static void nouveau_mem_reset_agp(struct drm_device *dev)
{
	uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
	saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* clear SBA and AGP bits */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
			pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}
#endif

int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	if (nouveau_noagp)
		return 0;

	nouveau_mem_reset_agp(dev);

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
		if (ret) {
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	ret = drm_agp_info(dev, &info);
	if (ret) {
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = info.mode;
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.type = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
#endif
	return 0;
}

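/* One-time memory setup: set the DMA mask, bring up TTM, probe VRAM size,
 * and initialise a GART backend (AGP where available, otherwise the
 * PCI(E) sgdma one).
 */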
int
nouveau_mem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret, dma_bits = 32;

	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

	if (dev_priv->card_type >= NV_50 &&
	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
		dma_bits = 40;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
		return ret;
	}

	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

	INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
	spin_lock_init(&dev_priv->ttm.bo_list_lock);

	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);

	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
		dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));

	/* remove reserved space at end of vram from available amount */
	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	/* GART */
#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_device_is_agp(dev) && dev->agp) {
		ret = nouveau_mem_init_agp(dev);
		if (ret)
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
	}
#endif

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
					 drm_get_resource_len(dev, 1),
					 DRM_MTRR_WC);
	return 0;
}