/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include <core/gpuobj.h>
26 #include <core/option.h>
28 #include <subdev/timer.h>
29 #include <subdev/mmu.h>
33 #define NV41_GART_SIZE (512 * 1024 * 1024)
34 #define NV41_GART_PAGE ( 4 * 1024)
/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/
41 nv41_vm_map_sg(struct nouveau_vma
*vma
, struct nouveau_gpuobj
*pgt
,
42 struct nouveau_mem
*mem
, u32 pte
, u32 cnt
, dma_addr_t
*list
)
46 u32 page
= PAGE_SIZE
/ NV41_GART_PAGE
;
47 u64 phys
= (u64
)*list
++;
48 while (cnt
&& page
--) {
49 nv_wo32(pgt
, pte
, (phys
>> 7) | 1);
50 phys
+= NV41_GART_PAGE
;
58 nv41_vm_unmap(struct nouveau_gpuobj
*pgt
, u32 pte
, u32 cnt
)
62 nv_wo32(pgt
, pte
, 0x00000000);
68 nv41_vm_flush(struct nouveau_vm
*vm
)
70 struct nv04_mmu_priv
*priv
= (void *)vm
->mmu
;
72 mutex_lock(&nv_subdev(priv
)->mutex
);
73 nv_wr32(priv
, 0x100810, 0x00000022);
74 if (!nv_wait(priv
, 0x100810, 0x00000020, 0x00000020)) {
75 nv_warn(priv
, "flush timeout, 0x%08x\n",
76 nv_rd32(priv
, 0x100810));
78 nv_wr32(priv
, 0x100810, 0x00000000);
79 mutex_unlock(&nv_subdev(priv
)->mutex
);
/*******************************************************************************
 * MMU subdev
 ******************************************************************************/
87 nv41_mmu_ctor(struct nouveau_object
*parent
, struct nouveau_object
*engine
,
88 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
89 struct nouveau_object
**pobject
)
91 struct nouveau_device
*device
= nv_device(parent
);
92 struct nv04_mmu_priv
*priv
;
95 if (pci_find_capability(device
->pdev
, PCI_CAP_ID_AGP
) ||
96 !nouveau_boolopt(device
->cfgopt
, "NvPCIE", true)) {
97 return nouveau_object_ctor(parent
, engine
, &nv04_mmu_oclass
,
101 ret
= nouveau_mmu_create(parent
, engine
, oclass
, "PCIEGART",
103 *pobject
= nv_object(priv
);
107 priv
->base
.create
= nv04_vm_create
;
108 priv
->base
.limit
= NV41_GART_SIZE
;
109 priv
->base
.dma_bits
= 39;
110 priv
->base
.pgt_bits
= 32 - 12;
111 priv
->base
.spg_shift
= 12;
112 priv
->base
.lpg_shift
= 12;
113 priv
->base
.map_sg
= nv41_vm_map_sg
;
114 priv
->base
.unmap
= nv41_vm_unmap
;
115 priv
->base
.flush
= nv41_vm_flush
;
117 ret
= nouveau_vm_create(&priv
->base
, 0, NV41_GART_SIZE
, 0, 4096,
122 ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
,
123 (NV41_GART_SIZE
/ NV41_GART_PAGE
) * 4,
124 16, NVOBJ_FLAG_ZERO_ALLOC
,
125 &priv
->vm
->pgt
[0].obj
[0]);
126 priv
->vm
->pgt
[0].refcount
[0] = 1;
134 nv41_mmu_init(struct nouveau_object
*object
)
136 struct nv04_mmu_priv
*priv
= (void *)object
;
137 struct nouveau_gpuobj
*dma
= priv
->vm
->pgt
[0].obj
[0];
140 ret
= nouveau_mmu_init(&priv
->base
);
144 nv_wr32(priv
, 0x100800, dma
->addr
| 0x00000002);
145 nv_mask(priv
, 0x10008c, 0x00000100, 0x00000100);
146 nv_wr32(priv
, 0x100820, 0x00000000);
150 struct nouveau_oclass
152 .handle
= NV_SUBDEV(MMU
, 0x41),
153 .ofuncs
= &(struct nouveau_ofuncs
) {
154 .ctor
= nv41_mmu_ctor
,
155 .dtor
= nv04_mmu_dtor
,
156 .init
= nv41_mmu_init
,
157 .fini
= _nouveau_mmu_fini
,
This page took 0.050842 seconds and 4 git commands to generate.