2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <core/client.h>
27 #include <core/engctx.h>
28 #include <core/ramht.h>
29 #include <subdev/fb.h>
30 #include <subdev/instmem/nv04.h>
32 #include <nvif/class.h>
33 #include <nvif/unpack.h>
35 static struct ramfc_desc
37 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT
},
38 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET
},
39 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT
},
40 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE
},
41 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT
},
42 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE
},
43 { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH
},
44 { 2, 28, 0x18, 28, 0x002058 },
45 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE
},
46 { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1
},
47 { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE
},
48 { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP
},
49 { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT
},
50 { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE
},
51 { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE
},
52 { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE
},
53 { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE
},
54 { 32, 0, 0x40, 0, 0x0032e4 },
55 { 32, 0, 0x44, 0, 0x0032e8 },
56 { 32, 0, 0x4c, 0, 0x002088 },
57 { 32, 0, 0x50, 0, 0x003300 },
58 { 32, 0, 0x54, 0, 0x00330c },
62 /*******************************************************************************
63 * FIFO channel objects
64 ******************************************************************************/
67 nv40_fifo_object_attach(struct nvkm_object
*parent
,
68 struct nvkm_object
*object
, u32 handle
)
70 struct nv04_fifo
*fifo
= (void *)parent
->engine
;
71 struct nv04_fifo_chan
*chan
= (void *)parent
;
72 u32 context
, chid
= chan
->base
.chid
;
75 if (nv_iclass(object
, NV_GPUOBJ_CLASS
))
76 context
= nv_gpuobj(object
)->addr
>> 4;
78 context
= 0x00000004; /* just non-zero */
80 switch (nv_engidx(object
->engine
)) {
81 case NVDEV_ENGINE_DMAOBJ
:
83 context
|= 0x00000000;
86 context
|= 0x00100000;
88 case NVDEV_ENGINE_MPEG
:
89 context
|= 0x00200000;
95 context
|= chid
<< 23;
97 mutex_lock(&nv_subdev(fifo
)->mutex
);
98 ret
= nvkm_ramht_insert(fifo
->ramht
, chid
, handle
, context
);
99 mutex_unlock(&nv_subdev(fifo
)->mutex
);
104 nv40_fifo_context_attach(struct nvkm_object
*parent
, struct nvkm_object
*engctx
)
106 struct nv04_fifo
*fifo
= (void *)parent
->engine
;
107 struct nv04_fifo_chan
*chan
= (void *)parent
;
108 struct nvkm_device
*device
= fifo
->base
.engine
.subdev
.device
;
112 switch (nv_engidx(engctx
->engine
)) {
113 case NVDEV_ENGINE_SW
:
115 case NVDEV_ENGINE_GR
:
119 case NVDEV_ENGINE_MPEG
:
127 spin_lock_irqsave(&fifo
->base
.lock
, flags
);
128 nv_engctx(engctx
)->addr
= nv_gpuobj(engctx
)->addr
>> 4;
129 nvkm_mask(device
, 0x002500, 0x00000001, 0x00000000);
131 if ((nvkm_rd32(device
, 0x003204) & fifo
->base
.max
) == chan
->base
.chid
)
132 nvkm_wr32(device
, reg
, nv_engctx(engctx
)->addr
);
133 nv_wo32(fifo
->ramfc
, chan
->ramfc
+ ctx
, nv_engctx(engctx
)->addr
);
135 nvkm_mask(device
, 0x002500, 0x00000001, 0x00000001);
136 spin_unlock_irqrestore(&fifo
->base
.lock
, flags
);
141 nv40_fifo_context_detach(struct nvkm_object
*parent
, bool suspend
,
142 struct nvkm_object
*engctx
)
144 struct nv04_fifo
*fifo
= (void *)parent
->engine
;
145 struct nv04_fifo_chan
*chan
= (void *)parent
;
146 struct nvkm_device
*device
= fifo
->base
.engine
.subdev
.device
;
150 switch (nv_engidx(engctx
->engine
)) {
151 case NVDEV_ENGINE_SW
:
153 case NVDEV_ENGINE_GR
:
157 case NVDEV_ENGINE_MPEG
:
165 spin_lock_irqsave(&fifo
->base
.lock
, flags
);
166 nvkm_mask(device
, 0x002500, 0x00000001, 0x00000000);
168 if ((nvkm_rd32(device
, 0x003204) & fifo
->base
.max
) == chan
->base
.chid
)
169 nvkm_wr32(device
, reg
, 0x00000000);
170 nv_wo32(fifo
->ramfc
, chan
->ramfc
+ ctx
, 0x00000000);
172 nvkm_mask(device
, 0x002500, 0x00000001, 0x00000001);
173 spin_unlock_irqrestore(&fifo
->base
.lock
, flags
);
178 nv40_fifo_chan_ctor(struct nvkm_object
*parent
, struct nvkm_object
*engine
,
179 struct nvkm_oclass
*oclass
, void *data
, u32 size
,
180 struct nvkm_object
**pobject
)
183 struct nv03_channel_dma_v0 v0
;
185 struct nv04_fifo
*fifo
= (void *)engine
;
186 struct nv04_fifo_chan
*chan
;
189 nvif_ioctl(parent
, "create channel dma size %d\n", size
);
190 if (nvif_unpack(args
->v0
, 0, 0, false)) {
191 nvif_ioctl(parent
, "create channel dma vers %d pushbuf %08x "
192 "offset %016llx\n", args
->v0
.version
,
193 args
->v0
.pushbuf
, args
->v0
.offset
);
197 ret
= nvkm_fifo_channel_create(parent
, engine
, oclass
, 0, 0xc00000,
198 0x1000, args
->v0
.pushbuf
,
199 (1ULL << NVDEV_ENGINE_DMAOBJ
) |
200 (1ULL << NVDEV_ENGINE_SW
) |
201 (1ULL << NVDEV_ENGINE_GR
) |
202 (1ULL << NVDEV_ENGINE_MPEG
), &chan
);
203 *pobject
= nv_object(chan
);
207 args
->v0
.chid
= chan
->base
.chid
;
209 nv_parent(chan
)->context_attach
= nv40_fifo_context_attach
;
210 nv_parent(chan
)->context_detach
= nv40_fifo_context_detach
;
211 nv_parent(chan
)->object_attach
= nv40_fifo_object_attach
;
212 nv_parent(chan
)->object_detach
= nv04_fifo_object_detach
;
213 chan
->ramfc
= chan
->base
.chid
* 128;
215 nv_wo32(fifo
->ramfc
, chan
->ramfc
+ 0x00, args
->v0
.offset
);
216 nv_wo32(fifo
->ramfc
, chan
->ramfc
+ 0x04, args
->v0
.offset
);
217 nv_wo32(fifo
->ramfc
, chan
->ramfc
+ 0x0c, chan
->base
.pushgpu
->addr
>> 4);
218 nv_wo32(fifo
->ramfc
, chan
->ramfc
+ 0x18, 0x30000000 |
219 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES
|
220 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES
|
222 NV_PFIFO_CACHE1_BIG_ENDIAN
|
224 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8
);
225 nv_wo32(fifo
->ramfc
, chan
->ramfc
+ 0x3c, 0x0001ffff);
229 static struct nvkm_ofuncs
231 .ctor
= nv40_fifo_chan_ctor
,
232 .dtor
= nv04_fifo_chan_dtor
,
233 .init
= nv04_fifo_chan_init
,
234 .fini
= nv04_fifo_chan_fini
,
235 .map
= _nvkm_fifo_channel_map
,
236 .rd32
= _nvkm_fifo_channel_rd32
,
237 .wr32
= _nvkm_fifo_channel_wr32
,
238 .ntfy
= _nvkm_fifo_channel_ntfy
241 static struct nvkm_oclass
242 nv40_fifo_sclass
[] = {
243 { NV40_CHANNEL_DMA
, &nv40_fifo_ofuncs
},
247 /*******************************************************************************
248 * FIFO context - basically just the instmem reserved for the channel
249 ******************************************************************************/
251 static struct nvkm_oclass
253 .handle
= NV_ENGCTX(FIFO
, 0x40),
254 .ofuncs
= &(struct nvkm_ofuncs
) {
255 .ctor
= nv04_fifo_context_ctor
,
256 .dtor
= _nvkm_fifo_context_dtor
,
257 .init
= _nvkm_fifo_context_init
,
258 .fini
= _nvkm_fifo_context_fini
,
259 .rd32
= _nvkm_fifo_context_rd32
,
260 .wr32
= _nvkm_fifo_context_wr32
,
/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/
269 nv40_fifo_ctor(struct nvkm_object
*parent
, struct nvkm_object
*engine
,
270 struct nvkm_oclass
*oclass
, void *data
, u32 size
,
271 struct nvkm_object
**pobject
)
273 struct nv04_instmem
*imem
= nv04_instmem(parent
);
274 struct nv04_fifo
*fifo
;
277 ret
= nvkm_fifo_create(parent
, engine
, oclass
, 0, 31, &fifo
);
278 *pobject
= nv_object(fifo
);
282 nvkm_ramht_ref(imem
->ramht
, &fifo
->ramht
);
283 nvkm_gpuobj_ref(imem
->ramro
, &fifo
->ramro
);
284 nvkm_gpuobj_ref(imem
->ramfc
, &fifo
->ramfc
);
286 nv_subdev(fifo
)->unit
= 0x00000100;
287 nv_subdev(fifo
)->intr
= nv04_fifo_intr
;
288 nv_engine(fifo
)->cclass
= &nv40_fifo_cclass
;
289 nv_engine(fifo
)->sclass
= nv40_fifo_sclass
;
290 fifo
->base
.pause
= nv04_fifo_pause
;
291 fifo
->base
.start
= nv04_fifo_start
;
292 fifo
->ramfc_desc
= nv40_ramfc
;
297 nv40_fifo_init(struct nvkm_object
*object
)
299 struct nv04_fifo
*fifo
= (void *)object
;
300 struct nvkm_device
*device
= fifo
->base
.engine
.subdev
.device
;
301 struct nvkm_fb
*fb
= device
->fb
;
304 ret
= nvkm_fifo_init(&fifo
->base
);
308 nvkm_wr32(device
, 0x002040, 0x000000ff);
309 nvkm_wr32(device
, 0x002044, 0x2101ffff);
310 nvkm_wr32(device
, 0x002058, 0x00000001);
312 nvkm_wr32(device
, NV03_PFIFO_RAMHT
, (0x03 << 24) /* search 128 */ |
313 ((fifo
->ramht
->bits
- 9) << 16) |
314 (fifo
->ramht
->gpuobj
.addr
>> 8));
315 nvkm_wr32(device
, NV03_PFIFO_RAMRO
, fifo
->ramro
->addr
>> 8);
317 switch (nv_device(fifo
)->chipset
) {
321 nvkm_wr32(device
, 0x002230, 0x00000001);
328 nvkm_wr32(device
, 0x002220, 0x00030002);
331 nvkm_wr32(device
, 0x002230, 0x00000000);
332 nvkm_wr32(device
, 0x002220, ((fb
->ram
->size
- 512 * 1024 +
333 fifo
->ramfc
->addr
) >> 16) |
338 nvkm_wr32(device
, NV03_PFIFO_CACHE1_PUSH1
, fifo
->base
.max
);
340 nvkm_wr32(device
, NV03_PFIFO_INTR_0
, 0xffffffff);
341 nvkm_wr32(device
, NV03_PFIFO_INTR_EN_0
, 0xffffffff);
343 nvkm_wr32(device
, NV03_PFIFO_CACHE1_PUSH0
, 1);
344 nvkm_wr32(device
, NV04_PFIFO_CACHE1_PULL0
, 1);
345 nvkm_wr32(device
, NV03_PFIFO_CACHES
, 1);
350 nv40_fifo_oclass
= &(struct nvkm_oclass
) {
351 .handle
= NV_ENGINE(FIFO
, 0x40),
352 .ofuncs
= &(struct nvkm_ofuncs
) {
353 .ctor
= nv40_fifo_ctor
,
354 .dtor
= nv04_fifo_dtor
,
355 .init
= nv40_fifo_init
,
356 .fini
= _nvkm_fifo_fini
,