drm/nouveau/gr: convert user classes to new-style nvkm_object
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
#include "nv20.h"
#include "regs.h"

#include <core/client.h>
#include <engine/fifo.h>
#include <engine/fifo/chan.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

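/* Attach the channel to PGRAPH: the channel's slot in the context table
 * gets the instance address of its context image, in 16-byte units
 * (inst >> 4).
 */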
int
nv20_gr_chan_init(struct nvkm_object *object)
{
	struct nv20_gr_chan *chan = nv20_gr_chan(object);
	struct nv20_gr *gr = chan->gr;
	u32 inst = nvkm_memory_addr(chan->inst);

	nvkm_kmap(gr->ctxtab);
	nvkm_wo32(gr->ctxtab, chan->chid * 4, inst >> 4);
	nvkm_done(gr->ctxtab);
	return 0;
}

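/* Detach the channel from PGRAPH.  Context switching is disabled while
 * we check whether this channel is the one currently resident; if so,
 * PGRAPH is pointed back at its context image, a transfer is triggered
 * via 0x400788 (presumably a context save) and we wait for idle before
 * invalidating the current-channel field.  Finally the channel's entry
 * in the context table is cleared.
 */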
int
nv20_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv20_gr_chan *chan = nv20_gr_chan(object);
	struct nv20_gr *gr = chan->gr;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 inst = nvkm_memory_addr(chan->inst);
	int chid = -1;

	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
	if (nvkm_rd32(device, 0x400144) & 0x00010000)
		chid = (nvkm_rd32(device, 0x400148) & 0x1f000000) >> 24;
	if (chan->chid == chid) {
		nvkm_wr32(device, 0x400784, inst >> 4);
		nvkm_wr32(device, 0x400788, 0x00000002);
		nvkm_msec(device, 2000,
			if (!nvkm_rd32(device, 0x400700))
				break;
		);
		nvkm_wr32(device, 0x400144, 0x10000000);
		nvkm_mask(device, 0x400148, 0xff000000, 0x1f000000);
	}
	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);

	nvkm_kmap(gr->ctxtab);
	nvkm_wo32(gr->ctxtab, chan->chid * 4, 0x00000000);
	nvkm_done(gr->ctxtab);
	return 0;
}

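/* Destroy the channel object, releasing its context image; returning
 * the pointer lets the core free the containing structure.
 */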
void *
nv20_gr_chan_dtor(struct nvkm_object *object)
{
	struct nv20_gr_chan *chan = nv20_gr_chan(object);
	nvkm_memory_del(&chan->inst);
	return chan;
}

static const struct nvkm_object_func
nv20_gr_chan = {
	.dtor = nv20_gr_chan_dtor,
	.init = nv20_gr_chan_init,
	.fini = nv20_gr_chan_fini,
};

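/* Allocate a channel's graphics context: a 0x37f0-byte instance object
 * seeded with the initial register state PGRAPH expects, with the
 * channel id stamped into the first word.
 */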
static int
nv20_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv20_gr *gr = nv20_gr(base);
	struct nv20_gr_chan *chan;
	int ret, i;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv20_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->chid = fifoch->chid;
	*pobject = &chan->object;

	ret = nvkm_memory_new(gr->base.engine.subdev.device,
			      NVKM_MEM_TARGET_INST, 0x37f0, 16, true,
			      &chan->inst);
	if (ret)
		return ret;

	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
	nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
	nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
	nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
	nvkm_wo32(chan->inst, 0x047c, 0x00000101);
	nvkm_wo32(chan->inst, 0x0490, 0x00000111);
	nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
	for (i = 0x04d4; i <= 0x04e0; i += 4)
		nvkm_wo32(chan->inst, i, 0x00030303);
	for (i = 0x04f4; i <= 0x0500; i += 4)
		nvkm_wo32(chan->inst, i, 0x00080000);
	for (i = 0x050c; i <= 0x0518; i += 4)
		nvkm_wo32(chan->inst, i, 0x01012000);
	for (i = 0x051c; i <= 0x0528; i += 4)
		nvkm_wo32(chan->inst, i, 0x000105b8);
	for (i = 0x052c; i <= 0x0538; i += 4)
		nvkm_wo32(chan->inst, i, 0x00080008);
	for (i = 0x055c; i <= 0x0598; i += 4)
		nvkm_wo32(chan->inst, i, 0x07ff0000);
	nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
	nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
	nvkm_wo32(chan->inst, 0x0604, 0x00004000);
	nvkm_wo32(chan->inst, 0x0610, 0x00000001);
	nvkm_wo32(chan->inst, 0x0618, 0x00040000);
	nvkm_wo32(chan->inst, 0x061c, 0x00010000);
	for (i = 0x1c1c; i <= 0x248c; i += 16) {
		nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
		nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
		nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
	}
	nvkm_wo32(chan->inst, 0x281c, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2830, 0x3f800000);
	nvkm_wo32(chan->inst, 0x285c, 0x40000000);
	nvkm_wo32(chan->inst, 0x2860, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2864, 0x3f000000);
	nvkm_wo32(chan->inst, 0x286c, 0x40000000);
	nvkm_wo32(chan->inst, 0x2870, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2878, 0xbf800000);
	nvkm_wo32(chan->inst, 0x2880, 0xbf800000);
	nvkm_wo32(chan->inst, 0x34a4, 0x000fe000);
	nvkm_wo32(chan->inst, 0x3530, 0x000003f8);
	nvkm_wo32(chan->inst, 0x3540, 0x002fe000);
	for (i = 0x355c; i <= 0x3578; i += 4)
		nvkm_wo32(chan->inst, i, 0x001c527c);
	nvkm_done(chan->inst);
	return 0;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

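/* Update one tiling region.  PFIFO is paused and PGRAPH drained so the
 * registers can be rewritten safely; each value is written both to the
 * PGRAPH tile registers and, through the RDI index/data pair, to what
 * appears to be an internal mirror of the same state.  Chipset 0x34
 * (NV34) skips the Z-compression setup.
 */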
void
nv20_gr_tile_prog(struct nvkm_engine *engine, int i)
{
	struct nv20_gr *gr = (void *)engine;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	struct nvkm_fb_tile *tile = &device->fb->tile.region[i];
	unsigned long flags;

	fifo->pause(fifo, &flags);
	nv04_gr_idle(&gr->base);

	nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
	nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
	nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);

	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->limit);
	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->pitch);
	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->addr);

	if (device->chipset != 0x34) {
		nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->zcomp);
	}

	fifo->start(fifo, &flags);
}

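/* PGRAPH interrupt handler: decode the trapped address into channel,
 * subchannel and method, acknowledge the interrupt and re-enable PGRAPH
 * FIFO access, then log the decoded NSOURCE/NSTATUS bitfields together
 * with the owning client's name.
 */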
void
nv20_gr_intr(struct nvkm_subdev *subdev)
{
	struct nv20_gr *gr = (void *)subdev;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fifo_chan *chan;
	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
	u32 chid = (addr & 0x01f00000) >> 20;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
	u32 show = stat;
	char msg[128], src[128], sta[128];
	unsigned long flags;

	chan = nvkm_fifo_chan_chid(device->fifo, chid, &flags);

	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
				   "nstatus %08x [%s] ch %d [%s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   show, msg, nsource, src, nstatus, sta, chid,
			   chan ? chan->object.client->name : "unknown",
			   subc, class, mthd, data);
	}

	nvkm_fifo_chan_put(device->fifo, flags, &chan);
}

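/* Graphics object classes exposed to userspace: the NV04-era 2D classes
 * plus the celsius (0x0096) and kelvin (0x0097) 3D classes.
 */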
static const struct nvkm_gr_func
nv20_gr = {
	.chan_new = nv20_gr_chan_new,
	.sclass = {
		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
		{ -1, -1, 0x0096, &nv04_gr_object }, /* celsius */
		{ -1, -1, 0x0097, &nv04_gr_object }, /* kelvin */
		{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
		{}
	}
};

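/* Create the GR engine and allocate the channel context table: one
 * 32-bit entry per channel (32 entries), aligned to 16 bytes.
 */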
static int
nv20_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	     struct nvkm_oclass *oclass, void *data, u32 size,
	     struct nvkm_object **pobject)
{
	struct nvkm_device *device = (void *)parent;
	struct nv20_gr *gr;
	int ret;

	ret = nvkm_gr_create(parent, engine, oclass, true, &gr);
	*pobject = nv_object(gr);
	if (ret)
		return ret;

	gr->base.func = &nv20_gr;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
			      &gr->ctxtab);
	if (ret)
		return ret;

	nv_subdev(gr)->unit = 0x00001000;
	nv_subdev(gr)->intr = nv20_gr_intr;
	nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
	return 0;
}

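/* Destroy the engine, releasing the channel context table first. */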
void
nv20_gr_dtor(struct nvkm_object *object)
{
	struct nv20_gr *gr = (void *)object;
	nvkm_memory_del(&gr->ctxtab);
	nvkm_gr_destroy(&gr->base);
}

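/* Bring up PGRAPH: point the hardware at the context table, clear stale
 * state over RDI, program debug/control registers and the tiling
 * regions, and mirror the framebuffer/RAM configuration into PGRAPH.
 */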
int
nv20_gr_init(struct nvkm_object *object)
{
	struct nvkm_engine *engine = nv_engine(object);
	struct nv20_gr *gr = (void *)engine;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_fb *fb = device->fb;
	u32 tmp, vramsz;
	int ret, i;

	ret = nvkm_gr_init(&gr->base);
	if (ret)
		return ret;

	nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
		  nvkm_memory_addr(gr->ctxtab) >> 4);

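	/* Clear a block of internal state through the RDI port (15 words
	 * on NV20, 32 words at a different offset on NV25 and later),
	 * then wait for PGRAPH to idle (0x400700 == 0).
	 */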
	if (device->chipset == 0x20) {
		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
		for (i = 0; i < 15; i++)
			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
		nvkm_msec(device, 2000,
			if (!nvkm_rd32(device, 0x400700))
				break;
		);
	} else {
		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
		for (i = 0; i < 32; i++)
			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
		nvkm_msec(device, 2000,
			if (!nvkm_rd32(device, 0x400700))
				break;
		);
	}

	nvkm_wr32(device, NV03_PGRAPH_INTR, 0xFFFFFFFF);
	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
	nvkm_wr32(device, 0x40009C, 0x00000040);

	if (device->chipset >= 0x25) {
		nvkm_wr32(device, 0x400890, 0x00a8cfff);
		nvkm_wr32(device, 0x400610, 0x304B1FB6);
		nvkm_wr32(device, 0x400B80, 0x1cbd3883);
		nvkm_wr32(device, 0x400B84, 0x44000000);
		nvkm_wr32(device, 0x400098, 0x40000080);
		nvkm_wr32(device, 0x400B88, 0x000000ff);
	} else {
		nvkm_wr32(device, 0x400880, 0x0008c7df);
		nvkm_wr32(device, 0x400094, 0x00000005);
		nvkm_wr32(device, 0x400B80, 0x45eae20e);
		nvkm_wr32(device, 0x400B84, 0x24000000);
		nvkm_wr32(device, 0x400098, 0x00000040);
		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000030);
		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000030);
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < fb->tile.regions; i++)
		engine->tile_prog(engine, i);

	nvkm_wr32(device, 0x4009a0, nvkm_rd32(device, 0x100324));
	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, nvkm_rd32(device, 0x100324));

	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nvkm_wr32(device, NV10_PGRAPH_STATE, 0xFFFFFFFF);

	tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) & 0x0007ff00;
	nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
	tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) | 0x00020100;
	nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);

	/* begin RAM config */
	vramsz = nv_device_resource_len(nv_device(gr), 1) - 1;
	nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
	nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, nvkm_rd32(device, 0x100200));
	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, nvkm_rd32(device, 0x100204));
	nvkm_wr32(device, 0x400820, 0);
	nvkm_wr32(device, 0x400824, 0);
	nvkm_wr32(device, 0x400864, vramsz - 1);
	nvkm_wr32(device, 0x400868, vramsz - 1);

	/* interesting.. the below overwrites some of the tile setup above.. */
	nvkm_wr32(device, 0x400B20, 0x00000000);
	nvkm_wr32(device, 0x400B04, 0xFFFFFFFF);

	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
	return 0;
}

struct nvkm_oclass
nv20_gr_oclass = {
	.handle = NV_ENGINE(GR, 0x20),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv20_gr_ctor,
		.dtor = nv20_gr_dtor,
		.init = nv20_gr_init,
		.fini = _nvkm_gr_fini,
	},
};