drm/nouveau/device: include core/device.h automatically for subdevs/engines
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"

#include <core/client.h>
#include <core/handle.h>
#include <engine/fifo.h>
#include <subdev/timer.h>

struct nv50_gr_priv {
	struct nvkm_gr base;
	spinlock_t lock;
	u32 size;
};

struct nv50_gr_chan {
	struct nvkm_gr_chan base;
};

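/* 0x001540 is the unit enable mask for this chip: bits 0-15 flag the TPs
 * present (the trap handlers below walk the same bits) and bits 24-27 appear
 * to flag the MPs within each TP.  The units() hook simply hands the raw
 * value back to the core.
 */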
static u64
nv50_gr_units(struct nvkm_gr *gr)
{
	struct nv50_gr_priv *priv = (void *)gr;

	return nv_rd32(priv, 0x1540);
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

static int
nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nvkm_gpuobj *obj;
	int ret;

	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
				 16, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, nv_mclass(obj));
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	return 0;
}

static struct nvkm_ofuncs
nv50_gr_ofuncs = {
	.ctor = nv50_gr_object_ctor,
	.dtor = _nvkm_gpuobj_dtor,
	.init = _nvkm_gpuobj_init,
	.fini = _nvkm_gpuobj_fini,
	.rd32 = _nvkm_gpuobj_rd32,
	.wr32 = _nvkm_gpuobj_wr32,
};

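/* Per-chipset object class lists.  0x0030 is the null object class, 0x502d
 * the 2D class, 0x5039 M2MF and 0x50c0 compute; the remaining entry is the
 * chipset's Tesla 3D class (0x5097, 0x8297, 0x8397, 0x8597 or 0x8697), with
 * GT215 and MCP89 also exposing the 0x85c0 compute class.  Every class is
 * backed by the same 16-byte gpuobj built in nv50_gr_object_ctor() above.
 */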
static struct nvkm_oclass
nv50_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x5097, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
g84_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8297, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
gt200_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8397, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
gt215_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8597, &nv50_gr_ofuncs },
	{ 0x85c0, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
mcp89_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x85c0, &nv50_gr_ofuncs },
	{ 0x8697, &nv50_gr_ofuncs },
	{}
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

static int
nv50_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	struct nv50_gr_priv *priv = (void *)engine;
	struct nv50_gr_chan *chan;
	int ret;

	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
				     0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
	return 0;
}

static struct nvkm_oclass
nv50_gr_cclass = {
	.handle = NV_ENGCTX(GR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_gr_context_ctor,
		.dtor = _nvkm_gr_context_dtor,
		.init = _nvkm_gr_context_init,
		.fini = _nvkm_gr_context_fini,
		.rd32 = _nvkm_gr_context_rd32,
		.wr32 = _nvkm_gr_context_wr32,
	},
};

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

static const struct nvkm_bitfield nv50_pgr_status[] = {
	{ 0x00000001, "BUSY" }, /* set when any bit is set */
	{ 0x00000002, "DISPATCH" },
	{ 0x00000004, "UNK2" },
	{ 0x00000008, "UNK3" },
	{ 0x00000010, "UNK4" },
	{ 0x00000020, "UNK5" },
	{ 0x00000040, "M2MF" },
	{ 0x00000080, "UNK7" },
	{ 0x00000100, "CTXPROG" },
	{ 0x00000200, "VFETCH" },
	{ 0x00000400, "CCACHE_PREGEOM" },
	{ 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
	{ 0x00001000, "VCLIP" },
	{ 0x00002000, "RATTR_APLANE" },
	{ 0x00004000, "TRAST" },
	{ 0x00008000, "CLIPID" },
	{ 0x00010000, "ZCULL" },
	{ 0x00020000, "ENG2D" },
	{ 0x00040000, "RMASK" },
	{ 0x00080000, "TPC_RAST" },
	{ 0x00100000, "TPC_PROP" },
	{ 0x00200000, "TPC_TEX" },
	{ 0x00400000, "TPC_GEOM" },
	{ 0x00800000, "TPC_MP" },
	{ 0x01000000, "ROP" },
	{}
};

static const char *const nv50_pgr_vstatus_0[] = {
	"VFETCH", "CCACHE", "PREGEOM", "POSTGEOM", "VATTR", "STRMOUT", "VCLIP",
	NULL
};

static const char *const nv50_pgr_vstatus_1[] = {
	"TPC_RAST", "TPC_PROP", "TPC_TEX", "TPC_GEOM", "TPC_MP", NULL
};

static const char *const nv50_pgr_vstatus_2[] = {
	"RATTR", "APLANE", "TRAST", "CLIPID", "ZCULL", "ENG2D", "RMASK",
	"ROP", NULL
};

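/* PGRAPH_VSTATUS0/1/2 pack the state of the units named above into
 * consecutive 3-bit fields, lowest field first; a field value of 1 marks the
 * unit as busy (the same encoding the TLB-flush idle loop below tests for),
 * other values are presumably idle/unknown states.
 */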
static void
nvkm_pgr_vstatus_print(struct nv50_gr_priv *priv, int r,
		       const char *const units[], u32 status)
{
	int i;

	nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);

	for (i = 0; units[i] && status; i++) {
		if ((status & 7) == 1)
			pr_cont(" %s", units[i]);
		status >>= 3;
	}
	if (status)
		pr_cont(" (invalid: 0x%x)", status);
	pr_cont("\n");
}

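/* TLB flush for G84 and newer: pause PGRAPH command fetch (0x400500 bit 0),
 * wait up to ~2 seconds for the VSTATUS registers to report every unit idle,
 * trigger the VM flush at 0x100c80, then re-enable fetching.  Runs under
 * priv->lock so concurrent flushes cannot interleave.
 */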
static int
g84_gr_tlb_flush(struct nvkm_engine *engine)
{
	struct nvkm_timer *ptimer = nvkm_timer(engine);
	struct nv50_gr_priv *priv = (void *)engine;
	bool idle, timeout = false;
	unsigned long flags;
	u64 start;
	u32 tmp;

	spin_lock_irqsave(&priv->lock, flags);
	nv_mask(priv, 0x400500, 0x00000001, 0x00000000);

	start = ptimer->read(ptimer);
	do {
		idle = true;

		for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}
	} while (!idle &&
		 !(timeout = ptimer->read(ptimer) - start > 2000000000));

	if (timeout) {
		nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");

		tmp = nv_rd32(priv, 0x400700);
		nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp);
		nvkm_bitfield_print(nv50_pgr_status, tmp);
		pr_cont("\n");

		nvkm_pgr_vstatus_print(priv, 0, nv50_pgr_vstatus_0,
				       nv_rd32(priv, 0x400380));
		nvkm_pgr_vstatus_print(priv, 1, nv50_pgr_vstatus_1,
				       nv_rd32(priv, 0x400384));
		nvkm_pgr_vstatus_print(priv, 2, nv50_pgr_vstatus_2,
				       nv_rd32(priv, 0x400388));
	}

	nv_wr32(priv, 0x100c80, 0x00000001);
	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
		nv_error(priv, "vm flush timeout\n");
	nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&priv->lock, flags);
	return timeout ? -EBUSY : 0;
}

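/* Decode tables for the per-unit trap status and DATA_ERROR codes reported
 * through the registers read by the trap and interrupt handlers below.
 */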
static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
	{ 0x01, "STACK_UNDERFLOW" },
	{ 0x02, "STACK_MISMATCH" },
	{ 0x04, "QUADON_ACTIVE" },
	{ 0x08, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x20, "PM_OVERFLOW" },
	{ 0x40, "BREAKPOINT" },
	{}
};

static const struct nvkm_bitfield nv50_mpc_traps[] = {
	{ 0x0000001, "LOCAL_LIMIT_READ" },
	{ 0x0000010, "LOCAL_LIMIT_WRITE" },
	{ 0x0000040, "STACK_LIMIT" },
	{ 0x0000100, "GLOBAL_LIMIT_READ" },
	{ 0x0001000, "GLOBAL_LIMIT_WRITE" },
	{ 0x0010000, "MP0" },
	{ 0x0020000, "MP1" },
	{ 0x0040000, "GLOBAL_LIMIT_RED" },
	{ 0x0400000, "GLOBAL_LIMIT_ATOM" },
	{ 0x4000000, "MP2" },
	{}
};

static const struct nvkm_bitfield nv50_tex_traps[] = {
	{ 0x00000001, "" }, /* any bit set? */
	{ 0x00000002, "FAULT" },
	{ 0x00000004, "STORAGE_TYPE_MISMATCH" },
	{ 0x00000008, "LINEAR_MISMATCH" },
	{ 0x00000020, "WRONG_MEMTYPE" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* There must be a *lot* of these. Will take some time to gather them up. */
const struct nvkm_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_OPERATION", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};

static const struct nvkm_bitfield nv50_gr_intr_name[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
	{ 0x00000004, "SURF_WIDTH_OVERRUN" },
	{ 0x00000008, "SURF_HEIGHT_OVERRUN" },
	{ 0x00000010, "DST2D_FAULT" },
	{ 0x00000020, "ZETA_FAULT" },
	{ 0x00000040, "RT_FAULT" },
	{ 0x00000080, "CUDA_FAULT" },
	{ 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
	{ 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
	{ 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
	{ 0x00000800, "DST2D_LINEAR_MISMATCH" },
	{ 0x00001000, "RT_LINEAR_MISMATCH" },
	{}
};

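/* Decode a PROP trap for one TP.  ustatus_addr points at the TP's PROP trap
 * status register; the words that follow it (named e0c..e24 after their NV50
 * register offsets) carry the faulting address and related state, though the
 * meaning of most of these fields is still unknown.
 */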
static void
nv50_priv_prop_trap(struct nv50_gr_priv *priv,
		    u32 ustatus_addr, u32 ustatus, u32 tp)
{
	u32 e0c = nv_rd32(priv, ustatus_addr + 0x04);
	u32 e10 = nv_rd32(priv, ustatus_addr + 0x08);
	u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c);
	u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
	u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
	u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
	u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);

	/* CUDA memory: l[], g[] or stack. */
	if (ustatus & 0x00000080) {
		if (e18 & 0x80000000) {
			/* g[] read fault? */
			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
				 tp, e14, e10 | ((e18 >> 24) & 0x1f));
			e18 &= ~0x1f000000;
		} else if (e18 & 0xc) {
			/* g[] write fault? */
			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
				 tp, e14, e10 | ((e18 >> 7) & 0x1f));
			e18 &= ~0x00000f80;
		} else {
			nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
				 tp, e14, e10);
		}
		ustatus &= ~0x00000080;
	}
	if (ustatus) {
		nv_error(priv, "TRAP_PROP - TP %d -", tp);
		nvkm_bitfield_print(nv50_gr_trap_prop, ustatus);
		pr_cont(" - Address %02x%08x\n", e14, e10);
	}
	nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
		 tp, e0c, e18, e1c, e20, e24);
}

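/* Report and clear MP execution traps for one TP.  Bits 24-27 of 0x001540
 * flag the MPs present; each MP has its own register block (base and stride
 * differ before/after NVA0) holding the trap status, the trapped PC and warp,
 * and the offending opcode pair.
 */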
static void
nv50_priv_mp_trap(struct nv50_gr_priv *priv, int tpid, int display)
{
	u32 units = nv_rd32(priv, 0x1540);
	u32 addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		if (nv_device(priv)->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(priv, addr + 0x10);
		status = nv_rd32(priv, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nv_rd32(priv, addr + 0x20);
			pc = nv_rd32(priv, addr + 0x24);
			oplow = nv_rd32(priv, addr + 0x70);
			ophigh = nv_rd32(priv, addr + 0x74);
			nv_error(priv, "TRAP_MP_EXEC - "
				       "TP %d MP %d:", tpid, i);
			nvkm_bitfield_print(nv50_mp_exec_errors, status);
			pr_cont(" at %06x warp %d, opcode %08x %08x\n",
				pc&0xffffff, pc >> 24,
				oplow, ophigh);
		}
		nv_wr32(priv, addr + 0x10, mp10);
		nv_wr32(priv, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		nv_error(priv, "TRAP_MP_EXEC - TP %d: "
			       "No MPs claiming errors?\n", tpid);
}

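/* Walk every enabled TP (bits 0-15 of 0x001540) and decode one class of TP
 * trap: type 6 is TEXTURE, 7 is MP and 8 is PROP, matching the callers in
 * nv50_gr_trap_handler() below.  ustatus_old/ustatus_new give the per-TP
 * status register base for the pre-NVA0 and NVA0+ layouts; each status is
 * acked by writing 0xc0000000 back.
 */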
static void
nv50_priv_tp_trap(struct nv50_gr_priv *priv, int type, u32 ustatus_old,
		  u32 ustatus_new, int display, const char *name)
{
	int tps = 0;
	u32 units = nv_rd32(priv, 0x1540);
	int i, r;
	u32 ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (nv_device(priv)->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				nv_error(priv, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					nv_error(priv, "\t0x%08x: 0x%08x\n", r,
						 nv_rd32(priv, r));
				if (ustatus) {
					nv_error(priv, "%s - TP%d:", name, i);
					nvkm_bitfield_print(nv50_tex_traps,
							    ustatus);
					pr_cont("\n");
					ustatus = 0;
				}
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x04030000) {
				nv50_priv_mp_trap(priv, i, display);
				ustatus &= ~0x04030000;
			}
			if (ustatus && display) {
				nv_error(priv, "%s - TP%d:", name, i);
				nvkm_bitfield_print(nv50_mpc_traps, ustatus);
				pr_cont("\n");
				ustatus = 0;
			}
			break;
		case 8: /* PROP error */
			if (display)
				nv50_priv_prop_trap(
						priv, ustatus_addr, ustatus, i);
			ustatus = 0;
			break;
		}
		if (ustatus) {
			if (display)
				nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		nv_wr32(priv, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		nv_warn(priv, "%s - No TPs claiming errors?\n", name);
}

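/* Top-level trap decode: 0x400108 has one bit per trapping unit (DISPATCH,
 * M2MF, VFETCH, STRMOUT, CCACHE, the per-TP units, ...).  Each handled unit
 * has its status register acked with 0xc0000000 and its bit written back to
 * 0x400108.  The function returns 0 only when a DISPATCH trap was the sole
 * source and was dealt with, which lets the caller drop the TRAP bit from
 * its report.
 */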
static int
nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
		     int chid, u64 inst, struct nvkm_object *engctx)
{
	u32 status = nv_rd32(priv, 0x400108);
	u32 ustatus;

	if (!status && display) {
		nv_error(priv, "TRAP: no units reporting traps?\n");
		return 1;
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
		}

		nv_wr32(priv, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			u32 addr = nv_rd32(priv, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nv_rd32(priv, 0x40080c);
			u32 datah = nv_rd32(priv, 0x400810);
			u32 class = nv_rd32(priv, 0x400814);
			u32 r848 = nv_rd32(priv, 0x400848);

			nv_error(priv, "TRAP DISPATCH_FAULT\n");
			if (display && (addr & 0x80000000)) {
				nv_error(priv,
					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
					 chid, inst,
					 nvkm_client_name(engctx), subc,
					 class, mthd, datah, datal, addr, r848);
			} else
			if (display) {
				nv_error(priv, "no stuck command?\n");
			}

			nv_wr32(priv, 0x400808, 0);
			nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
			nv_wr32(priv, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			u32 addr = nv_rd32(priv, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nv_rd32(priv, 0x40085c);
			u32 class = nv_rd32(priv, 0x400814);

			nv_error(priv, "TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				nv_error(priv,
					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
					 chid, inst,
					 nvkm_client_name(engctx), subc,
					 class, mthd, data, addr);
			} else
			if (display) {
				nv_error(priv, "no stuck command?\n");
			}

			nv_wr32(priv, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display) {
			nv_error(priv, "TRAP_DISPATCH (unknown "
				       "0x%08x)\n", ustatus);
		}

		nv_wr32(priv, 0x400804, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x001);
		status &= ~0x001;
		if (!status)
			return 0;
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_M2MF");
			nvkm_bitfield_print(nv50_gr_trap_m2mf, ustatus);
			pr_cont("\n");
			nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
				 nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
				 nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(priv, 0x400040, 2);
		nv_wr32(priv, 0x400040, 0);
		nv_wr32(priv, 0x406800, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_VFETCH");
			nvkm_bitfield_print(nv50_gr_trap_vfetch, ustatus);
			pr_cont("\n");
			nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
				 nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
				 nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
		}

		nv_wr32(priv, 0x400c04, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_STRMOUT");
			nvkm_bitfield_print(nv50_gr_trap_strmout, ustatus);
			pr_cont("\n");
			nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
				 nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
				 nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(priv, 0x400040, 0x80);
		nv_wr32(priv, 0x400040, 0);
		nv_wr32(priv, 0x401800, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_CCACHE");
			nvkm_bitfield_print(nv50_gr_trap_ccache, ustatus);
			pr_cont("\n");
			nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
				       " %08x %08x %08x\n",
				 nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
				 nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
				 nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
				 nv_rd32(priv, 0x40501c));
		}

		nv_wr32(priv, 0x405018, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
		if (display)
			nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
		nv_wr32(priv, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
				  "TRAP_TEXTURE");
		nv_wr32(priv, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
				  "TRAP_MP");
		nv_wr32(priv, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* PROP: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
				  "TRAP_PROP");
		nv_wr32(priv, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			nv_error(priv, "TRAP: unknown 0x%08x\n", status);
		nv_wr32(priv, 0x400108, status);
	}

	return 1;
}

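/* PGRAPH interrupt handler: read the interrupt status (0x400100) and the
 * current channel instance (0x40032c), try to satisfy ILLEGAL_MTHD with a
 * software method on the object of the reported class, decode DATA_ERROR and
 * TRAP, then ack the interrupt and re-enable fetching via 0x400500.  Anything
 * still set in "show" afterwards is logged together with the channel, class,
 * method and data that triggered it.
 */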
static void
nv50_gr_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
	struct nvkm_engine *engine = nv_engine(subdev);
	struct nvkm_object *engctx;
	struct nvkm_handle *handle = NULL;
	struct nv50_gr_priv *priv = (void *)subdev;
	u32 stat = nv_rd32(priv, 0x400100);
	u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff;
	u32 addr = nv_rd32(priv, 0x400704);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nv_rd32(priv, 0x400708);
	u32 class = nv_rd32(priv, 0x400814);
	u32 show = stat, show_bitfield = stat;
	int chid;

	engctx = nvkm_engctx_get(engine, inst);
	chid = pfifo->chid(pfifo, engctx);

	if (stat & 0x00000010) {
		handle = nvkm_handle_get_class(engctx, class);
		if (handle && !nv_call(handle->object, mthd, data))
			show &= ~0x00000010;
		nvkm_handle_put(handle);
	}

	if (show & 0x00100000) {
		u32 ecode = nv_rd32(priv, 0x400110);
		nv_error(priv, "DATA_ERROR ");
		nvkm_enum_print(nv50_data_error_names, ecode);
		pr_cont("\n");
		show_bitfield &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		if (!nv50_gr_trap_handler(priv, show, chid, (u64)inst << 12,
					  engctx))
			show &= ~0x00200000;
		show_bitfield &= ~0x00200000;
	}

	nv_wr32(priv, 0x400100, stat);
	nv_wr32(priv, 0x400500, 0x00010001);

	if (show) {
		show &= show_bitfield;
		if (show) {
			nv_error(priv, "%s", "");
			nvkm_bitfield_print(nv50_gr_intr_name, show);
			pr_cont("\n");
		}
		nv_error(priv,
			 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
			 chid, (u64)inst << 12, nvkm_client_name(engctx),
			 subc, class, mthd, data);
	}

	if (nv_rd32(priv, 0x400824) & (1 << 31))
		nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));

	nvkm_engctx_put(engctx);
}

static int
nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	     struct nvkm_oclass *oclass, void *data, u32 size,
	     struct nvkm_object **pobject)
{
	struct nv50_gr_priv *priv;
	int ret;

	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00201000;
	nv_subdev(priv)->intr = nv50_gr_intr;
	nv_engine(priv)->cclass = &nv50_gr_cclass;

	priv->base.units = nv50_gr_units;

	switch (nv_device(priv)->chipset) {
	case 0x50:
		nv_engine(priv)->sclass = nv50_gr_sclass;
		break;
	case 0x84:
	case 0x86:
	case 0x92:
	case 0x94:
	case 0x96:
	case 0x98:
		nv_engine(priv)->sclass = g84_gr_sclass;
		break;
	case 0xa0:
	case 0xaa:
	case 0xac:
		nv_engine(priv)->sclass = gt200_gr_sclass;
		break;
	case 0xa3:
	case 0xa5:
	case 0xa8:
		nv_engine(priv)->sclass = gt215_gr_sclass;
		break;
	case 0xaf:
		nv_engine(priv)->sclass = mcp89_gr_sclass;
		break;
	}

	/* unfortunate hw bug workaround... */
	if (nv_device(priv)->chipset != 0x50 &&
	    nv_device(priv)->chipset != 0xac)
		nv_engine(priv)->tlb_flush = g84_gr_tlb_flush;

	spin_lock_init(&priv->lock);
	return 0;
}

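/* One-time engine init: enable hardware context switching, reset the trap
 * status registers (the 0xc0000000 writes) for the global units and each
 * enabled TP, ack and unmask all traps and interrupts, build and upload the
 * context program via nv50_grctx_init(), clear the current-context pointers,
 * and apply the chipset-dependent zcull setup.
 */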
static int
nv50_gr_init(struct nvkm_object *object)
{
	struct nv50_gr_priv *priv = (void *)object;
	int ret, units, i;

	ret = nvkm_gr_init(&priv->base);
	if (ret)
		return ret;

	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
	nv_wr32(priv, 0x40008c, 0x00000004);

	/* reset/enable traps and interrupts */
	nv_wr32(priv, 0x400804, 0xc0000000);
	nv_wr32(priv, 0x406800, 0xc0000000);
	nv_wr32(priv, 0x400c04, 0xc0000000);
	nv_wr32(priv, 0x401800, 0xc0000000);
	nv_wr32(priv, 0x405018, 0xc0000000);
	nv_wr32(priv, 0x402000, 0xc0000000);

	units = nv_rd32(priv, 0x001540);
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;

		if (nv_device(priv)->chipset < 0xa0) {
			nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
			nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
			nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
		} else {
			nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
			nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
			nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
		}
	}

	nv_wr32(priv, 0x400108, 0xffffffff);
	nv_wr32(priv, 0x400138, 0xffffffff);
	nv_wr32(priv, 0x400100, 0xffffffff);
	nv_wr32(priv, 0x40013c, 0xffffffff);
	nv_wr32(priv, 0x400500, 0x00010001);

	/* upload context program, initialise ctxctl defaults */
	ret = nv50_grctx_init(nv_device(priv), &priv->size);
	if (ret)
		return ret;

	nv_wr32(priv, 0x400824, 0x00000000);
	nv_wr32(priv, 0x400828, 0x00000000);
	nv_wr32(priv, 0x40082c, 0x00000000);
	nv_wr32(priv, 0x400830, 0x00000000);
	nv_wr32(priv, 0x40032c, 0x00000000);
	nv_wr32(priv, 0x400330, 0x00000000);

	/* some unknown zcull magic */
	switch (nv_device(priv)->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nv_wr32(priv, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		if (nv_device(priv)->chipset == 0xa0 ||
		    nv_device(priv)->chipset == 0xaa ||
		    nv_device(priv)->chipset == 0xac) {
			nv_wr32(priv, 0x402ca8, 0x00000802);
		} else {
			nv_wr32(priv, 0x402cc0, 0x00000000);
			nv_wr32(priv, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000);
		nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000);
		nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000);
		nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000);
	}
	return 0;
}

struct nvkm_oclass
nv50_gr_oclass = {
	.handle = NV_ENGINE(GR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_gr_ctor,
		.dtor = _nvkm_gr_dtor,
		.init = nv50_gr_init,
		.fini = _nvkm_gr_fini,
	},
};