drivers/gpu/drm/nouveau/nouveau_irq.c
/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
#include <linux/ratelimit.h>

/* needed for hotplug irq */
#include "nouveau_connector.h"
#include "nv50_display.h"

void
nouveau_irq_preinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);

	if (dev_priv->card_type >= NV_50) {
		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
	}
}

int
nouveau_irq_postinstall(struct drm_device *dev)
{
	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
	return 0;
}

void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}

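/* Execute a software method: look the object class up in the chipset's
 * grclass table and, if a handler is registered for this method, run it
 * on the CPU.  Returns -ENOENT when no software implementation exists.
 */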
static int
nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_pgraph_object_method *grm;
	struct nouveau_pgraph_object_class *grc;

	grc = dev_priv->engine.graph.grclass;
	while (grc->id) {
		if (grc->id == class)
			break;
		grc++;
	}

	if (grc->id != class || !grc->methods)
		return -ENOENT;

	grm = grc->methods;
	while (grm->id) {
		if (grm->id == mthd)
			return grm->exec(chan, class, mthd, data);
		grm++;
	}

	return -ENOENT;
}

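/* Decide whether a method that faulted in PFIFO can be handled in
 * software.  Method 0 binds an object to a subchannel: if the object
 * lives on the software "engine", remember its class and clear the
 * engine field for that subchannel.  Other methods are forwarded to
 * nouveau_call_method() with the class previously bound to the
 * subchannel, provided the subchannel does not appear to be routed to a
 * hardware engine.
 */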
static bool
nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
{
	struct drm_device *dev = chan->dev;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;

	if (mthd == 0x0000) {
		struct nouveau_gpuobj *gpuobj;

		gpuobj = nouveau_ramht_find(chan, data);
		if (!gpuobj)
			return false;

		if (gpuobj->engine != NVOBJ_ENGINE_SW)
			return false;

		chan->sw_subchannel[subc] = gpuobj->class;
		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
		return true;
	}

	/* hw object */
	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc * 4)))
		return false;

	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
		return false;

	return true;
}

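/* PFIFO interrupt service.  Spins until the interrupt status clears (or
 * 100 iterations), handling CACHE_ERROR (invalid or software methods),
 * DMA_PUSHER errors and semaphore wakeups, and logging anything else.
 * Puller reassignment is disabled while an interrupt is processed and
 * restored on the way out.
 */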
static void
nouveau_fifo_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t status, reassign;
	int cnt = 0;

	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		struct nouveau_channel *chan = NULL;
		uint32_t chid, get;

		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid = engine->fifo.channel_id(dev);
		if (chid >= 0 && chid < engine->fifo.channels)
			chan = dev_priv->fifos[chid];
		get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					     "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			u32 get = nv_rd32(dev, 0x003244);
			u32 put = nv_rd32(dev, 0x003240);
			u32 push = nv_rd32(dev, 0x003220);
			u32 state = nv_rd32(dev, 0x003228);

			if (dev_priv->card_type == NV_50) {
				u32 ho_get = nv_rd32(dev, 0x003328);
				u32 ho_put = nv_rd32(dev, 0x003320);
				u32 ib_get = nv_rd32(dev, 0x003334);
				u32 ib_put = nv_rd32(dev, 0x003330);

				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
					"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
					"State 0x%08x Push 0x%08x\n",
					chid, ho_get, get, ho_put, put, ib_get, ib_put,
					state, push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(dev, 0x003364, 0x00000000);
				if (get != put || ho_get != ho_put) {
					nv_wr32(dev, 0x003244, put);
					nv_wr32(dev, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(dev, 0x003334, ib_put);
				}
			} else {
				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
					"Put 0x%08x State 0x%08x Push 0x%08x\n",
					chid, get, put, state, push);

				if (get != put)
					nv_wr32(dev, 0x003244, put);
			}

			nv_wr32(dev, 0x003228, 0x00000000);
			nv_wr32(dev, 0x003220, 0x00000001);
			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (dev_priv->card_type == NV_50) {
			if (status & 0x00000010) {
				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
				status &= ~0x00000010;
				nv_wr32(dev, 0x002100, 0x00000010);
			}
		}

		if (status) {
			NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
				status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}

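/* Human-readable names for the PGRAPH NSTATUS/NSOURCE bitfields, used
 * when dumping trap information below.
 */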
struct nouveau_bitfield_names {
	uint32_t mask;
	const char *name;
};

static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
};

static void
nouveau_print_bitfield_names_(uint32_t value,
			      const struct nouveau_bitfield_names *namelist,
			      const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		uint32_t mask = namelist[i].mask;
		if (value & mask) {
			printk(" %s", namelist[i].name);
			value &= ~mask;
		}
	}
	if (value)
		printk(" (unknown bits 0x%08x)", value);
}
#define nouveau_print_bitfield_names(val, namelist) \
	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))

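/* Same idea as the bitfield tables above, but for registers that hold a
 * single enumerated value rather than a mask of flags.
 */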
struct nouveau_enum_names {
	uint32_t value;
	const char *name;
};

static void
nouveau_print_enum_names_(uint32_t value,
			  const struct nouveau_enum_names *namelist,
			  const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		if (value == namelist[i].value) {
			printk("%s", namelist[i].name);
			return;
		}
	}
	printk("unknown value 0x%08x", value);
}
#define nouveau_print_enum_names(val, namelist) \
	nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))

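/* Work out which channel owns the currently active PGRAPH context by
 * comparing the instance address read from 0x40032c against each
 * channel's context; pre-NV40 chips have no such register, so the
 * out-of-range channel count is returned instead.
 */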
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	if (dev_priv->card_type < NV_40)
		return dev_priv->engine.fifo.channels;
	else
	if (dev_priv->card_type < NV_50) {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;

		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			struct nouveau_channel *chan = dev_priv->fifos[i];

			if (!chan || !chan->ramin_grctx)
				continue;

			if (inst == chan->ramin_grctx->pinst)
				break;
		}
	} else {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;

		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			struct nouveau_channel *chan = dev_priv->fifos[i];

			if (!chan || !chan->ramin)
				continue;

			if (inst == chan->ramin->vinst)
				break;
		}
	}

	return i;
}

static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int channel;

	if (dev_priv->card_type < NV_10)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
	else
	if (dev_priv->card_type < NV_40)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	else
		channel = nouveau_graph_chid_from_grctx(dev);

	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
		return -EINVAL;
	}

	*channel_ret = channel;
	return 0;
}

struct nouveau_pgraph_trap {
	int channel;
	int class;
	int subc, mthd, size;
	uint32_t data, data2;
	uint32_t nsource, nstatus;
};

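/* Gather the state of the most recent PGRAPH trap (channel, subchannel,
 * class, method and data) from the TRAPPED_ADDR/TRAPPED_DATA registers.
 * Which bits encode what varies by generation, hence the card_type
 * checks.
 */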
static void
nouveau_graph_trap_info(struct drm_device *dev,
			struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t address;

	trap->nsource = trap->nstatus = 0;
	if (dev_priv->card_type < NV_50) {
		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
	}

	if (nouveau_graph_trapped_channel(dev, &trap->channel))
		trap->channel = -1;
	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

	trap->mthd = address & 0x1FFC;
	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
	if (dev_priv->card_type < NV_10) {
		trap->subc = (address >> 13) & 0x7;
	} else {
		trap->subc = (address >> 16) & 0x7;
		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
	}

	if (dev_priv->card_type < NV_10)
		trap->class = nv_rd32(dev, 0x400180 + trap->subc * 4) & 0xFF;
	else if (dev_priv->card_type < NV_40)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc * 4) & 0xFFF;
	else if (dev_priv->card_type < NV_50)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc * 4) & 0xFFFF;
	else
		trap->class = nv_rd32(dev, 0x400814);
}

static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
			     struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;

	if (dev_priv->card_type < NV_50) {
		NV_INFO(dev, "%s - nSource:", id);
		nouveau_print_bitfield_names(nsource, nsource_names);
		printk(", nStatus:");
		if (dev_priv->card_type < NV_10)
			nouveau_print_bitfield_names(nstatus, nstatus_names);
		else
			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
		printk("\n");
	}

	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
		     "Data 0x%08x:0x%08x\n",
		id, trap->channel, trap->subc,
		trap->class, trap->mthd,
		trap->data2, trap->data);
}

static int
nouveau_pgraph_intr_swmthd(struct drm_device *dev,
			   struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (trap->channel < 0 ||
	    trap->channel >= dev_priv->engine.fifo.channels ||
	    !dev_priv->fifos[trap->channel])
		return -ENODEV;

	return nouveau_call_method(dev_priv->fifos[trap->channel],
				   trap->class, trap->mthd, trap->data);
}

static inline void
nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled)
		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
}

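/* Limit error dumps to a burst of 20 per 3 second window so a wedged
 * channel cannot flood the kernel log.
 */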
static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

static int nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}

static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);
	trap.nsource = nsource;

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
		uint32_t v = nv_rd32(dev, 0x402000);
		nv_wr32(dev, 0x402000, v);

		/* dump the error anyway for now: it's useful for
		   Gallium development */
		unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled && nouveau_ratelimit())
		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}

static inline void
nouveau_pgraph_intr_context_switch(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t chid;

	chid = engine->fifo.channel_id(dev);
	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);

	switch (dev_priv->card_type) {
	case NV_04:
		nv04_graph_context_switch(dev);
		break;
	case NV_10:
		nv10_graph_context_switch(dev);
		break;
	default:
		NV_ERROR(dev, "Context switch not implemented\n");
		break;
	}
}

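/* PGRAPH interrupt service for pre-NV50 chips: NOTIFY, ERROR and
 * CONTEXT_SWITCH are handled explicitly, anything else is logged and
 * acknowledged.
 */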
static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);

			nouveau_pgraph_intr_context_switch(dev);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}

static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
	{ 3, "STACK_UNDERFLOW" },
	{ 4, "QUADON_ACTIVE" },
	{ 8, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x40, "BREAKPOINT" },
};

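/* Report execution errors from the MPs (shader processors) within one
 * TP.  The register layout appears to differ between pre-NVA0 and NVA0+
 * chips, hence the two address formulas.
 */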
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i + 24)))
			continue;
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_print_enum_names(status,
					nv50_mp_exec_error_names);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc & 0xffffff, pc >> 24,
					oplow, ophigh);
		}
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}

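/* Walk all enabled TPs and decode a per-TP trap of the given type:
 * 6 = texture, 7 = MP execution, 8 = TPDMA (uncached memory access).
 * ustatus_old/ustatus_new are the per-TP status offsets for the pre-NVA0
 * and NVA0+ layouts respectively.
 */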
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			nv50_fb_vm_trap(dev, display, name);
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			nv50_fb_vm_trap(dev, display, name);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}

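/* Decode the NV50 PGRAPH trap status (0x400108).  Each bit corresponds
 * to a unit (DISPATCH, M2MF, VFETCH, STRMOUT, CCACHE, TEXTURE, MP,
 * TPDMA, plus one unknown bit); each unit has its own ustatus register
 * which is dumped, cleared and acknowledged here.
 */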
static void
nv50_pgraph_trap_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	uint32_t status = nv_rd32(dev, 0x400108);
	uint32_t ustatus;
	int display = nouveau_ratelimit();

	if (!status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x400808) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40080c);
					trap.data2 = nv_rd32(dev, 0x400810);
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
				}
				nv_wr32(dev, 0x400808, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
			}
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40085c);
					trap.data2 = 0;
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
				}
				nv_wr32(dev, 0x40084c, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
			}
			ustatus &= ~0x00000002;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
	}

	/* TRAPs other than dispatch use the "normal" trap regs. */
	if (status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev,
				"PGRAPH_TRAP", &trap);
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
			ustatus &= ~0x00000002;
		}
		if (ustatus & 0x00000004) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
			ustatus &= ~0x00000004;
		}
		NV_INFO(dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x406804),
				nv_rd32(dev, 0x406808),
				nv_rd32(dev, 0x40680c),
				nv_rd32(dev, 0x406810));
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x400c00),
					nv_rd32(dev, 0x400c08),
					nv_rd32(dev, 0x400c0c),
					nv_rd32(dev, 0x400c10));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x401804),
					nv_rd32(dev, 0x401808),
					nv_rd32(dev, 0x40180c),
					nv_rd32(dev, 0x401810));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x405800),
					nv_rd32(dev, 0x405804),
					nv_rd32(dev, 0x405808),
					nv_rd32(dev, 0x40580c),
					nv_rd32(dev, 0x405810),
					nv_rd32(dev, 0x405814),
					nv_rd32(dev, 0x40581c));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
				"PGRAPH_TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
				"PGRAPH_TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
				"PGRAPH_TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
				status);
		nv_wr32(dev, 0x400108, status);
	}
}

/* There must be a *lot* of these. Will take some time to gather them up. */
static struct nouveau_enum_names nv50_data_error_names[] =
{
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_ENUM" },
	{ 8, "INVALID_OBJECT" },
	{ 0xc, "INVALID_BITFIELD" },
	{ 0x28, "MP_NO_REG_SPACE" },
	{ 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
};

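/* Top-level PGRAPH interrupt service for NV50 and later: notifications,
 * illegal methods/classes, data errors, context switch requests and
 * traps all arrive through NV03_PGRAPH_INTR and are dispatched from
 * here.
 */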
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
		if (status & 0x00000001) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_NOTIFY", &trap);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
		 * when you write 0x200 to 0x50c0 method 0x31c. */
		if (status & 0x00000002) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_COMPUTE_QUERY", &trap);
			status &= ~0x00000002;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
		}

		/* Unknown, never seen: 0x4 */

		/* ILLEGAL_MTHD: You used a wrong method for this class. */
		if (status & 0x00000010) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_pgraph_intr_swmthd(dev, &trap))
				unhandled = 1;
			if (unhandled && nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_MTHD", &trap);
			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

		/* ILLEGAL_CLASS: You used a wrong class. */
		if (status & 0x00000020) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_CLASS", &trap);
			status &= ~0x00000020;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
		}

		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
		if (status & 0x00000040) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DOUBLE_NOTIFY", &trap);
			status &= ~0x00000040;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
		}

		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
		if (status & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

			nv50_graph_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

		/* BUFFER_NOTIFY: Your m2mf transfer finished */
		if (status & 0x00010000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_BUFFER_NOTIFY", &trap);
			status &= ~0x00010000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
		}

		/* DATA_ERROR: Invalid value for this method, or invalid
		 * state in current PGRAPH context for this operation */
		if (status & 0x00100000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit()) {
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DATA_ERROR", &trap);
				NV_INFO(dev, "PGRAPH_DATA_ERROR - ");
				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
						nv50_data_error_names);
				printk("\n");
			}
			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

		/* TRAP: Something bad happened in the middle of command
		 * execution. Has a billion types, subtypes, and even
		 * subsubtypes. */
		if (status & 0x00200000) {
			nv50_pgraph_trap_handler(dev);
			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

		/* Unknown, never seen: 0x00400000 */

		/* SINGLE_STEP: Happens on every method if you turned on
		 * single stepping in 40008c */
		if (status & 0x01000000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_SINGLE_STEP", &trap);
			status &= ~0x01000000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
		}

		/* 0x02000000 happens when you pause a ctxprog...
		 * but the only way this can happen that I know is by
		 * poking the relevant MMIO register, and we don't
		 * do that. */

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		{
			const int isb = (1 << 16) | (1 << 0);

			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}

static void
nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{
	if (crtc & 1)
		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);

	if (crtc & 2)
		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
}

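/* Main interrupt entry point.  Reads the PMC interrupt status and fans
 * the work out to the PFIFO, PGRAPH, CRTC and (on NV50) display/I2C
 * handlers while holding the context switch lock.
 */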
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status;
	unsigned long flags;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);

		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		nouveau_crtc_irq_handler(dev, (status >> 24) & 3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
		nv50_display_irq_handler(dev);
		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
			    NV_PMC_INTR_0_NV50_I2C_PENDING);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	return IRQ_HANDLED;
}