/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"
static const char *engine_str(int engine)
{
	switch (engine) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	default: return "";
	}
}
static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}
static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
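
/*
 * Note the two-pass pattern below: while still seeking towards the window
 * start, the format string is first rendered into a NULL buffer purely to
 * learn its length (vsnprintf(NULL, 0, ...)), using a va_copy so that the
 * original va_list is preserved for the second, emitting pass.
 */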
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek to the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}
static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek to the first write which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
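
/*
 * Each captured buffer object is printed on a single line: GTT offset,
 * size, read domains and write domain, the last-read seqno for every
 * engine, the last-write seqno, and then the terse flags (tiling, dirty,
 * purgeable, userptr, owning engine, cache level) decoded by the helpers
 * above.
 */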
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_ENGINES; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->engine != -1 ? " " : "");
		err_puts(m, engine_str(err->engine));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}
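
/*
 * Pretty-print the per-engine state snapshotted by
 * error_record_engine_registers(); registers that only exist on some
 * generations (BBADDR, semaphore mailboxes, PPGTT descriptors) are printed
 * only where they were actually captured.
 */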
static void error_print_engine(struct drm_i915_error_state_buf *m,
			       struct drm_i915_error_engine *ee)
{
	err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x\n", ee->head);
	err_printf(m, "  TAIL:  0x%08x\n", ee->tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd >> 32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  INSTDONE: 0x%08x\n", ee->instdone);
	if (ee->batchbuffer) {
		u64 start = ee->batchbuffer->gtt_offset;
		u64 end = start + ee->batchbuffer->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (INTEL_GEN(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr >> 32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ee->semaphore_mboxes[0],
			   ee->semaphore_seqno[0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ee->semaphore_mboxes[1],
			   ee->semaphore_seqno[1]);
		if (HAS_VEBOX(m->i915)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ee->semaphore_mboxes[2],
				   ee->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (INTEL_GEN(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
	err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
	err_printf(m, "  waiting: %s\n", yesno(ee->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ee->hangcheck_action),
		   ee->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x :  %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   const struct intel_device_info *info)
{
#define PRINT_FLAG(x)  err_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON
}
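
/*
 * Top-level formatter behind the debugfs/sysfs "error" file: a header
 * (error message, timestamp, kernel version, device capabilities), PCI and
 * interrupt state, per-engine register dumps, the active/pinned buffer
 * lists, and finally hexdumps of the captured batchbuffers, ringbuffers,
 * status pages and contexts.
 */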
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	err_print_capabilities(m, &error->device_info);
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->engine[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_score == max_hangcheck_score &&
		    error->engine[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   engine_str(i),
				   error->engine[i].comm,
				   error->engine[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(dev))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].engine_id != -1)
			error_print_engine(m, &error->engine[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
		char buf[128];
		int len, first = 1;

		if (!error->active_vm[i])
			break;

		len = scnprintf(buf, sizeof(buf), "Active (");
		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
			if (error->engine[j].vm != error->active_vm[i])
				continue;

			len += scnprintf(buf + len, sizeof(buf), "%s%s",
					 first ? "" : ", ",
					 dev_priv->engine[j].name);
			first = 0;
		}
		scnprintf(buf + len, sizeof(buf), ")");
		print_error_buffers(m, buf,
				    error->active_bo[i],
				    error->active_bo_count[i]);
	}

	print_error_buffers(m, "Pinned (global)",
			    error->pinned_bo,
			    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		obj = ee->batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i].name);
			if (ee->pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   ee->comm,
					   ee->pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		obj = ee->wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if (ee->num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i].name,
				   ee->num_requests);
			for (j = 0; j < ee->num_requests; j++) {
				err_printf(m, "  pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
					   ee->requests[j].pid,
					   ee->requests[j].seqno,
					   ee->requests[j].jiffies,
					   ee->requests[j].head,
					   ee->requests[j].tail);
			}
		}

		if (IS_ERR(ee->waiters)) {
			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
				   dev_priv->engine[i].name);
		} else if (ee->num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   dev_priv->engine[i].name,
				   ee->num_waiters);
			for (j = 0; j < ee->num_waiters; j++) {
				err_printf(m, " seqno 0x%08x for %s [%d]\n",
					   ee->waiters[j].seqno,
					   ee->waiters[j].comm,
					   ee->waiters[j].pid);
			}
		}

		if ((obj = ee->ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if ((obj = ee->hws_page)) {
			u64 hws_offset = obj->gtt_offset;
			u32 *hws_page = &obj->pages[0][0];

			if (i915.enable_execlists) {
				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
			}
			err_printf(m, "%s --- HW Status = 0x%08llx\n",
				   dev_priv->engine[i].name, hws_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   hws_page[elt],
					   hws_page[elt+1],
					   hws_page[elt+2],
					   hws_page[elt+3]);
				offset += 16;
			}
		}

		obj = ee->wa_ctx;
		if (obj) {
			u64 wa_ctx_offset = obj->gtt_offset;
			u32 *wa_ctx_page = &obj->pages[0][0];
			struct intel_engine_cs *engine = &dev_priv->engine[RCS];
			u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
					   engine->wa_ctx.per_ctx.size);

			err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
				   dev_priv->engine[i].name, wa_ctx_offset);
			offset = 0;
			for (elt = 0; elt < wa_ctx_size; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   wa_ctx_page[elt + 0],
					   wa_ctx_page[elt + 1],
					   wa_ctx_page[elt + 2],
					   wa_ctx_page[elt + 3]);
				offset += 16;
			}
		}

		if ((obj = ee->ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore)) {
		err_printf(m, "Semaphore page = 0x%08x\n",
			   lower_32_bits(obj->gtt_offset));
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
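
/*
 * A sketch of the expected consumer (the real read path lives in the
 * debugfs/sysfs error-state handlers): size and position the buffer for
 * the requested read window, render into it, then copy buf.buf out to
 * userspace and free it.
 *
 *	struct drm_i915_error_state_buf buf;
 *	int ret;
 *
 *	ret = i915_error_state_buf_init(&buf, dev_priv, count, *pos);
 *	if (ret)
 *		return ret;
 *	ret = i915_error_state_to_str(&buf, &error_priv);
 *	if (ret == 0)
 *		ret = copy_to_user(userbuf, buf.buf, buf.bytes) ?
 *			-EFAULT : buf.bytes;
 *	kfree(buf.buf);
 */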
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		i915_error_object_free(ee->batchbuffer);
		i915_error_object_free(ee->wa_batchbuffer);
		i915_error_object_free(ee->ringbuffer);
		i915_error_object_free(ee->hws_page);
		i915_error_object_free(ee->ctx);
		i915_error_object_free(ee->wa_ctx);

		kfree(ee->requests);
		if (!IS_ERR_OR_NULL(ee->waiters))
			kfree(ee->waiters);
	}

	i915_error_object_free(error->semaphore);

	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
		kfree(error->active_bo[i]);
	kfree(error->pinned_bo);

	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
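
/*
 * Copy the contents of a vma into a freshly allocated error object. Two
 * paths: a readback through the GGTT aperture (atomic WC iomap) when the
 * object is uncached, globally bound and within the mappable range, which
 * is the only option for stolen memory since it has no direct CPU address,
 * and otherwise a clflushed kmap copy of each backing page. Both run page
 * by page with interrupts off, into GFP_ATOMIC buffers, since capture may
 * run in atomic context.
 */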
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *src;
	struct drm_i915_error_object *dst;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u64 reloc_offset;

	if (!vma)
		return NULL;

	src = vma->obj;
	if (!src->pages)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;

	reloc_offset = dst->gtt_offset;
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		   (vma->flags & I915_VMA_GLOBAL_BIND) &&
		   reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
			goto unwind;

		reloc_offset = vma->node.start;
		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
	    !HAS_LLC(dev_priv))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(&ggtt->mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
/* The error capture is special as it tries to run underneath the normal
 * locking rules - so we use the raw version of the i915_gem_active lookup.
 */
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
	return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
}
static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
	struct intel_engine_cs *engine;

	engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
	return engine ? engine->id : -1;
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;

	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
	err->wseqno = __active_get_seqno(&obj->last_write);
	err->engine = __active_get_engine_id(&obj->last_write);

	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = vma->fence ? vma->fence->id : -1;
	err->tiling = i915_gem_object_get_tiling(obj);
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->cache_level = obj->cache_level;
}
static u32 capture_error_bo(struct drm_i915_error_buffer *err,
			    int count, struct list_head *head,
			    bool pinned_only)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		if (pinned_only && !i915_vma_is_pinned(vma))
			continue;

		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}
/* Generate a semi-unique error code. The code is not meant to have meaning.
 * The code's only purpose is to try to prevent false duplicated bug reports
 * by grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *engine_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it has some very
	 * common synchronization commands which almost always appear in
	 * cases that are strictly a client bug. Use instdone to
	 * differentiate those somewhat.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
			if (engine_id)
				*engine_id = i;

			return error->engine[i].ipehr ^ error->engine[i].instdone;
		}
	}

	return error_code;
}
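
/*
 * Fence registers moved and widened over the generations: 32bit FENCE_REG
 * on gen2/3, 64bit FENCE_REG_965 on gen4/5, and FENCE_REG_GEN6 from gen6
 * onwards, hence the three read loops below.
 */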
static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	int i;

	if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	} else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	}
}
static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
					struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *to;
	enum intel_engine_id id;

	if (!error->semaphore)
		return;

	for_each_engine_id(to, dev_priv, id) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (engine == to)
			continue;

		signal_offset =
			(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
		tmp = error->semaphore->pages[0];
		idx = intel_engine_sync_index(engine, to);

		ee->semaphore_mboxes[idx] = tmp[signal_offset];
		ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
	}
}
static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
	ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv)) {
		ee->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
		ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
	}
}
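
/*
 * Waiter capture runs in the error path and therefore only ever trylocks
 * the breadcrumbs lock: on contention it records ERR_PTR(-EDEADLK) rather
 * than risk a deadlock. The waiter rbtree is walked twice, once to size
 * the allocation and once to fill it, re-checking the count in case the
 * tree changed between the two walks.
 */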
static void error_record_engine_waiters(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ee->num_waiters = 0;
	ee->waiters = NULL;

	if (RB_EMPTY_ROOT(&b->waiters))
		return;

	if (!spin_trylock(&b->lock)) {
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	spin_unlock(&b->lock);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	if (!spin_trylock(&b->lock)) {
		kfree(waiter);
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	ee->waiters = waiter;
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ee->num_waiters == count)
			break;
	}
	spin_unlock(&b->lock);
}
static void error_record_engine_registers(struct drm_i915_error_state *error,
					  struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) >= 6) {
		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (INTEL_GEN(dev_priv) >= 8)
			gen8_record_semaphore_state(error, engine, ee);
		else
			gen6_record_semaphore_state(engine, ee);
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ee->faddr = I915_READ(DMA_FADD_I8XX);
		ee->ipeir = I915_READ(IPEIR);
		ee->ipehr = I915_READ(IPEHR);
		ee->instdone = I915_READ(GEN2_INSTDONE);
	}

	ee->waiting = intel_engine_has_waiter(engine);
	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ee->acthd = intel_engine_get_active_head(engine);
	ee->seqno = intel_engine_get_seqno(engine);
	ee->last_seqno = engine->last_submitted_seqno;
	ee->start = I915_READ_START(engine);
	ee->head = I915_READ_HEAD(engine);
	ee->tail = I915_READ_TAIL(engine);
	ee->ctl = I915_READ_CTL(engine);
	if (INTEL_GEN(dev_priv) > 2)
		ee->mode = I915_READ_MODE(engine);

	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = I915_READ(mmio);
	}

	ee->hangcheck_score = engine->hangcheck.score;
	ee->hangcheck_action = engine->hangcheck.action;

	if (USES_PPGTT(dev_priv)) {
		int i;

		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}
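
/*
 * Snapshot the list of in-flight requests, starting from the request that
 * was found active at hang time. The list is sized and then copied without
 * the usual locking, so the fill loop re-checks the bound and bails out
 * early if the list changed underneath us (see the comment in the loop).
 */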
static void engine_record_requests(struct intel_engine_cs *engine,
				   struct drm_i915_gem_request *first,
				   struct drm_i915_error_engine *ee)
{
	struct drm_i915_gem_request *request;
	int count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->request_list, link)
		count++;
	if (!count)
		return;

	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
	if (!ee->requests)
		return;

	ee->num_requests = count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->request_list, link) {
		struct drm_i915_error_request *erq;

		if (count >= ee->num_requests) {
			/*
			 * If the ring request list was changed in
			 * between the point where the error request
			 * list was created and dimensioned and this
			 * point then just exit early to avoid crashes.
			 *
			 * We don't need to communicate that the
			 * request list changed state during error
			 * state capture and that the error state is
			 * slightly incorrect as a consequence since we
			 * are typically only interested in the request
			 * list state at the point of error state
			 * capture, not in any changes happening during
			 * the capture.
			 */
			break;
		}

		erq = &ee->requests[count++];
		erq->seqno = request->fence.seqno;
		erq->jiffies = request->emitted_jiffies;
		erq->head = request->head;
		erq->tail = request->tail;

		rcu_read_lock();
		erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
		rcu_read_unlock();
	}
	ee->num_requests = count;
}
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
				  struct drm_i915_error_state *error)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int i;

	error->semaphore =
		i915_error_object_create(dev_priv, dev_priv->semaphore);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = &dev_priv->engine[i];
		struct drm_i915_error_engine *ee = &error->engine[i];
		struct drm_i915_gem_request *request;

		ee->pid = -1;
		ee->engine_id = -1;

		if (!intel_engine_initialized(engine))
			continue;

		ee->engine_id = i;

		error_record_engine_registers(error, engine, ee);
		error_record_engine_waiters(engine, ee);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct intel_ring *ring;
			struct pid *pid;

			ee->vm = request->ctx->ppgtt ?
				&request->ctx->ppgtt->base : &ggtt->base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			ee->batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch);

			if (HAS_BROKEN_CS_TLB(dev_priv))
				ee->wa_batchbuffer =
					i915_error_object_create(dev_priv,
								 engine->scratch);

			ee->ctx =
				i915_error_object_create(dev_priv,
							 request->ctx->engine[i].state);

			pid = request->ctx->pid;
			if (pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(pid, PIDTYPE_PID);
				if (task) {
					strcpy(ee->comm, task->comm);
					ee->pid = task->pid;
				}
				rcu_read_unlock();
			}

			error->simulated |=
				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;

			ring = request->ring;
			ee->cpu_ring_head = ring->head;
			ee->cpu_ring_tail = ring->tail;
			ee->ringbuffer =
				i915_error_object_create(dev_priv, ring->vma);

			engine_record_requests(engine, request, ee);
		}

		ee->hws_page =
			i915_error_object_create(dev_priv,
						 engine->status_page.vma);

		ee->wa_ctx =
			i915_error_object_create(dev_priv, engine->wa_ctx.vma);
	}
}
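
/*
 * Each unique address space (the GGTT or a per-context PPGTT) referenced by
 * a hung engine gets its active list flattened into an array of
 * drm_i915_error_buffer records, bounded by the count taken just before
 * the allocation.
 */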
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				int idx)
{
	struct drm_i915_error_buffer *active_bo;
	struct i915_vma *vma;
	int count;

	count = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count++;

	active_bo = NULL;
	if (count)
		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
	if (active_bo)
		count = capture_error_bo(active_bo, count, &vm->active_list, false);
	else
		count = 0;

	error->active_vm[idx] = vm;
	error->active_bo[idx] = active_bo;
	error->active_bo_count[idx] = count;
}
static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error)
{
	int cnt = 0, i, j;

	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));

	/* Scan each engine looking for unique active contexts/vm */
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];
		bool found;

		if (!ee->vm)
			continue;

		found = false;
		for (j = 0; j < i && !found; j++)
			found = error->engine[j].vm == ee->vm;
		if (!found)
			i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
	}
}
static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error)
{
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct drm_i915_error_buffer *bo;
	struct i915_vma *vma;
	int count_inactive, count_active;

	count_inactive = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count_inactive++;

	count_active = 0;
	list_for_each_entry(vma, &vm->inactive_list, vm_link)
		count_active++;

	bo = NULL;
	if (count_inactive + count_active)
		bo = kcalloc(count_inactive + count_active,
			     sizeof(*bo), GFP_ATOMIC);
	if (!bo)
		return;

	count_inactive = capture_error_bo(bo, count_inactive,
					  &vm->active_list, true);
	count_active = capture_error_bo(bo + count_inactive, count_active,
					&vm->inactive_list, true);
	error->pinned_bo_count = count_inactive + count_active;
	error->pinned_bo = bo;
}
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev_priv, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	u32 ecode;
	int engine_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &engine_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_GEN(dev_priv), engine_id, ecode);

	if (engine_id != -1 && error->engine[engine_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->engine[engine_id].comm,
				 error->engine[engine_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;

	memcpy(&error->device_info,
	       INTEL_INFO(dev_priv),
	       sizeof(error->device_info));
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_error_state *error;
	unsigned long flags;

	if (READ_ONCE(dev_priv->gpu_error.first_error))
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_record_fences(dev_priv, error);
	i915_gem_record_rings(dev_priv, error);
	i915_capture_active_buffers(dev_priv, error);
	i915_capture_pinned_buffers(dev_priv, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev_priv);
	error->display = intel_display_capture_error_state(dev_priv);

	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	if (!error->simulated) {
		spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
		if (!dev_priv->gpu_error.first_error) {
			dev_priv->gpu_error.first_error = error;
			error = NULL;
		}
		spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	}

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			 dev_priv->drm.primary->index);
		warned = true;
	}
}
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
			     uint32_t *instdone)
{
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
		instdone[0] = I915_READ(GEN2_INSTDONE);
	else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN4_INSTDONE1);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
	}
}