/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
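
/*
 * This file implements the i915 debugfs interface: a collection of
 * read-only seq_file dumps (plus a few writable attributes) exposed via
 * drm's debugfs directory for inspecting GEM, display, interrupt and
 * power-management state.
 */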
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *)key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
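
/*
 * The helpers below condense per-object state into the single-character
 * flags printed by describe_obj(): active, pinned for display, tiling
 * mode, bound in the global GTT, and kernel-mapped.
 */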
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_display ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mapping ? 'M' : ' ';
}
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;
	enum intel_engine_id id;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain);
	for_each_engine_id(engine, dev_priv, id)
		seq_printf(m, "%x ",
			   i915_gem_active_get_seqno(&obj->last_read[id],
						     &obj->base.dev->struct_mutex));
	seq_printf(m, "] %x %s%s%s",
		   i915_gem_active_get_seqno(&obj->last_write,
					     &obj->base.dev->struct_mutex),
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (i915_vma_is_ggtt(vma))
			seq_printf(m, ", type: %u", vma->ggtt_view.type);
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (obj->pin_display || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_display)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}

	engine = i915_gem_active_get_engine(&obj->last_write,
					    &dev_priv->drm.struct_mutex);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};
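
/*
 * idr_for_each() callback: accumulates one object's sizes into the
 * per-client file_stats bucket, classifying each VMA as global (GGTT)
 * or per-process (ppGTT) and as active or inactive.
 */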
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
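
/*
 * Emit one summary line for a stats bucket; "name" labels the client,
 * e.g. "[k]batch pool" or a task comm at the call sites below.
 */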
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count;
	u64 size, mapped_size, purgeable_size, dpy_size;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size;
		++count;

		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		size += obj->base.size;
		++count;

		if (obj->pin_display) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}
	}
	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		mutex_lock(&dev->struct_mutex);
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_list);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	bool show_pin_display_only = !!node->info_ent->data;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (show_pin_display_only && !obj->pin_display)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->flip_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 pending;
			u32 addr;

			pending = atomic_read(&work->pending);
			if (pending) {
				seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);

				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
					   engine->name,
					   i915_gem_request_get_seqno(work->flip_queued_req),
					   dev_priv->next_seqno,
					   intel_engine_get_seqno(engine),
					   i915_gem_request_completed(work->flip_queued_req));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   intel_crtc_get_vblank_counter(crtc));
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_GEN(dev_priv) >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
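
/*
 * Walk every engine's batch-buffer pool and describe each cached object,
 * bucket by bucket, ending with a grand total.
 */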
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
				total++;
			}
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_request *req;
	int ret, any;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_engine(engine, dev_priv) {
		int count;

		count = 0;
		list_for_each_entry(req, &engine->request_list, link)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", engine->name, count);
		list_for_each_entry(req, &engine->request_list, link) {
			struct pid *pid = req->ctx->pid;
			struct task_struct *task;

			rcu_read_lock();
			task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
			seq_printf(m, "    %x @ %d: %s [%d]\n",
				   req->fence.seqno,
				   (int) (jiffies - req->emitted_jiffies),
				   task ? task->comm : "<unknown>",
				   task ? task->pid : -1);
			rcu_read_unlock();
		}

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	spin_lock(&b->lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock(&b->lock);
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		i915_ring_seqno_info(m, engine);

	return 0;
}
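
/*
 * Interrupt register layout varies across platforms, hence the long
 * per-platform chain below: Cherryview, gen8+, Valleyview, devices
 * without a PCH split, then PCH-split platforms.
 */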
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_engine(engine, dev_priv) {
		if (INTEL_GEN(dev_priv) >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
		i915_ring_seqno_info(m, engine);
	}
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct intel_engine_cs *engine;
	const u32 *hws;
	int i;

	engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
	hws = engine->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
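
/*
 * The next four functions back the error-state debugfs file: open/read
 * dump the last captured GPU error state, while a write clears it.
 */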
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_destroy_error_state(error_priv->dev);

	return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = &dev_priv->drm;

	i915_error_state_get(&dev_priv->drm, error_priv);

	file->private_data = error_priv;

	return 0;
}
static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}
static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str,
					to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;
}
static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
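
/*
 * i915_frequency_info: dump the RPS/turbo state. Gen5 reports via
 * MEMSWCTL/MEMSTAT, VLV/CHV via the punit, and gen6+ via the RP*
 * registers, so each branch decodes a different register set.
 */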
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_BROXTON(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev_priv))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		if (IS_GEN9(dev_priv))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	u32 instdone[I915_NUM_INSTDONE_REG];
	enum intel_engine_id id;
	int j;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_printf(m, "Wedged\n");
	if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
		seq_printf(m, "Reset in progress\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_printf(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_printf(m, "struct_mutex blocked for reset\n");

	if (!i915.enable_hangcheck) {
		seq_printf(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine_id(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	i915_get_extra_instdone(dev_priv, instdone);

	intel_runtime_pm_put(dev_priv);

	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
		seq_printf(m, "Hangcheck active, fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	} else
		seq_printf(m, "Hangcheck inactive\n");

	for_each_engine_id(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno,
			   seqno[id],
			   engine->last_submitted_seqno);
		seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)));
		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x", instdone[j]);

			seq_puts(m, "\n\tinstdone accu =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x",
					   engine->hangcheck.instdone[j]);

			seq_puts(m, "\n");
		}
	}

	return 0;
}
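
/*
 * The three *_drpc_info() variants below decode render C-state (RC6)
 * and power-well status; i915_drpc_info() picks one based on platform.
 */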
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;

	spin_lock_irq(&dev_priv->uncore.lock);
	for_each_fw_domain(fw_domain, dev_priv) {
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   fw_domain->wake_count);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);

	return 0;
}
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rpmodectl1, rcctl1, pw_status;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	return i915_forcewake_domains(m, NULL);
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return i915_forcewake_domains(m, NULL);
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_drpc_info(m);
	else if (INTEL_GEN(dev_priv) >= 6)
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_FBC(dev_priv)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->fbc.lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   dev_priv->fbc.no_fbc_reason);

	if (INTEL_GEN(dev_priv) >= 7)
		seq_printf(m, "Compressing: %s\n",
			   yesno(I915_READ(FBC_STATUS2) &
				 FBC_COMPRESSION_MASK));

	mutex_unlock(&dev_priv->fbc.lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
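
/*
 * i915_fbc_fc_get/_set expose the FBC false-color debug bit as a
 * writable debugfs attribute (gen7+ only).
 */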
static int i915_fbc_fc_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}
static int i915_fbc_fc_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
			i915_fbc_fc_get, i915_fbc_fc_set,
			"%llu\n");
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_IPS(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_LLC(dev_priv)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq =
			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq =
			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
				(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
				 GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}
static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.depth,
			   fbdev_fb->base.bits_per_pixel,
			   fbdev_fb->base.modifier[0],
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   fb->base.modifier[0],
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
		   ring->space, ring->head, ring->tail,
		   ring->last_retired_head);
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv) {
			struct intel_context *ce = &ctx->engine[engine->id];

			seq_printf(m, "%s: ", engine->name);
			seq_putc(m, ce->initialised ? 'I' : 'i');
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
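
/*
 * Dump the beginning of an execlists context image: where it is bound
 * in the GGTT plus the first 0x600 bytes of its register state page.
 */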
static void i915_dump_lrc_obj(struct seq_file *m,
			      struct i915_gem_context *ctx,
			      struct intel_engine_cs *engine)
{
	struct i915_vma *vma = ctx->engine[engine->id].state;
	struct page *page;
	int j;

	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);

	if (!vma) {
		seq_puts(m, "\tFake context\n");
		return;
	}

	if (vma->flags & I915_VMA_GLOBAL_BIND)
		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
			   i915_ggtt_offset(vma));

	if (i915_gem_object_get_pages(vma->obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n\n");
		return;
	}

	page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
	if (page) {
		u32 *reg_state = kmap_atomic(page);

		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m,
				   "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   j * 4,
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	seq_putc(m, '\n');
}
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	int ret;

	if (!i915.enable_execlists) {
		seq_printf(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link)
		for_each_engine(engine, dev_priv)
			i915_dump_lrc_obj(m, ctx, engine);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_execlists(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	u32 status_pointer;
	u8 read_pointer;
	u8 write_pointer;
	u32 status;
	u32 ctx_id;
	struct list_head *cursor;
	int i, ret;

	if (!i915.enable_execlists) {
		seq_puts(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv) {
		struct drm_i915_gem_request *head_req = NULL;
		int count = 0;

		seq_printf(m, "%s\n", engine->name);

		status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
			   status, ctx_id);

		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);

		read_pointer = GEN8_CSB_READ_PTR(status_pointer);
		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
		if (read_pointer > write_pointer)
			write_pointer += GEN8_CSB_ENTRIES;
		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
			   read_pointer, write_pointer);

		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));

			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
				   i, status, ctx_id);
		}

		spin_lock_bh(&engine->execlist_lock);
		list_for_each(cursor, &engine->execlist_queue)
			count++;
		head_req = list_first_entry_or_null(&engine->execlist_queue,
						    struct drm_i915_gem_request,
						    execlist_link);
		spin_unlock_bh(&engine->execlist_lock);

		seq_printf(m, "\t%d requests in queue\n", count);
		if (head_req) {
			seq_printf(m, "\tHead request context: %u\n",
				   head_req->ctx->hw_id);
			seq_printf(m, "\tHead request tail: %u\n",
				   head_req->tail);
		}

		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	int count = 0;

	for_each_engine(engine, i915)
		count += intel_engine_has_waiter(engine);

	return count;
}
static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %s [%x]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));

	mutex_lock(&dev->filelist_mutex);
	spin_lock(&dev_priv->rps.client_lock);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts%s\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   file_priv->rps.boosts,
			   list_empty(&file_priv->rps.link) ? "" : ", active");
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
	spin_unlock(&dev_priv->rps.client_lock);
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    dev_priv->rps.enabled &&
	    dev_priv->gt.active_engines) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(dev_priv->rps.power));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   100 * rpup / rpupei,
			   dev_priv->rps.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   100 * rpdown / rpdownei,
			   dev_priv->rps.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
		   intel_uncore_edram_size(dev_priv)/1024/1024);

	return 0;
}
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	u32 tmp, i;

	if (!HAS_GUC_UCODE(dev_priv))
		return 0;

	seq_printf(m, "GuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n",
		   guc_fw->guc_fw_path);
	seq_printf(m, "\tfetch: %s\n",
		   intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
	seq_printf(m, "\tload: %s\n",
		   intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		   guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		   guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		   guc_fw->header_offset, guc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		   guc_fw->ucode_offset, guc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		   guc_fw->rsa_offset, guc_fw->rsa_size);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	return 0;
}
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct i915_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint64_t tot = 0;

	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
		client->priority, client->ctx_index, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
		client->doorbell_id, client->doorbell_offset, client->cookie);
	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
		client->wq_size, client->wq_offset, client->wq_tail);

	seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
	seq_printf(m, "\tLast submission result: %d\n", client->retcode);

	for_each_engine_id(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
				submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_guc guc;
	struct i915_guc_client client = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u64 total = 0;

	if (!HAS_GUC_SCHED(dev_priv))
		return 0;

	if (mutex_lock_interruptible(&dev->struct_mutex))
		return 0;

	/* Take a local copy of the GuC data, so we can dump it at leisure */
	guc = dev_priv->guc;
	if (guc.execbuf_client)
		client = *guc.execbuf_client;

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Doorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);

	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);

	seq_printf(m, "\nGuC submissions:\n");
	for_each_engine_id(engine, dev_priv, id) {
		u64 submissions = guc.submissions[id];
		total += submissions;
		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
			engine->name, submissions, guc.last_seqno[id]);
	}
	seq_printf(m, "\t%s: %llu\n", "Total", total);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
	i915_guc_client_info(m, dev_priv, &client);

	/* Add more as required ... */

	return 0;
}
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_i915_gem_object *obj;
	int i = 0, pg;

	if (!dev_priv->guc.log_vma)
		return 0;

	obj = dev_priv->guc.log_vma->obj;
	for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
		u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));

		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
				   *(log + i), *(log + i + 1),
				   *(log + i + 2), *(log + i + 3));

		kunmap_atomic(log);
	}

	seq_putc(m, '\n');

	return 0;
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev_priv)) {
		seq_puts(m, "PSR not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev_priv))
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	else {
		for_each_pipe(dev_priv, pipe) {
			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev_priv))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no kind of performance counter
	 * SKL+ Perf counter is reset to 0 every time DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	for_each_intel_connector(dev, connector) {
		struct drm_crtc *crtc;

		if (!connector->base.state->best_encoder)
			continue;

		crtc = connector->base.state->crtc;
		if (!crtc->state->active)
			continue;

		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u64 power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}
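/*
 * A worked example of the RAPL conversion above (the ESU value is
 * assumed, for illustration): if bits 12:8 of MSR_RAPL_POWER_UNIT read
 * back as 14, each energy-status count is 1/2^14 J, so units becomes
 * 1000000 / (1 << 14) == 61 uJ per count; an MCH_SECP_NRG_STTS reading
 * of 1000 counts is then reported as ~61000 uJ.
 */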
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				 intel_display_power_domain_str(power_domain),
				 power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);
}
static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
{
	u32 state;

	if (IS_845G(dev_priv) || IS_I865G(dev_priv))
		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	return state;
}
static bool cursor_position(struct drm_i915_private *dev_priv,
			    int pipe, int *x, int *y)
{
	u32 pos;

	pos = I915_READ(CURPOS(pipe));

	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
		*x = -*x;

	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
		*y = -*y;

	return cursor_active(dev_priv, pipe);
}
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		char *format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb)
			format_name = drm_get_format_name(state->fb->pixel_format);
		else
			format_name = kstrdup("N/A", GFP_KERNEL);

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name,
			   plane_rotation(state->rotation));

		kfree(format_name);
	}
}
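/*
 * A note on the 16.16 fixed-point arithmetic above: multiplying the low
 * 16 bits by 15625 and shifting right by 10 is (frac * 1000000) / 65536,
 * i.e. it converts 1/65536ths into millionths. For example a src_x of
 * 0x00018000 (1.5) prints as "1.500000".
 */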
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < SKL_NUM_SCALERS; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;

	intel_runtime_pm_get(dev_priv);
	drm_modeset_lock_all(dev);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		bool active;
		struct intel_crtc_state *pipe_config;
		int x, y;

		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			intel_crtc_info(m, crtc);

			active = cursor_position(dev_priv, crtc->pipe, &x, &y);
			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
				   yesno(crtc->cursor_base),
				   x, y, crtc->base.cursor->state->crtc_w,
				   crtc->base.cursor->state->crtc_h,
				   crtc->cursor_addr, yesno(active));
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_connector_info(m, connector);
	}
	drm_modeset_unlock_all(dev);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	int num_rings = INTEL_INFO(dev_priv)->num_rings;
	enum intel_engine_id id;
	int j, ret;

	if (!i915.semaphores) {
		seq_puts(m, "Semaphores are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev_priv)) {
		struct page *page;
		uint64_t *seqno;

		page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
		for_each_engine_id(engine, dev_priv, id) {
			uint64_t offset;

			seq_printf(m, "%s\n", engine->name);

			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = id * I915_NUM_ENGINES + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = id + (j * I915_NUM_ENGINES);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

		}
		kunmap_atomic(seqno);
	} else {
		seq_puts(m, "  Last signal:");
		for_each_engine(engine, dev_priv)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(engine->semaphore.mbox.signal[j]));
		seq_putc(m, '\n');
	}

	seq_puts(m, "\nSync seqno:\n");
	for_each_engine(engine, dev_priv) {
		for (j = 0; j < num_rings; j++)
			seq_printf(m, "  0x%08x ",
				   engine->semaphore.sync_seqno[j]);
		seq_putc(m, '\n');
	}
	seq_putc(m, '\n');

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->config.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct intel_engine_cs *engine;
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_workarounds *workarounds = &dev_priv->workarounds;
	enum intel_engine_id id;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for_each_engine_id(engine, dev_priv, id)
		seq_printf(m, "HW whitelist count for %s: %d\n",
			   engine->name, workarounds->hw_whitelist_count[id]);
	for (i = 0; i < workarounds->count; ++i) {
		i915_reg_t addr;
		u32 mask, value, read;
		bool ok;

		addr = workarounds->reg[i].addr;
		mask = workarounds->reg[i].mask;
		value = workarounds->reg[i].value;
		read = I915_READ(addr);
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;

	drm_for_each_connector(connector, dev) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}

		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}
struct pipe_crc_info {
	const char *name;
	struct drm_i915_private *dev_priv;
	enum pipe pipe;
};
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;

	drm_modeset_lock_all(dev);
	drm_for_each_connector(connector, dev) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev_priv;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev_priv;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for '\0' */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
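/*
 * Each entry thus formats to one fixed-width line of six fields, e.g.
 * (values illustrative only):
 *
 *      123 01234567 89abcdef 02468ace 13579bdf 0f0f0f0f
 *
 * i.e. the frame counter followed by the five CRC result registers.
 */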
static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_i915_private *dev_priv = info->dev_priv;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int n_entries;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	n_entries = count / PIPE_CRC_LINE_LEN;

	bytes_read = 0;
	while (n_entries > 0) {
		struct intel_pipe_crc_entry *entry =
			&pipe_crc->entries[pipe_crc->tail];

		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
			break;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);
		n_entries--;

		spin_unlock_irq(&pipe_crc->lock);

		if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
			return -EFAULT;

		user_buf += PIPE_CRC_LINE_LEN;

		spin_lock_irq(&pipe_crc->lock);
	}

	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}
static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};
static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};
static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(minor->dev);
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev_priv = dev_priv;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}
static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
	return pipe_crc_sources[source];
}
static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}

static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, display_crc_ctl_show, inode->i_private);
}
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
				     enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexistent DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		default:
			break;
		}
	}
	drm_modeset_unlock_all(dev);

	return ret;
}
static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_CHERRYVIEW(dev_priv))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		switch (pipe) {
		case PIPE_A:
			tmp |= PIPE_A_SCRAMBLE_RESET;
			break;
		case PIPE_B:
			tmp |= PIPE_B_SCRAMBLE_RESET;
			break;
		case PIPE_C:
			tmp |= PIPE_C_SCRAMBLE_RESET;
			break;
		default:
			return -EINVAL;
		}
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev_priv))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev_priv))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev_priv))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev_priv))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev_priv));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
					 enum pipe pipe)
{
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	switch (pipe) {
	case PIPE_A:
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
		break;
	case PIPE_B:
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
		break;
	case PIPE_C:
		tmp &= ~PIPE_C_SCRAMBLE_RESET;
		break;
	default:
		return;
	}
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}
static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
					 enum pipe pipe)
{
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
					bool enable)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	int ret = 0;

	drm_modeset_lock_all(dev);
	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
	pipe_config = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(pipe_config)) {
		ret = PTR_ERR(pipe_config);
		goto out;
	}

	pipe_config->pch_pfit.force_thru = enable;
	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
	    pipe_config->pch_pfit.enabled != enable)
		pipe_config->base.connectors_changed = true;

	ret = drm_atomic_commit(state);
out:
	drm_modeset_unlock_all(dev);
	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
	if (ret)
		drm_atomic_state_free(state);
}
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);

		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
			       enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc =
			to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	enum intel_display_power_domain power_domain;
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	if (IS_GEN2(dev_priv))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_GEN(dev_priv) < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
	else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);

	if (ret != 0)
		goto out;

	/* none -> real source transition */
	if (source) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
				  sizeof(pipe_crc->entries[0]),
				  GFP_KERNEL);
		if (!entries) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		hsw_disable_ips(crtc);

		spin_lock_irq(&pipe_crc->lock);
		kfree(pipe_crc->entries);
		pipe_crc->entries = entries;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;
		struct intel_crtc *crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		drm_modeset_lock(&crtc->base.mutex, NULL);
		if (crtc->base.state->active)
			intel_wait_for_vblank(dev, pipe);
		drm_modeset_unlock(&crtc->base.mutex);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev_priv))
			g4x_undo_pipe_scramble_reset(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_undo_pipe_scramble_reset(dev_priv, pipe);
		else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);

		hsw_enable_ips(crtc);
	}

	ret = 0;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}
enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}
static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
				 char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev_priv, pipe, source);
}
static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}
static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};
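/*
 * Illustrative usage sketch (not part of the original source; paths assume
 * debugfs mounted at /sys/kernel/debug and the card on DRM minor 0). The
 * command grammar parsed above maps to shell writes such as:
 *
 *   echo "pipe A plane1" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *   echo "pipe A none"   > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *
 * with the captured CRCs then read back from the matching per-pipe file
 * (i915_pipe_A_crc for pipe A).
 */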
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct list_head *connector_list;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	connector_list = &dev->mode_config.connector_list;

	if (len == 0)
		return 0;

	input_buffer = kmalloc(len + 1, GFP_KERNEL);
	if (!input_buffer)
		return -ENOMEM;

	if (copy_from_user(input_buffer, ubuf, len)) {
		status = -EFAULT;
		goto out;
	}

	input_buffer[len] = '\0';
	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				goto out;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance_test_active = 1;
			else
				intel_dp->compliance_test_active = 0;
		}
	}
out:
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			if (intel_dp->compliance_test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}

	return 0;
}
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_active_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%lx", intel_dp->compliance_test_data);
		} else
			seq_puts(m, "0");
	}

	return 0;
}
static int i915_displayport_test_data_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_data_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
		} else
			seq_puts(m, "0");
	}

	return 0;
}
static int i915_displayport_test_type_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_type_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
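/*
 * Illustrative usage sketch (not part of the original source; paths assume
 * debugfs at /sys/kernel/debug, DRM minor 0). A DP compliance test harness
 * arms the kernel side and then reads back what the driver recorded for the
 * requested test:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
 *   cat /sys/kernel/debug/dri/0/i915_dp_test_type
 *   cat /sys/kernel/debug/dri/0/i915_dp_test_data
 */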
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5)
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
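/*
 * Illustrative usage sketch (not part of the original source; paths assume
 * debugfs at /sys/kernel/debug, DRM minor 0). Reading a latency file prints
 * one line per watermark level, e.g. "WM0 2 (2.0 usec)"; a write must supply
 * exactly num_levels raw values, so on a platform with eight levels:
 *
 *   cat /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *   echo "2 4 10 20 45 60 100 100" > \
 *       /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */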
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return -EAGAIN;

	intel_runtime_pm_get(dev_priv);

	i915_handle_error(dev_priv, val,
			  "Manually setting wedged to %llu", val);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
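/*
 * Illustrative usage sketch (not part of the original source; path assumed):
 * reading i915_wedged reports whether the GPU is terminally wedged, while
 * writing injects a hang through i915_handle_error(), e.g.
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */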
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	val &= INTEL_INFO(dev_priv)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
	dev_priv->gpu_error.test_irq_rings = val;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev_priv);

	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
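/*
 * Illustrative usage sketch (not part of the original source; path assumed):
 * the DROP_* bits above combine into a mask written by test suites to flush
 * GEM state. DROP_ALL is 0x1 | 0x2 | 0x4 | 0x8 = 0xf, so dropping everything
 * looks like:
 *
 *   echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */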
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min ||
	    val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
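/*
 * Illustrative usage sketch (not part of the original source; paths
 * assumed): both files speak MHz; intel_gpu_freq() decodes the softlimit
 * for reads and intel_freq_opcode() re-encodes writes, so capping turbo
 * looks like:
 *
 *   cat /sys/kernel/debug/dri/0/i915_max_freq
 *   echo 600 > /sys/kernel/debug/dri/0/i915_max_freq
 */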
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
}
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_BROXTON(dev_priv)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
			sseu->subslice_mask =
				INTEL_INFO(dev_priv)->sseu.subslice_mask;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_BROXTON(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask |= BIT(ss);
			}

			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
}
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		sseu->eu_per_subslice =
				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	seq_printf(m, "  %s Subslice Mask: %04x\n", type,
		   sseu->subslice_mask);
	seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
		   hweight8(sseu->subslice_mask));
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
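/*
 * Illustrative output sketch for i915_sseu_status (values invented, shapes
 * follow the seq_printf formats above):
 *
 *   SSEU Device Info
 *     Available Slice Mask: 0001
 *     Available Subslice Total: 3
 *     ...
 *   SSEU Device Status
 *     Enabled Slice Mask: 0001
 *     Enabled EU Total: 24
 */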
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 6)
		return 0;

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
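/*
 * Illustrative usage sketch (not part of the original source; path assumed):
 * forcewake is held for exactly as long as the file stays open, so a test
 * keeps a file descriptor on it across its critical section:
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *   ...poke registers while the GT is guaranteed awake...
 *   exec 3<&-
 */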
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, to_i915(minor->dev),
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, to_i915(minor->dev),
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops}
};
void intel_display_crc_init(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}
static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}

static int i915_panel_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_panel_show, inode->i_private);
}

static const struct file_operations i915_panel_fops = {
	.owner = THIS_MODULE,
	.open = i915_panel_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);

	return 0;
}