#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_INCLUDE_FILE i915_trace

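/*
 * Like all tracepoint headers, this file is included twice: once
 * normally, and once more by <trace/define_trace.h> with
 * TRACE_HEADER_MULTI_READ defined, at which point the TRACE_EVENT()
 * macros below expand into the real event definitions. Each
 * TRACE_EVENT(name, ...) also generates a trace_<name>() function for
 * the instrumentation site, e.g.:
 *
 *	trace_i915_pipe_update_start(crtc, min, max);
 *
 * which compiles down to a static-branch no-op unless the event is
 * enabled at runtime.
 */
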
/* pipe updates */

TRACE_EVENT(i915_pipe_update_start,
	TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max),
	TP_ARGS(crtc, min, max),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->pipe = crtc->pipe;
		__entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
									     crtc->pipe);
		__entry->scanline = intel_get_crtc_scanline(crtc);
		__entry->min = min;
		__entry->max = max;
	),

	TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
		  pipe_name(__entry->pipe), __entry->frame,
		  __entry->scanline, __entry->min, __entry->max)
);

TRACE_EVENT(i915_pipe_update_vblank_evaded,
	TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max, u32 frame),
	TP_ARGS(crtc, min, max, frame),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->pipe = crtc->pipe;
		__entry->frame = frame;
		__entry->scanline = intel_get_crtc_scanline(crtc);
		__entry->min = min;
		__entry->max = max;
	),

	TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
		  pipe_name(__entry->pipe), __entry->frame,
		  __entry->scanline, __entry->min, __entry->max)
);

TRACE_EVENT(i915_pipe_update_end,
	TP_PROTO(struct intel_crtc *crtc, u32 frame),
	TP_ARGS(crtc, frame),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
	),

	TP_fast_assign(
		__entry->pipe = crtc->pipe;
		__entry->frame = frame;
		__entry->scanline = intel_get_crtc_scanline(crtc);
	),

	TP_printk("pipe %c, frame=%u, scanline=%u",
		  pipe_name(__entry->pipe), __entry->frame,
		  __entry->scanline)
);
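
/*
 * The three events above bracket a vblank-evading pipe update:
 * i915_pipe_update_start fires before the driver waits out the scanline
 * window in which an update could race the vblank, _vblank_evaded once
 * that window has been safely cleared, and _end when the update is
 * finished. A plausible sequence for one update on pipe A (values are
 * illustrative only):
 *
 *	i915_pipe_update_start: pipe A, frame=1210, scanline=742, min=740, max=749
 *	i915_pipe_update_vblank_evaded: pipe A, frame=1211, scanline=750, min=740, max=749
 *	i915_pipe_update_end: pipe A, frame=1211, scanline=2
 */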

/* object tracking */

TRACE_EVENT(i915_gem_object_create,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, size)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->size = obj->base.size;
	),

	TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);

TRACE_EVENT(i915_vma_bind,
	TP_PROTO(struct i915_vma *vma, unsigned flags),
	TP_ARGS(vma, flags),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(struct i915_address_space *, vm)
		__field(u32, offset)
		__field(u32, size)
		__field(unsigned, flags)
	),

	TP_fast_assign(
		__entry->obj = vma->obj;
		__entry->vm = vma->vm;
		__entry->offset = vma->node.start;
		__entry->size = vma->node.size;
		__entry->flags = flags;
	),

	TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
		  __entry->obj, __entry->offset, __entry->size,
		  __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
		  __entry->vm)
);

TRACE_EVENT(i915_vma_unbind,
	TP_PROTO(struct i915_vma *vma),
	TP_ARGS(vma),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(struct i915_address_space *, vm)
		__field(u32, offset)
		__field(u32, size)
	),

	TP_fast_assign(
		__entry->obj = vma->obj;
		__entry->vm = vma->vm;
		__entry->offset = vma->node.start;
		__entry->size = vma->node.size;
	),

	TP_printk("obj=%p, offset=%08x size=%x vm=%p",
		  __entry->obj, __entry->offset, __entry->size, __entry->vm)
);

TRACE_EVENT(i915_gem_object_change_domain,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
	TP_ARGS(obj, old_read, old_write),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, read_domains)
		__field(u32, write_domain)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->read_domains = obj->base.read_domains | (old_read << 16);
		__entry->write_domain = obj->base.write_domain | (old_write << 16);
	),

	TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
		  __entry->obj,
		  __entry->read_domains >> 16,
		  __entry->read_domains & 0xffff,
		  __entry->write_domain >> 16,
		  __entry->write_domain & 0xffff)
);
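
/*
 * The old and new domains above travel packed into a single u32 each:
 * the previous value in the high 16 bits, the current one in the low 16
 * bits, which TP_printk() then unpacks again. For example, with
 * old_read = 0x40 and obj->base.read_domains now 0x01, the entry holds
 * read_domains = 0x00400001 and the event prints "read=40=>01".
 */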

TRACE_EVENT(i915_gem_object_pwrite,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	TP_ARGS(obj, offset, len),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, offset)
		__field(u32, len)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->offset = offset;
		__entry->len = len;
	),

	TP_printk("obj=%p, offset=%u, len=%u",
		  __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_pread,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	TP_ARGS(obj, offset, len),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, offset)
		__field(u32, len)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->offset = offset;
		__entry->len = len;
	),

	TP_printk("obj=%p, offset=%u, len=%u",
		  __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_fault,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
	TP_ARGS(obj, index, gtt, write),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, index)
		__field(bool, gtt)
		__field(bool, write)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->index = index;
		__entry->gtt = gtt;
		__entry->write = write;
	),

	TP_printk("obj=%p, %s index=%u %s",
		  __entry->obj,
		  __entry->gtt ? "GTT" : "CPU",
		  __entry->index,
		  __entry->write ? ", writable" : "")
);

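/*
 * Several object-lifetime events need nothing beyond the object pointer,
 * so they share the event class below: each DEFINE_EVENT() stamps out a
 * separate event (with its own enable file in tracefs) that reuses the
 * class's field layout, assignment, and print format.
 */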
DECLARE_EVENT_CLASS(i915_gem_object,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
	),

	TP_fast_assign(
		__entry->obj = obj;
	),

	TP_printk("obj=%p", __entry->obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj)
);

TRACE_EVENT(i915_gem_evict,
	TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
	TP_ARGS(dev, size, align, flags),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, size)
		__field(u32, align)
		__field(unsigned, flags)
	),

	TP_fast_assign(
		__entry->dev = dev->primary->index;
		__entry->size = size;
		__entry->align = align;
		__entry->flags = flags;
	),

	TP_printk("dev=%d, size=%d, align=%d %s",
		  __entry->dev, __entry->size, __entry->align,
		  __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);

TRACE_EVENT(i915_gem_evict_everything,
	TP_PROTO(struct drm_device *dev),
	TP_ARGS(dev),

	TP_STRUCT__entry(
		__field(u32, dev)
	),

	TP_fast_assign(
		__entry->dev = dev->primary->index;
	),

	TP_printk("dev=%d", __entry->dev)
);

TRACE_EVENT(i915_gem_evict_vm,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(struct i915_address_space *, vm)
	),

	TP_fast_assign(
		__entry->dev = vm->dev->primary->index;
		__entry->vm = vm;
	),

	TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);

TRACE_EVENT(i915_gem_ring_sync_to,
	TP_PROTO(struct intel_engine_cs *from,
		 struct intel_engine_cs *to,
		 struct drm_i915_gem_request *req),
	TP_ARGS(from, to, req),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, sync_from)
		__field(u32, sync_to)
		__field(u32, seqno)
	),

	TP_fast_assign(
		__entry->dev = from->dev->primary->index;
		__entry->sync_from = from->id;
		__entry->sync_to = to->id;
		__entry->seqno = i915_gem_request_get_seqno(req);
	),

	TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
		  __entry->dev,
		  __entry->sync_from, __entry->sync_to,
		  __entry->seqno)
);

TRACE_EVENT(i915_gem_ring_dispatch,
	TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
	TP_ARGS(req, flags),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, seqno)
		__field(u32, flags)
	),

	TP_fast_assign(
		struct intel_engine_cs *ring =
			i915_gem_request_get_ring(req);
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->seqno = i915_gem_request_get_seqno(req);
		__entry->flags = flags;
		i915_trace_irq_get(ring, req);
	),

	TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
		  __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);

TRACE_EVENT(i915_gem_ring_flush,
	TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
	TP_ARGS(ring, invalidate, flush),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, invalidate)
		__field(u32, flush)
	),

	TP_fast_assign(
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->invalidate = invalidate;
		__entry->flush = flush;
	),

	TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
		  __entry->dev, __entry->ring,
		  __entry->invalidate, __entry->flush)
);

DECLARE_EVENT_CLASS(i915_gem_request,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, uniq)
		__field(u32, seqno)
	),

	TP_fast_assign(
		struct intel_engine_cs *ring =
			i915_gem_request_get_ring(req);
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->uniq = req ? req->uniq : 0;
		__entry->seqno = i915_gem_request_get_seqno(req);
	),

	TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
		  __entry->dev, __entry->ring, __entry->uniq,
		  __entry->seqno)
);
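
/*
 * Two identifiers ride along with every request event: uniq is the
 * driver's own monotonically increasing request id, while seqno is the
 * sequence number the GPU advances as requests complete. The
 * "req ? req->uniq : 0" guard mirrors i915_gem_request_get_seqno(),
 * which likewise maps a NULL request to 0.
 */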

DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req)
);

TRACE_EVENT(i915_gem_request_notify,
	TP_PROTO(struct intel_engine_cs *ring),
	TP_ARGS(ring),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, seqno)
	),

	TP_fast_assign(
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->seqno = ring->get_seqno(ring, false);
	),

	TP_printk("dev=%u, ring=%u, seqno=%u",
		  __entry->dev, __entry->ring, __entry->seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req)
);

TRACE_EVENT(i915_gem_request_wait_begin,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, uniq)
		__field(u32, seqno)
		__field(bool, blocking)
	),

	/* NB: the blocking information is racy since mutex_is_locked
	 * doesn't check that the current thread holds the lock. The only
	 * other option would be to pass the boolean information of whether
	 * or not the caller was blocking down through the stack, which is
	 * less desirable.
	 */
	TP_fast_assign(
		struct intel_engine_cs *ring =
			i915_gem_request_get_ring(req);
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->uniq = req ? req->uniq : 0;
		__entry->seqno = i915_gem_request_get_seqno(req);
		__entry->blocking =
			mutex_is_locked(&ring->dev->struct_mutex);
	),

	TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
		  __entry->dev, __entry->ring, __entry->uniq,
		  __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req)
);

DECLARE_EVENT_CLASS(i915_ring,
	TP_PROTO(struct intel_engine_cs *ring),
	TP_ARGS(ring),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
	),

	TP_fast_assign(
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
	),

	TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);

DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
	TP_PROTO(struct intel_engine_cs *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(i915_ring, i915_ring_wait_end,
	TP_PROTO(struct intel_engine_cs *ring),
	TP_ARGS(ring)
);

TRACE_EVENT(i915_flip_request,
	TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	TP_ARGS(plane, obj),

	TP_STRUCT__entry(
		__field(int, plane)
		__field(struct drm_i915_gem_object *, obj)
	),

	TP_fast_assign(
		__entry->plane = plane;
		__entry->obj = obj;
	),

	TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

TRACE_EVENT(i915_flip_complete,
	TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	TP_ARGS(plane, obj),

	TP_STRUCT__entry(
		__field(int, plane)
		__field(struct drm_i915_gem_object *, obj)
	),

	TP_fast_assign(
		__entry->plane = plane;
		__entry->obj = obj;
	),

	TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

TRACE_EVENT_CONDITION(i915_reg_rw,
	TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),

	TP_ARGS(write, reg, val, len, trace),

	TP_CONDITION(trace),

	TP_STRUCT__entry(
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
	),

	TP_fast_assign(
		__entry->val = (u64)val;
		__entry->reg = reg;
		__entry->write = write;
		__entry->len = len;
	),

	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		  __entry->write ? "write" : "read",
		  __entry->reg, __entry->len,
		  (u32)(__entry->val & 0xffffffff),
		  (u32)(__entry->val >> 32))
);
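
/*
 * TRACE_EVENT_CONDITION() records the event only while TP_CONDITION()
 * evaluates true, so register accessors can pass a 'trace' flag to keep
 * their hottest or most sensitive accesses out of the log. An
 * illustrative call site for a traced 32-bit read (variable names made
 * up) would be:
 *
 *	trace_i915_reg_rw(false, reg, (u64)val, sizeof(val), trace);
 */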

TRACE_EVENT(intel_gpu_freq_change,
	TP_PROTO(u32 freq),
	TP_ARGS(freq),

	TP_STRUCT__entry(
		__field(u32, freq)
	),

	TP_fast_assign(
		__entry->freq = freq;
	),

	TP_printk("new_freq=%u", __entry->freq)
);

/**
 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 *
 * With full ppgtt enabled, each process using drm allocates at least one
 * translation table. These traces make it possible to keep track of the
 * allocation and of the lifetime of the tables; this can be used during
 * testing/debug to verify that we are not leaking ppgtts. They identify
 * the ppgtt through the vm pointer, which is also printed by the
 * i915_vma_bind and i915_vma_unbind tracepoints.
 */
DECLARE_EVENT_CLASS(i915_ppgtt,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u32, dev)
	),

	TP_fast_assign(
		__entry->vm = vm;
		__entry->dev = vm->dev->primary->index;
	),

	TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
);

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);
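
/*
 * A leaked ppgtt shows up as an i915_ppgtt_create with no matching
 * i915_ppgtt_release for the same vm pointer, e.g. (addresses made up):
 *
 *	i915_ppgtt_create: dev=0, vm=ffff88021234a000
 *	...
 *	i915_ppgtt_release: dev=0, vm=ffff88021234a000
 */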

/**
 * DOC: i915_context_create and i915_context_free tracepoints
 *
 * These tracepoints are used to track creation and deletion of contexts.
 * If full ppgtt is enabled, they also print the address of the vm
 * assigned to the context.
 */
DECLARE_EVENT_CLASS(i915_context,
	TP_PROTO(struct intel_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(struct intel_context *, ctx)
		__field(struct i915_address_space *, vm)
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
		__entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
	),

	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
		  __entry->dev, __entry->ctx, __entry->vm)
);

DEFINE_EVENT(i915_context, i915_context_create,
	TP_PROTO(struct intel_context *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(i915_context, i915_context_free,
	TP_PROTO(struct intel_context *ctx),
	TP_ARGS(ctx)
);
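
/*
 * Like every event in this file, these land under the i915 system in
 * tracefs, so context tracking can be enabled at runtime with e.g.:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/i915/i915_context_create/enable
 *	echo 1 > /sys/kernel/debug/tracing/events/i915/i915_context_free/enable
 */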

/**
 * DOC: switch_mm tracepoint
 *
 * This tracepoint allows tracking of the mm switch, which is an important
 * point in the lifetime of the vm in the legacy submission path. It is
 * called only when full ppgtt is enabled.
 */
TRACE_EVENT(switch_mm,
	TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),

	TP_ARGS(ring, to),

	TP_STRUCT__entry(
		__field(u32, ring)
		__field(struct intel_context *, to)
		__field(struct i915_address_space *, vm)
		__field(u32, dev)
	),

	TP_fast_assign(
		__entry->ring = ring->id;
		__entry->to = to;
		__entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
		__entry->dev = ring->dev->primary->index;
	),

	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
		  __entry->dev, __entry->ring, __entry->to, __entry->vm)
);
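
/*
 * Correlating switch_mm with the i915_ppgtt and i915_context events
 * above (all of them print the same vm pointer) gives the full picture
 * of a ppgtt being created, loaded into a ring, and torn down.
 */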

#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>