#include "util/evlist.h"
#include "util/cache.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/prctl.h>
#include <sys/resource.h>

#include <semaphore.h>

static const char *input_name;

static char default_sort_order[] = "avg, max, switch, runtime";
static const char *sort_order = default_sort_order;

static int profile_cpu = -1;

#define PR_SET_NAME	15	/* Set process name */
static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;

#define COMM_LEN	20
#define MAX_PID		65536

static unsigned long nr_tasks;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64 start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static u64 cpu_usage;
static u64 runavg_cpu_usage;
static u64 parent_cpu_usage;
static u64 runavg_parent_cpu_usage;

static unsigned long nr_runs;
static u64 sum_runtime;
static u64 sum_fluct;
static u64 run_avg;

static unsigned int replay_repeat = 10;
static unsigned long nr_timestamps;
static unsigned long nr_unordered_timestamps;
static unsigned long nr_state_machine_bugs;
static unsigned long nr_context_switch_bugs;
static unsigned long nr_events;
static unsigned long nr_lost_chunks;
static unsigned long nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE,
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_at;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root atom_root, sorted_atom_root;

static u64 all_runtime;
static u64 all_count;
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}
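
/*
 * Illustrative note (added for clarity; not part of the original source):
 * burn_nsecs() compensates for the cost of the time measurement itself.
 * With the loop condition above, a calibrated run_measurement_overhead of,
 * say, 50 nsecs means burn_nsecs(1000) spins for only ~950 nsecs of wall
 * clock, so the total cost of the call - including the final get_nsecs()
 * read - comes out close to the requested 1000 nsecs.
 */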
static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}
static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * with it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}
static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}

static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}
static void process_sched_event(struct task_desc *this_task __maybe_unused,
				struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_MIGRATION:
		break;
	default:
		BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static int self_open_counters(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

	if (fd < 0)
		pr_debug("Error: sys_perf_event_open() syscall returned "
			 "with %d (%s)\n", fd, strerror(errno));
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	fd = self_open_counters();
	if (fd < 0)
		return NULL;
again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}
static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}
static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

	/*
	 * rusage statistics are done by the parent; these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}
#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)
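
/*
 * Illustrative expansion (added for clarity; not part of the original
 * source): a concrete invocation such as
 *
 *	FILL_FIELD(wakeup_event, pid, event, data);
 *
 * expands to roughly
 *
 *	wakeup_event.pid = (typeof(wakeup_event.pid))
 *				raw_field_value(event, "pid", data);
 *
 * i.e. the member is looked up by name in the tracepoint's format
 * description and the raw value is cast to the destination member's type,
 * so the struct layouts below do not need to match the on-disk record
 * layout.
 */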
struct trace_switch_event {
	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};

struct trace_wakeup_event {
	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_migrate_task_event {
	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u32 prio;
	u32 cpu;
};
struct trace_sched_handler {
	int (*switch_event)(struct trace_switch_event *event,
			    struct machine *machine,
			    struct event_format *tp_format,
			    struct perf_sample *sample);

	int (*runtime_event)(struct trace_runtime_event *event,
			     struct machine *machine,
			     struct perf_sample *sample);

	int (*wakeup_event)(struct trace_wakeup_event *event,
			    struct machine *machine,
			    struct event_format *tp_format,
			    struct perf_sample *sample);

	int (*fork_event)(struct trace_fork_event *event,
			  struct event_format *tp_format);

	int (*migrate_task_event)(struct trace_migrate_task_event *event,
				  struct machine *machine,
				  struct perf_sample *sample);
};
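
/*
 * Note (added for clarity; not part of the original source): each
 * subcommand installs one of these handler tables - replay_ops for
 * 'perf sched replay', lat_ops for 'perf sched latency' and map_ops for
 * 'perf sched map' - and the generic process_sched_*_event() functions
 * further below simply dispatch through whichever table trace_handler
 * currently points at.
 */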
static int
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct machine *machine __maybe_unused,
		    struct event_format *event, struct perf_sample *sample)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, sample->time, wakee);
	return 0;
}

#define MAX_CPUS	4096

static u64 cpu_last_switched[MAX_CPUS];

static int
replay_switch_event(struct trace_switch_event *switch_event,
		    struct machine *machine __maybe_unused,
		    struct event_format *event,
		    struct perf_sample *sample)
{
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);

	return 0;
}

static int
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event_format *event)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
	return 0;
}

static struct trace_sched_handler replay_ops = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};
struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(cmp_pid);

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}
static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
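
/*
 * Note (added for clarity; not part of the original source): the two
 * functions above are the standard open-coded rbtree pattern - the search
 * walks rb_node child pointers comparing with thread_lat_cmp(), and the
 * insert descends to a NULL child link, then calls rb_link_node() and
 * rb_insert_color() to splice the new node in and rebalance.  The same
 * comparison list (e.g. cmp_pid) must be used for both, or lookups will
 * miss nodes that were inserted under a different ordering.
 */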
static int thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
	return 0;
}

static int latency_fork_event(struct trace_fork_event *fork_event __maybe_unused,
			      struct event_format *event __maybe_unused)
{
	/* should insert the newcomer */
	return 0;
}

static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}
static int
latency_switch_event(struct trace_switch_event *switch_event,
		     struct machine *machine,
		     struct event_format *event __maybe_unused,
		     struct perf_sample *sample)
{
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
	sched_in = machine__findnew_thread(machine, switch_event->next_pid);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched_out))
			return -1;
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			return -1;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(switch_event), timestamp))
		return -1;

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched_in))
			return -1;
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			return -1;
		}
		/*
		 * A task came in that we have not heard about yet -
		 * add an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			return -1;
	}
	add_sched_in_event(in_events, timestamp);

	return 0;
}

static int
latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct machine *machine, struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(thread))
			return -1;
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms) {
			pr_debug("in-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			return -1;
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
	return 0;
}

static int
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct machine *machine,
		     struct event_format *event __maybe_unused,
		     struct perf_sample *sample)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return 0;

	wakee = machine__findnew_thread(machine, wakeup_event->pid);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(wakee))
			return -1;
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms) {
			pr_debug("wakeup-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't
	 * make useless noise.
	 */
	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return 0;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
	return 0;
}
static int
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
			   struct machine *machine, struct perf_sample *sample)
{
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, migrate_task_event->pid);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(migrant))
			return -1;
		register_pid(migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
		if (!atoms) {
			pr_debug("migration-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		nr_unordered_timestamps++;

	return 0;
}

static struct trace_sched_handler lat_ops = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
	.migrate_task_event	= latency_migrate_task_event,
};
static void output_lat_thread(struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
		(double)work_list->total_runtime / 1e6,
		work_list->nb_atoms, (double)avg / 1e6,
		(double)work_list->max_lat / 1e6,
		(double)work_list->max_lat_at / 1e9);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name = "pid",
	.cmp = pid_cmp,
};
static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name = "avg",
	.cmp = avg_cmp,
};

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name = "max",
	.cmp = max_cmp,
};

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name = "switch",
	.cmp = switch_cmp,
};

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name = "runtime",
	.cmp = runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);
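
/*
 * Illustrative example (added for clarity; not part of the original
 * source): the table above backs the -s/--sort option of
 * 'perf sched latency', so e.g.
 *
 *	perf sched latency --sort max,runtime
 *
 * calls sort_dimension__add() once per comma-separated key, queueing
 * max_sort_dimension and runtime_sort_dimension on sort_list in that
 * order; thread_lat_cmp() then applies them as primary and secondary
 * sort keys.
 */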
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void setup_sorting(void);

static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}

static struct trace_sched_handler *trace_handler;
*tool __maybe_unused
,
1373 struct event_format
*event
,
1374 struct perf_sample
*sample
,
1375 struct machine
*machine
,
1376 struct thread
*thread __maybe_unused
)
1378 void *data
= sample
->raw_data
;
1379 struct trace_wakeup_event wakeup_event
;
1382 FILL_COMMON_FIELDS(wakeup_event
, event
, data
);
1384 FILL_ARRAY(wakeup_event
, comm
, event
, data
);
1385 FILL_FIELD(wakeup_event
, pid
, event
, data
);
1386 FILL_FIELD(wakeup_event
, prio
, event
, data
);
1387 FILL_FIELD(wakeup_event
, success
, event
, data
);
1388 FILL_FIELD(wakeup_event
, cpu
, event
, data
);
1390 if (trace_handler
->wakeup_event
)
1391 err
= trace_handler
->wakeup_event(&wakeup_event
, machine
, event
, sample
);
1397 * Track the current task - that way we can know whether there's any
1398 * weird events, such as a task being switched away that is not current.
1402 static u32 curr_pid
[MAX_CPUS
] = { [0 ... MAX_CPUS
-1] = -1 };
1404 static struct thread
*curr_thread
[MAX_CPUS
];
1406 static char next_shortname1
= 'A';
1407 static char next_shortname2
= '0';
static int
map_switch_event(struct trace_switch_event *switch_event,
		 struct machine *machine,
		 struct event_format *event __maybe_unused,
		 struct perf_sample *sample)
{
	struct thread *sched_out __maybe_unused, *sched_in;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int cpu, this_cpu = sample->cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
	sched_in = machine__findnew_thread(machine, switch_event->next_pid);

	curr_thread[this_cpu] = sched_in;

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1 = 'A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);
			else
				printf(".  ");
		} else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	} else {
		printf("\n");
	}

	return 0;
}
static int process_sched_switch_event(struct perf_tool *tool __maybe_unused,
				      struct event_format *event,
				      struct perf_sample *sample,
				      struct machine *machine,
				      struct thread *thread __maybe_unused)
{
	int this_cpu = sample->cpu, err = 0;
	void *data = sample->raw_data;
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, data);

	FILL_ARRAY(switch_event, prev_comm, event, data);
	FILL_FIELD(switch_event, prev_pid, event, data);
	FILL_FIELD(switch_event, prev_prio, event, data);
	FILL_FIELD(switch_event, prev_state, event, data);
	FILL_ARRAY(switch_event, next_comm, event, data);
	FILL_FIELD(switch_event, next_pid, event, data);
	FILL_FIELD(switch_event, next_prio, event, data);

	if (curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;
	}
	if (trace_handler->switch_event)
		err = trace_handler->switch_event(&switch_event, machine, event, sample);

	curr_pid[this_cpu] = switch_event.next_pid;
	return err;
}
static int process_sched_runtime_event(struct perf_tool *tool __maybe_unused,
				       struct event_format *event,
				       struct perf_sample *sample,
				       struct machine *machine,
				       struct thread *thread __maybe_unused)
{
	void *data = sample->raw_data;
	struct trace_runtime_event runtime_event;
	int err = 0;

	FILL_ARRAY(runtime_event, comm, event, data);
	FILL_FIELD(runtime_event, pid, event, data);
	FILL_FIELD(runtime_event, runtime, event, data);
	FILL_FIELD(runtime_event, vruntime, event, data);

	if (trace_handler->runtime_event)
		err = trace_handler->runtime_event(&runtime_event, machine, sample);

	return err;
}
static int process_sched_fork_event(struct perf_tool *tool __maybe_unused,
				    struct event_format *event,
				    struct perf_sample *sample,
				    struct machine *machine __maybe_unused,
				    struct thread *thread __maybe_unused)
{
	void *data = sample->raw_data;
	struct trace_fork_event fork_event;
	int err = 0;

	FILL_COMMON_FIELDS(fork_event, event, data);

	FILL_ARRAY(fork_event, parent_comm, event, data);
	FILL_FIELD(fork_event, parent_pid, event, data);
	FILL_ARRAY(fork_event, child_comm, event, data);
	FILL_FIELD(fork_event, child_pid, event, data);

	if (trace_handler->fork_event)
		err = trace_handler->fork_event(&fork_event, event);

	return err;
}
static int process_sched_exit_event(struct perf_tool *tool __maybe_unused,
				    struct event_format *event,
				    struct perf_sample *sample __maybe_unused,
				    struct machine *machine __maybe_unused,
				    struct thread *thread __maybe_unused)
{
	if (verbose)
		printf("sched_exit event %p\n", event);

	return 0;
}
static int process_sched_migrate_task_event(struct perf_tool *tool __maybe_unused,
					    struct event_format *event,
					    struct perf_sample *sample,
					    struct machine *machine,
					    struct thread *thread __maybe_unused)
{
	void *data = sample->raw_data;
	struct trace_migrate_task_event migrate_task_event;
	int err = 0;

	FILL_COMMON_FIELDS(migrate_task_event, event, data);

	FILL_ARRAY(migrate_task_event, comm, event, data);
	FILL_FIELD(migrate_task_event, pid, event, data);
	FILL_FIELD(migrate_task_event, prio, event, data);
	FILL_FIELD(migrate_task_event, cpu, event, data);

	if (trace_handler->migrate_task_event)
		err = trace_handler->migrate_task_event(&migrate_task_event, machine, sample);

	return err;
}
typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct event_format *tp_format,
				  struct perf_sample *sample,
				  struct machine *machine,
				  struct thread *thread);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct perf_evsel *evsel,
						 struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid);
	int err = 0;

	if (thread == NULL) {
		pr_debug("problem processing %s event, skipping it.\n",
			 perf_evsel__name(evsel));
		return -1;
	}

	evsel->hists.stats.total_period += sample->period;
	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);

	if (evsel->handler.func != NULL) {
		tracepoint_handler f = evsel->handler.func;
		err = f(tool, evsel->tp_format, sample, machine, thread);
	}

	return err;
}

static struct perf_tool perf_sched = {
	.sample		 = perf_sched__process_tracepoint_sample,
	.comm		 = perf_event__process_comm,
	.lost		 = perf_event__process_lost,
	.fork		 = perf_event__process_task,
	.ordered_samples = true,
};
static int read_events(bool destroy, struct perf_session **psession)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_process_fork", process_sched_fork_event, },
		{ "sched:sched_process_exit", process_sched_exit_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;

	session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_sched);
	if (session == NULL) {
		pr_debug("No Memory for session\n");
		return -1;
	}

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session, &perf_sched);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			goto out_delete;
		}

		nr_events      = session->hists.stats.nr_events[0];
		nr_lost_events = session->hists.stats.total_lost;
		nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
	}

	if (destroy)
		perf_session__delete(session);

	if (psession)
		*psession = session;

	return 0;

out_delete:
	perf_session__delete(session);
	return -1;
}
static void print_bad_events(void)
{
	if (nr_unordered_timestamps && nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}
static int __cmd_lat(void)
{
	struct rb_node *next;
	struct perf_session *session;

	setup_pager();
	if (read_events(false, &session))
		return -1;

	sort_lat();

	printf("\n ---------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at      |\n");
	printf(" ---------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events();
	printf("\n");

	perf_session__delete(session);
	return 0;
}

static struct trace_sched_handler map_ops = {
	.wakeup_event		= NULL,
	.switch_event		= map_switch_event,
	.runtime_event		= NULL,
};

static int __cmd_map(void)
{
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	if (read_events(true, NULL))
		return -1;
	print_bad_events();
	return 0;
}
static int __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	if (read_events(true, NULL))
		return -1;

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized:  %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();

	return 0;
}
static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|script}",
	NULL
};

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};
static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INCR('v', "verbose", &verbose,
		   "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};
static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add("pid", &cmp_pid);
}
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch",
	"-e", "sched:sched_stat_wait",
	"-e", "sched:sched_stat_sleep",
	"-e", "sched:sched_stat_iowait",
	"-e", "sched:sched_stat_runtime",
	"-e", "sched:sched_process_exit",
	"-e", "sched:sched_process_fork",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_migrate_task",
};
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}
int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);

	symbol__init();
	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		return __cmd_lat();
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
		setup_sorting();
		return __cmd_map();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return __cmd_replay();
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}