Merge tag 'perf-core-for-mingo-20160615' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c4bfe11479a0e0d7559ff941c63e96557400bce4..1b918aa075d6a13a2f9cf07784c6f3285f90f45f 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -44,6 +44,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
        perf_evlist__set_maps(evlist, cpus, threads);
        fdarray__init(&evlist->pollfd, 64);
        evlist->workload.pid = -1;
+       evlist->backward = false;
 }
 
 struct perf_evlist *perf_evlist__new(void)
@@ -461,9 +462,9 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
        return 0;
 }
 
-static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
+static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx, short revent)
 {
-       int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
+       int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
        /*
         * Save the idx so that when we filter out fds POLLHUP'ed we can
         * close the associated evlist->mmap[] entry.
@@ -479,10 +480,11 @@ static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx
 
 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
 {
-       return __perf_evlist__add_pollfd(evlist, fd, -1);
+       return __perf_evlist__add_pollfd(evlist, fd, -1, POLLIN);
 }
 
-static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
+static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
+                                        void *arg __maybe_unused)
 {
        struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
 
@@ -492,7 +494,7 @@ static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
 {
        return fdarray__filter(&evlist->pollfd, revents_and_mask,
-                              perf_evlist__munmap_filtered);
+                              perf_evlist__munmap_filtered, NULL);
 }
 
 int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
@@ -679,6 +681,33 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
        return NULL;
 }
 
+static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
+{
+       int i;
+
+       for (i = 0; i < evlist->nr_mmaps; i++) {
+               int fd = evlist->mmap[i].fd;
+               int err;
+
+               if (fd < 0)
+                       continue;
+               err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+int perf_evlist__pause(struct perf_evlist *evlist)
+{
+       return perf_evlist__set_paused(evlist, true);
+}
+
+int perf_evlist__resume(struct perf_evlist *evlist)
+{
+       return perf_evlist__set_paused(evlist, false);
+}
+
 /* When check_messup is true, 'end' must point to a good entry */
 static union perf_event *
 perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
@@ -749,7 +778,7 @@ broken_event:
        return event;
 }
 
-union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
 {
        struct perf_mmap *md = &evlist->mmap[idx];
        u64 head;
@@ -804,6 +833,13 @@ perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
        return perf_mmap__read(md, false, start, end, &md->prev);
 }
 
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+{
+       if (!evlist->backward)
+               return perf_evlist__mmap_read_forward(evlist, idx);
+       return perf_evlist__mmap_read_backward(evlist, idx);
+}
+
 void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
 {
        struct perf_mmap *md = &evlist->mmap[idx];
@@ -828,9 +864,11 @@ static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
 
 static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
 {
-       BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0);
+       struct perf_mmap *md = &evlist->mmap[idx];
+
+       BUG_ON(md->base && atomic_read(&md->refcnt) == 0);
 
-       if (atomic_dec_and_test(&evlist->mmap[idx].refcnt))
+       if (atomic_dec_and_test(&md->refcnt))
                __perf_evlist__munmap(evlist, idx);
 }
 
@@ -881,6 +919,7 @@ static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
        if (evlist->mmap[idx].base != NULL) {
                munmap(evlist->mmap[idx].base, evlist->mmap_len);
                evlist->mmap[idx].base = NULL;
+               evlist->mmap[idx].fd = -1;
                atomic_set(&evlist->mmap[idx].refcnt, 0);
        }
        auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
@@ -901,11 +940,18 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
 
 static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
+       int i;
+
        evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
        if (cpu_map__empty(evlist->cpus))
                evlist->nr_mmaps = thread_map__nr(evlist->threads);
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
-       return evlist->mmap != NULL ? 0 : -ENOMEM;
+       if (!evlist->mmap)
+               return -ENOMEM;
+
+       for (i = 0; i < evlist->nr_mmaps; i++)
+               evlist->mmap[i].fd = -1;
+       return 0;
 }
 
 struct mmap_params {
@@ -941,6 +987,7 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
                evlist->mmap[idx].base = NULL;
                return -1;
        }
+       evlist->mmap[idx].fd = fd;
 
        if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
                                &mp->auxtrace_mp, evlist->mmap[idx].base, fd))
@@ -949,15 +996,28 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
        return 0;
 }
 
+static bool
+perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
+                        struct perf_evsel *evsel)
+{
+       if (evsel->overwrite)
+               return false;
+       return true;
+}
+
 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                                       struct mmap_params *mp, int cpu,
                                       int thread, int *output)
 {
        struct perf_evsel *evsel;
+       int revent;
 
        evlist__for_each(evlist, evsel) {
                int fd;
 
+               if (evsel->overwrite != (evlist->overwrite && evlist->backward))
+                       continue;
+
                if (evsel->system_wide && thread)
                        continue;
 
@@ -974,6 +1034,8 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                        perf_evlist__mmap_get(evlist, idx);
                }
 
+               revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
+
                /*
                 * The system_wide flag causes a selected event to be opened
                 * always without a pid.  Consequently it will never get a
@@ -982,7 +1044,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                 * Therefore don't add it for polling.
                 */
                if (!evsel->system_wide &&
-                   __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
+                   __perf_evlist__add_pollfd(evlist, fd, idx, revent) < 0) {
                        perf_evlist__mmap_put(evlist, idx);
                        return -1;
                }
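
As an aside (not part of the commit), the hunks above add a pause/resume pair built on the PERF_EVENT_IOC_PAUSE_OUTPUT ioctl and make perf_evlist__mmap_read() dispatch to the backward reader when evlist->backward is set. Below is a minimal, hypothetical sketch of how a caller inside tools/perf (with util/evlist.h available) might combine them, assuming an evlist that was set up with backward ring buffers and is already mmap'ed; process_event() is a placeholder, not a real perf helper.

/*
 * Hypothetical usage sketch (not from the commit): drain backward-writing
 * ring buffers while output is paused, so the kernel cannot overwrite
 * entries as they are being read.
 */
static int drain_backward_rings(struct perf_evlist *evlist)
{
	union perf_event *event;
	int i, err;

	/* Stop the kernel from writing into the ring buffers. */
	err = perf_evlist__pause(evlist);
	if (err)
		return err;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		/* Re-sync the read position with the current head. */
		perf_evlist__mmap_read_catchup(evlist, i);

		/*
		 * With evlist->backward set, perf_evlist__mmap_read()
		 * dispatches to perf_evlist__mmap_read_backward().
		 */
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
			process_event(event);	/* placeholder */
	}

	/* Let the kernel resume writing. */
	return perf_evlist__resume(evlist);
}

Pausing before reading is what keeps the snapshot consistent: overwrite (backward) events are deliberately not added for polling (perf_evlist__should_poll() returns false for them), so the expected pattern is to stop the writer, drain the buffers, then resume.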