Merge remote-tracking branch 'cgroup/for-next'
author Stephen Rothwell <sfr@canb.auug.org.au>
Tue, 13 Sep 2016 02:53:12 +0000 (12:53 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Tue, 13 Sep 2016 02:53:12 +0000 (12:53 +1000)
fs/kernfs/dir.c
include/linux/cgroup.h
kernel/cgroup.c
kernel/sched/debug.c

diff --combined fs/kernfs/dir.c
index 390390212b4324855331862d0110515bc5c618e5,6e7fd37615f805962d7942df2b638de7ba733c6f..498e2a6f401eee7d1a0c881177a467f1022058ae
@@@ -110,8 -110,9 +110,9 @@@ static struct kernfs_node *kernfs_commo
   * kn_to:   /n1/n2/n3         [depth=3]
   * result:  /../..
   *
-  * return value: length of the string.  If greater than buflen,
-  * then contents of buf are undefined.  On error, -1 is returned.
+  * Returns the length of the full path.  If the full length is equal to or
+  * greater than @buflen, @buf contains the truncated path with the trailing
+  * '\0'.  On error, -errno is returned.
   */
  static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
                                        struct kernfs_node *kn_from,
  {
        struct kernfs_node *kn, *common;
        const char parent_str[] = "/..";
-       size_t depth_from, depth_to, len = 0, nlen = 0;
-       char *p;
-       int i;
+       size_t depth_from, depth_to, len = 0;
+       int i, j;
  
        if (!kn_from)
                kn_from = kernfs_root(kn_to)->kn;
  
        common = kernfs_common_ancestor(kn_from, kn_to);
        if (WARN_ON(!common))
-               return -1;
+               return -EINVAL;
  
        depth_to = kernfs_depth(common, kn_to);
        depth_from = kernfs_depth(common, kn_from);
                               len < buflen ? buflen - len : 0);
  
        /* Calculate how many bytes we need for the rest */
-       for (kn = kn_to; kn != common; kn = kn->parent)
-               nlen += strlen(kn->name) + 1;
-       if (len + nlen >= buflen)
-               return len + nlen;
-       p = buf + len + nlen;
-       *p = '\0';
-       for (kn = kn_to; kn != common; kn = kn->parent) {
-               size_t tmp = strlen(kn->name);
-               p -= tmp;
-               memcpy(p, kn->name, tmp);
-               *(--p) = '/';
+       for (i = depth_to - 1; i >= 0; i--) {
+               for (kn = kn_to, j = 0; j < i; j++)
+                       kn = kn->parent;
+               len += strlcpy(buf + len, "/",
+                              len < buflen ? buflen - len : 0);
+               len += strlcpy(buf + len, kn->name,
+                              len < buflen ? buflen - len : 0);
        }
  
-       return len + nlen;
+       return len;
  }
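
Editor's note: the rewritten loop builds the path front-to-back with strlcpy(), passing a size of 0 once the buffer is exhausted so that len keeps accumulating the length the full path would need even after truncation. A minimal userspace sketch of that idiom, assuming BSD strlcpy() semantics (xstrlcpy(), build_path(), and the name list are illustrative stand-ins, not kernel code):

    #include <stdio.h>
    #include <string.h>

    /*
     * BSD strlcpy() semantics: copy at most size-1 bytes, always
     * NUL-terminate when size > 0, and return strlen(src) regardless
     * of truncation.  With size == 0 nothing is written at all.
     */
    static size_t xstrlcpy(char *dst, const char *src, size_t size)
    {
    	size_t len = strlen(src);

    	if (size) {
    		size_t n = len >= size ? size - 1 : len;

    		memcpy(dst, src, n);
    		dst[n] = '\0';
    	}
    	return len;
    }

    static int build_path(const char **names, int depth, char *buf, size_t buflen)
    {
    	size_t len = 0;
    	int i;

    	for (i = 0; i < depth; i++) {
    		/* Once len >= buflen, pass 0 so only the needed length grows. */
    		len += xstrlcpy(buf + len, "/", len < buflen ? buflen - len : 0);
    		len += xstrlcpy(buf + len, names[i], len < buflen ? buflen - len : 0);
    	}
    	return len;
    }

    int main(void)
    {
    	const char *names[] = { "n1", "n2", "n3" };
    	char buf[6];
    	int ret = build_path(names, 3, buf, sizeof(buf));

    	printf("%d \"%s\"\n", ret, buf);	/* prints: 9 "/n1/n" */
    	return 0;
    }

With a 6-byte buffer the sketch returns 9 and leaves "/n1/n" in buf, which is exactly the truncation contract the updated doc comment describes.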
  
  /**
@@@ -185,29 -179,6 +179,6 @@@ int kernfs_name(struct kernfs_node *kn
        return ret;
  }
  
- /**
-  * kernfs_path_len - determine the length of the full path of a given node
-  * @kn: kernfs_node of interest
-  *
-  * The returned length doesn't include the space for the terminating '\0'.
-  */
- size_t kernfs_path_len(struct kernfs_node *kn)
- {
-       size_t len = 0;
-       unsigned long flags;
-       spin_lock_irqsave(&kernfs_rename_lock, flags);
-       do {
-               len += strlen(kn->name) + 1;
-               kn = kn->parent;
-       } while (kn && kn->parent);
-       spin_unlock_irqrestore(&kernfs_rename_lock, flags);
-       return len;
- }
  /**
   * kernfs_path_from_node - build path of node @to relative to @from.
   * @from: parent kernfs_node relative to which we need to build the path
   * path (which includes '..'s) as needed to reach from @from to @to is
   * returned.
   *
-  * If @buf isn't long enough, the return value will be greater than @buflen
-  * and @buf contents are undefined.
+  * Returns the length of the full path.  If the full length is equal to or
+  * greater than @buflen, @buf contains the truncated path with the trailing
+  * '\0'.  On error, -errno is returned.
   */
  int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
                          char *buf, size_t buflen)
  }
  EXPORT_SYMBOL_GPL(kernfs_path_from_node);
  
- /**
-  * kernfs_path - build full path of a given node
-  * @kn: kernfs_node of interest
-  * @buf: buffer to copy @kn's name into
-  * @buflen: size of @buf
-  *
-  * Builds and returns the full path of @kn in @buf of @buflen bytes.  The
-  * path is built from the end of @buf so the returned pointer usually
-  * doesn't match @buf.  If @buf isn't long enough, @buf is nul terminated
-  * and %NULL is returned.
-  */
- char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
- {
-       int ret;
-       ret = kernfs_path_from_node(kn, NULL, buf, buflen);
-       if (ret < 0 || ret >= buflen)
-               return NULL;
-       return buf;
- }
- EXPORT_SYMBOL_GPL(kernfs_path);
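
Editor's note: both pointer-returning helpers disappear from dir.c. Presumably kernfs_path() survives as a thin inline in include/linux/kernfs.h that forwards to kernfs_path_from_node() and returns the new int length; a hedged sketch of that wrapper under the strlcpy-style convention:

    /*
     * Hedged sketch: kernfs_path() as a trivial wrapper.  Whether the
     * tree keeps it exactly like this is an assumption here.
     */
    static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
    {
    	return kernfs_path_from_node(kn, NULL, buf, buflen);
    }
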
  /**
   * pr_cont_kernfs_name - pr_cont name of a kernfs_node
   * @kn: kernfs_node of interest
@@@ -1096,17 -1046,13 +1046,17 @@@ static int kernfs_iop_rmdir(struct inod
  }
  
  static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
 -                           struct inode *new_dir, struct dentry *new_dentry)
 +                           struct inode *new_dir, struct dentry *new_dentry,
 +                           unsigned int flags)
  {
        struct kernfs_node *kn  = old_dentry->d_fsdata;
        struct kernfs_node *new_parent = new_dir->i_private;
        struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
        int ret;
  
 +      if (flags)
 +              return -EINVAL;
 +
        if (!scops || !scops->rename)
                return -EPERM;
  
diff --combined include/linux/cgroup.h
index a4414a11eea71329d07e8cae504ce6f9791ed2c6,6df36361a492405644c9a4c61ba01d14ceeb9d29..c4688742ddc402030df5049a1051fae5f00aa8c3
@@@ -97,7 -97,7 +97,7 @@@ int cgroup_add_legacy_cftypes(struct cg
  int cgroup_rm_cftypes(struct cftype *cfts);
  void cgroup_file_notify(struct cgroup_file *cfile);
  
- char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
  int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
  int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                     struct pid *pid, struct task_struct *tsk);
@@@ -497,23 -497,6 +497,23 @@@ static inline bool cgroup_is_descendant
        return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
  }
  
 +/**
 + * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 + * @task: the task to be tested
 + * @ancestor: possible ancestor of @task's cgroup
 + *
 + * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 + * It follows all the same rules as cgroup_is_descendant, and only applies
 + * to the default hierarchy.
 + */
 +static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
 +                                             struct cgroup *ancestor)
 +{
 +      struct css_set *cset = task_css_set(task);
 +
 +      return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
 +}
 +
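
Editor's note: task_css_set() needs RCU (or css_set_lock) protection, so a caller of the new helper — which presumably targets users such as the BPF current-task-under-cgroup test — would look roughly like this (current_under() is a hypothetical wrapper, not kernel API):

    /* Hedged usage sketch; current_under() is illustrative only. */
    static bool current_under(struct cgroup *ancestor)
    {
    	bool ret;

    	rcu_read_lock();
    	ret = task_under_cgroup_hierarchy(current, ancestor);
    	rcu_read_unlock();

    	return ret;
    }
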
  /* no synchronization, the result can only be used as a hint */
  static inline bool cgroup_is_populated(struct cgroup *cgrp)
  {
@@@ -555,8 -538,7 +555,7 @@@ static inline int cgroup_name(struct cg
        return kernfs_name(cgrp->kn, buf, buflen);
  }
  
- static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
-                                             size_t buflen)
+ static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
  {
        return kernfs_path(cgrp->kn, buf, buflen);
  }
@@@ -574,7 -556,6 +573,7 @@@ static inline void pr_cont_cgroup_path(
  #else /* !CONFIG_CGROUPS */
  
  struct cgroup_subsys_state;
 +struct cgroup;
  
  static inline void css_put(struct cgroup_subsys_state *css) {}
  static inline int cgroup_attach_task_all(struct task_struct *from,
@@@ -592,11 -573,6 +591,11 @@@ static inline void cgroup_free(struct t
  static inline int cgroup_init_early(void) { return 0; }
  static inline int cgroup_init(void) { return 0; }
  
 +static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
 +                                             struct cgroup *ancestor)
 +{
 +      return true;
 +}
  #endif /* !CONFIG_CGROUPS */
  
  /*
@@@ -657,8 -633,8 +656,8 @@@ struct cgroup_namespace *copy_cgroup_ns
                                        struct user_namespace *user_ns,
                                        struct cgroup_namespace *old_ns);
  
- char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
-                    struct cgroup_namespace *ns);
+ int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+                  struct cgroup_namespace *ns);
  
  #else /* !CONFIG_CGROUPS */
  
diff --combined kernel/cgroup.c
index 9f51cdf58f5a127002e4fa6d10ac677fc9141675,5e2e81ad9175cd08762d7e8e907caf61b64974ad..0c4db7908264bf5c55c2271f5dd4ec65f05503bf
@@@ -64,6 -64,9 +64,9 @@@
  #include <linux/file.h>
  #include <net/sock.h>
  
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/cgroup.h>
  /*
   * pidlists linger the following amount before being destroyed.  The goal
   * is avoiding frequent destruction in the middle of consecutive read calls
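
Editor's note: defining CREATE_TRACE_POINTS before the include makes this translation unit instantiate the cgroup tracepoints used throughout the file (trace_cgroup_mkdir(), trace_cgroup_rmdir(), and friends). As a rough sketch of what such a definition in trace/events/cgroup.h might look like — the field layout here is an illustration, not the actual header:

    /* Hedged sketch of a cgroup tracepoint definition. */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM cgroup

    TRACE_EVENT(cgroup_mkdir,

    	TP_PROTO(struct cgroup *cgrp),

    	TP_ARGS(cgrp),

    	TP_STRUCT__entry(
    		__field(int, root)
    		__field(int, id)
    		__field(int, level)
    	),

    	TP_fast_assign(
    		__entry->root  = cgrp->root->hierarchy_id;
    		__entry->id    = cgrp->id;
    		__entry->level = cgrp->level;
    	),

    	TP_printk("root=%d id=%d level=%d",
    		  __entry->root, __entry->id, __entry->level)
    );
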
@@@ -1176,6 -1179,8 +1179,8 @@@ static void cgroup_destroy_root(struct 
        struct cgroup *cgrp = &root->cgrp;
        struct cgrp_cset_link *link, *tmp_link;
  
+       trace_cgroup_destroy_root(root);
        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
  
        BUG_ON(atomic_read(&root->nr_cgrps));
@@@ -1874,6 -1879,9 +1879,9 @@@ static int cgroup_remount(struct kernfs
                strcpy(root->release_agent_path, opts.release_agent);
                spin_unlock(&release_agent_path_lock);
        }
+       trace_cgroup_remount(root);
   out_unlock:
        kfree(opts.release_agent);
        kfree(opts.name);
@@@ -2031,6 -2039,8 +2039,8 @@@ static int cgroup_setup_root(struct cgr
        if (ret)
                goto destroy_root;
  
+       trace_cgroup_setup_root(root);
        /*
         * There must be no failure case after here, since rebinding takes
         * care of subsystems' refcounts, which are explicitly dropped in
@@@ -2315,22 -2325,18 +2325,18 @@@ static struct file_system_type cgroup2_
        .fs_flags = FS_USERNS_MOUNT,
  };
  
- static char *cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
-                                  struct cgroup_namespace *ns)
+ static int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
+                                struct cgroup_namespace *ns)
  {
        struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);
-       int ret;
  
-       ret = kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
-       if (ret < 0 || ret >= buflen)
-               return NULL;
-       return buf;
+       return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
  }
  
- char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
-                    struct cgroup_namespace *ns)
+ int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+                  struct cgroup_namespace *ns)
  {
-       char *ret;
+       int ret;
  
        mutex_lock(&cgroup_mutex);
        spin_lock_irq(&css_set_lock);
@@@ -2357,12 -2363,12 +2363,12 @@@ EXPORT_SYMBOL_GPL(cgroup_path_ns)
   *
   * Return value is the same as kernfs_path().
   */
- char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
+ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
  {
        struct cgroup_root *root;
        struct cgroup *cgrp;
        int hierarchy_id = 1;
-       char *path = NULL;
+       int ret;
  
        mutex_lock(&cgroup_mutex);
        spin_lock_irq(&css_set_lock);
  
        if (root) {
                cgrp = task_cgroup_from_root(task, root);
-               path = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
+               ret = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
        } else {
                /* if no hierarchy exists, everyone is in "/" */
-               if (strlcpy(buf, "/", buflen) < buflen)
-                       path = buf;
+               ret = strlcpy(buf, "/", buflen);
        }
  
        spin_unlock_irq(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
-       return path;
+       return ret;
  }
  EXPORT_SYMBOL_GPL(task_cgroup_path);
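
Editor's note: all three path helpers now share the strlcpy()-style contract: a negative return means error, and a return value >= buflen means @buf holds a truncated but NUL-terminated path. A hedged caller sketch (use_path() is hypothetical):

    char buf[PATH_MAX];
    int ret = task_cgroup_path(task, buf, sizeof(buf));

    if (ret < 0)
    	return ret;			/* -errno propagated from kernfs */
    if (ret >= sizeof(buf))
    	return -ENAMETOOLONG;		/* truncated; see proc_cgroup_show() below */
    use_path(buf);			/* use_path() is illustrative */
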
  
@@@ -2830,6 -2835,10 +2835,10 @@@ static int cgroup_attach_task(struct cg
                ret = cgroup_migrate(leader, threadgroup, dst_cgrp->root);
  
        cgroup_migrate_finish(&preloaded_csets);
+       if (!ret)
+               trace_cgroup_attach_task(dst_cgrp, leader, threadgroup);
        return ret;
  }
  
@@@ -3592,6 -3601,8 +3601,8 @@@ static int cgroup_rename(struct kernfs_
        mutex_lock(&cgroup_mutex);
  
        ret = kernfs_rename(kn, new_parent, new_name_str);
+       if (!ret)
+               trace_cgroup_rename(cgrp);
  
        mutex_unlock(&cgroup_mutex);
  
@@@ -4360,6 -4371,8 +4371,8 @@@ int cgroup_transfer_tasks(struct cgrou
  
                if (task) {
                        ret = cgroup_migrate(task, false, to->root);
+                       if (!ret)
+                               trace_cgroup_transfer_tasks(to, task, false);
                        put_task_struct(task);
                }
        } while (task && !ret);
@@@ -5025,6 -5038,8 +5038,8 @@@ static void css_release_work_fn(struct 
                        ss->css_released(css);
        } else {
                /* cgroup release path */
+               trace_cgroup_release(cgrp);
                cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
                cgrp->id = -1;
  
@@@ -5311,6 -5326,8 +5326,8 @@@ static int cgroup_mkdir(struct kernfs_n
        if (ret)
                goto out_destroy;
  
+       trace_cgroup_mkdir(cgrp);
        /* let's create and online css's */
        kernfs_activate(kn);
  
@@@ -5486,6 -5503,9 +5503,9 @@@ static int cgroup_rmdir(struct kernfs_n
  
        ret = cgroup_destroy_locked(cgrp);
  
+       if (!ret)
+               trace_cgroup_rmdir(cgrp);
        cgroup_kn_unlock(kn);
        return ret;
  }
@@@ -5606,12 -5626,6 +5626,12 @@@ int __init cgroup_init(void
        BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
        BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
  
 +      /*
 +       * The latency of the synchronize_sched() is too high for cgroups,
 +       * avoid it at the cost of forcing all readers into the slow path.
 +       */
 +      rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
 +
        get_user_ns(init_cgroup_ns.user_ns);
  
        mutex_lock(&cgroup_mutex);
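
Editor's note on the comment above: cgroup_threadgroup_rwsem is a percpu_rw_semaphore, and rcu_sync_enter_start() presumably just marks its rcu_sync state as already past a grace period, so percpu_down_read() takes the slow path from boot onward without ever paying for synchronize_sched(). Roughly:

    /*
     * Hedged sketch of rcu_sync_enter_start() (kernel/rcu/sync.c); the
     * GP_PASSED name is an assumption based on the rcu_sync state machine.
     */
    void rcu_sync_enter_start(struct rcu_sync *rsp)
    {
    	rsp->gp_count++;
    	rsp->gp_state = GP_PASSED;
    }
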
@@@ -5722,7 -5736,7 +5742,7 @@@ core_initcall(cgroup_wq_init)
  int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                     struct pid *pid, struct task_struct *tsk)
  {
-       char *buf, *path;
+       char *buf;
        int retval;
        struct cgroup_root *root;
  
                 * " (deleted)" is appended to the cgroup path.
                 */
                if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
-                       path = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
+                       retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
                                                current->nsproxy->cgroup_ns);
-                       if (!path) {
+                       if (retval >= PATH_MAX) {
                                retval = -ENAMETOOLONG;
                                goto out_unlock;
                        }
+                       seq_puts(m, buf);
                } else {
-                       path = "/";
+                       seq_puts(m, "/");
                }
  
-               seq_puts(m, path);
                if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
                        seq_puts(m, " (deleted)\n");
                else
@@@ -6041,8 -6055,9 +6061,9 @@@ static void cgroup_release_agent(struc
  {
        struct cgroup *cgrp =
                container_of(work, struct cgroup, release_agent_work);
-       char *pathbuf = NULL, *agentbuf = NULL, *path;
+       char *pathbuf = NULL, *agentbuf = NULL;
        char *argv[3], *envp[3];
+       int ret;
  
        mutex_lock(&cgroup_mutex);
  
                goto out;
  
        spin_lock_irq(&css_set_lock);
-       path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
+       ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
        spin_unlock_irq(&css_set_lock);
-       if (!path)
+       if (ret >= PATH_MAX)
                goto out;
  
        argv[0] = agentbuf;
-       argv[1] = path;
+       argv[1] = pathbuf;
        argv[2] = NULL;
  
        /* minimal command environment */
diff --combined kernel/sched/debug.c
index 13935886a4711b2efd576f8890e1564f54991653,23cb609ba4ebce1b71d326a1f3aba7f0dd8bfd55..fa178b62ea79b53e3cbf37d78d65699e145d6b98
@@@ -369,12 -369,8 +369,12 @@@ static void print_cfs_group_stats(struc
  
  #define P(F) \
        SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
 +#define P_SCHEDSTAT(F) \
 +      SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
  #define PN(F) \
        SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
 +#define PN_SCHEDSTAT(F) \
 +      SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
  
        if (!se)
                return;
        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);
 -#ifdef CONFIG_SCHEDSTATS
        if (schedstat_enabled()) {
 -              PN(se->statistics.wait_start);
 -              PN(se->statistics.sleep_start);
 -              PN(se->statistics.block_start);
 -              PN(se->statistics.sleep_max);
 -              PN(se->statistics.block_max);
 -              PN(se->statistics.exec_max);
 -              PN(se->statistics.slice_max);
 -              PN(se->statistics.wait_max);
 -              PN(se->statistics.wait_sum);
 -              P(se->statistics.wait_count);
 +              PN_SCHEDSTAT(se->statistics.wait_start);
 +              PN_SCHEDSTAT(se->statistics.sleep_start);
 +              PN_SCHEDSTAT(se->statistics.block_start);
 +              PN_SCHEDSTAT(se->statistics.sleep_max);
 +              PN_SCHEDSTAT(se->statistics.block_max);
 +              PN_SCHEDSTAT(se->statistics.exec_max);
 +              PN_SCHEDSTAT(se->statistics.slice_max);
 +              PN_SCHEDSTAT(se->statistics.wait_max);
 +              PN_SCHEDSTAT(se->statistics.wait_sum);
 +              P_SCHEDSTAT(se->statistics.wait_count);
        }
 -#endif
        P(se->load.weight);
  #ifdef CONFIG_SMP
        P(se->avg.load_avg);
        P(se->avg.util_avg);
  #endif
 +
 +#undef PN_SCHEDSTAT
  #undef PN
 +#undef P_SCHEDSTAT
  #undef P
  }
  #endif
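
Editor's note: the CONFIG_SCHEDSTATS #ifdef can go away because the *_SCHEDSTAT macros route every access through schedstat_val(), which compiles to the bare field when schedstats are built in and folds to 0 otherwise. A hedged sketch of the accessors these macros presumably rely on (kernel/sched/stats.h; exact definitions may differ):

    #ifdef CONFIG_SCHEDSTATS
    # define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
    # define schedstat_val(var)		(var)
    # define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
    #else
    # define schedstat_enabled()		0
    # define schedstat_val(var)		0
    # define schedstat_val_or_zero(var)	0
    #endif
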
@@@ -415,7 -410,8 +415,8 @@@ static char *task_group_path(struct tas
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;
  
-       return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+       cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+       return group_path;
  }
  #endif
  
@@@ -434,9 -430,9 +435,9 @@@ print_task(struct seq_file *m, struct r
                p->prio);
  
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
 -              SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
 +              SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
 -              SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));
 +              SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
  
  #ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
@@@ -631,7 -627,9 +632,7 @@@ do {                                                                       
  #undef P64
  #endif
  
 -#ifdef CONFIG_SCHEDSTATS
 -#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
 -
 +#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
        if (schedstat_enabled()) {
                P(yld_count);
                P(sched_count);
                P(ttwu_count);
                P(ttwu_local);
        }
 -
  #undef P
 -#endif
 +
        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);
@@@ -870,14 -869,10 +871,14 @@@ void proc_sched_show_task(struct task_s
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
  #define P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
 +#define P_SCHEDSTAT(F) \
 +      SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
  #define __PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
  #define PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
 +#define PN_SCHEDSTAT(F) \
 +      SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))
  
        PN(se.exec_start);
        PN(se.vruntime);
  
        P(se.nr_migrations);
  
 -#ifdef CONFIG_SCHEDSTATS
        if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;
  
 -              PN(se.statistics.sum_sleep_runtime);
 -              PN(se.statistics.wait_start);
 -              PN(se.statistics.sleep_start);
 -              PN(se.statistics.block_start);
 -              PN(se.statistics.sleep_max);
 -              PN(se.statistics.block_max);
 -              PN(se.statistics.exec_max);
 -              PN(se.statistics.slice_max);
 -              PN(se.statistics.wait_max);
 -              PN(se.statistics.wait_sum);
 -              P(se.statistics.wait_count);
 -              PN(se.statistics.iowait_sum);
 -              P(se.statistics.iowait_count);
 -              P(se.statistics.nr_migrations_cold);
 -              P(se.statistics.nr_failed_migrations_affine);
 -              P(se.statistics.nr_failed_migrations_running);
 -              P(se.statistics.nr_failed_migrations_hot);
 -              P(se.statistics.nr_forced_migrations);
 -              P(se.statistics.nr_wakeups);
 -              P(se.statistics.nr_wakeups_sync);
 -              P(se.statistics.nr_wakeups_migrate);
 -              P(se.statistics.nr_wakeups_local);
 -              P(se.statistics.nr_wakeups_remote);
 -              P(se.statistics.nr_wakeups_affine);
 -              P(se.statistics.nr_wakeups_affine_attempts);
 -              P(se.statistics.nr_wakeups_passive);
 -              P(se.statistics.nr_wakeups_idle);
 +              PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
 +              PN_SCHEDSTAT(se.statistics.wait_start);
 +              PN_SCHEDSTAT(se.statistics.sleep_start);
 +              PN_SCHEDSTAT(se.statistics.block_start);
 +              PN_SCHEDSTAT(se.statistics.sleep_max);
 +              PN_SCHEDSTAT(se.statistics.block_max);
 +              PN_SCHEDSTAT(se.statistics.exec_max);
 +              PN_SCHEDSTAT(se.statistics.slice_max);
 +              PN_SCHEDSTAT(se.statistics.wait_max);
 +              PN_SCHEDSTAT(se.statistics.wait_sum);
 +              P_SCHEDSTAT(se.statistics.wait_count);
 +              PN_SCHEDSTAT(se.statistics.iowait_sum);
 +              P_SCHEDSTAT(se.statistics.iowait_count);
 +              P_SCHEDSTAT(se.statistics.nr_migrations_cold);
 +              P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
 +              P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
 +              P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
 +              P_SCHEDSTAT(se.statistics.nr_forced_migrations);
 +              P_SCHEDSTAT(se.statistics.nr_wakeups);
 +              P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
 +              P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
 +              P_SCHEDSTAT(se.statistics.nr_wakeups_local);
 +              P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
 +              P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
 +              P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
 +              P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
 +              P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
  
                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                __PN(avg_atom);
                __PN(avg_per_cpu);
        }
 -#endif
 +
        __P(nr_switches);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_voluntary_switches", (long long)p->nvcsw);
  #endif
        P(policy);
        P(prio);
 +#undef PN_SCHEDSTAT
  #undef PN
  #undef __PN
 +#undef P_SCHEDSTAT
  #undef P
  #undef __P
  