zsmalloc: add trace events for zs_compact
[deliverable/linux.git] / mm / zsmalloc.c
index b0bc023d25c539e05b5a874b65dcf575a136f271..7b5fd2b9095e200d577e61d6ad755209e3dd1c4b 100644 (file)
@@ -31,6 +31,8 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#define CREATE_TRACE_POINTS
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -53,6 +55,7 @@
 #include <linux/mount.h>
 #include <linux/migrate.h>
 #include <linux/pagemap.h>
+#include <trace/events/zsmalloc.h>
 
 #define ZSPAGE_MAGIC   0x58
 
@@ -1767,9 +1770,12 @@ struct zs_compact_control {
        /* Destination page for migration which should be a first page
         * of zspage. */
        struct page *d_page;
-        /* Starting object index within @s_page which used for live object
-         * in the subpage. */
+       /* Starting object index within @s_page which used for live object
+        * in the subpage. */
        int obj_idx;
+
+       unsigned long nr_migrated_obj;
+       unsigned long nr_freed_pages;
 };
 
 static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
@@ -1803,6 +1809,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
                free_obj = obj_malloc(class, get_zspage(d_page), handle);
                zs_object_copy(class, free_obj, used_obj);
                obj_idx++;
+               cc->nr_migrated_obj++;
                /*
                 * record_obj updates handle's value to free_obj and it will
                 * invalidate lock bit(ie, HANDLE_PIN_BIT) of handle, which
@@ -2260,7 +2267,10 @@ static unsigned long zs_can_compact(struct size_class *class)
 
 static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 {
-       struct zs_compact_control cc;
+       struct zs_compact_control cc = {
+               .nr_migrated_obj = 0,
+               .nr_freed_pages = 0,
+       };
        struct zspage *src_zspage;
        struct zspage *dst_zspage = NULL;
 
@@ -2292,7 +2302,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
                putback_zspage(class, dst_zspage);
                if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
                        free_zspage(pool, class, src_zspage);
-                       pool->stats.pages_compacted += class->pages_per_zspage;
+                       cc.nr_freed_pages += class->pages_per_zspage;
                }
                spin_unlock(&class->lock);
                cond_resched();
@@ -2303,12 +2313,18 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
                putback_zspage(class, src_zspage);
 
        spin_unlock(&class->lock);
+
+       pool->stats.pages_compacted += cc.nr_freed_pages;
+       trace_zs_compact(class->index, cc.nr_migrated_obj, cc.nr_freed_pages);
 }
 
 unsigned long zs_compact(struct zs_pool *pool)
 {
        int i;
        struct size_class *class;
+       unsigned long pages_compacted_before = pool->stats.pages_compacted;
+
+       trace_zs_compact_start(pool->name);
 
        for (i = zs_size_classes - 1; i >= 0; i--) {
                class = pool->size_class[i];
@@ -2319,6 +2335,9 @@ unsigned long zs_compact(struct zs_pool *pool)
                __zs_compact(pool, class);
        }
 
+       trace_zs_compact_end(pool->name,
+               pool->stats.pages_compacted - pages_compacted_before);
+
        return pool->stats.pages_compacted;
 }
 EXPORT_SYMBOL_GPL(zs_compact);
This page took 0.031524 seconds and 5 git commands to generate.