cpufreq: governor: Rename skip_work to work_count
author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 8 Feb 2016 22:41:10 +0000 (23:41 +0100)
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Wed, 9 Mar 2016 13:40:57 +0000 (14:40 +0100)
The skip_work field in struct policy_dbs_info technically is a
counter, so give it a new name to reflect that.

No functional changes.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_governor.h

index 7c08d8360f72113b0372f752912594b6bb08d3d7..298be52adea00d12da2fed8f5fb4eb473a69bae1 100644 (file)
@@ -196,16 +196,16 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
 static void gov_cancel_work(struct policy_dbs_info *policy_dbs)
 {
        /* Tell dbs_update_util_handler() to skip queuing up work items. */
-       atomic_inc(&policy_dbs->skip_work);
+       atomic_inc(&policy_dbs->work_count);
        /*
         * If dbs_update_util_handler() is already running, it may not notice
-        * the incremented skip_work, so wait for it to complete to prevent its
+        * the incremented work_count, so wait for it to complete to prevent its
         * work item from being queued up after the cancel_work_sync() below.
         */
        gov_clear_update_util(policy_dbs->policy);
        irq_work_sync(&policy_dbs->irq_work);
        cancel_work_sync(&policy_dbs->work);
-       atomic_set(&policy_dbs->skip_work, 0);
+       atomic_set(&policy_dbs->work_count, 0);
 }
 
 static void dbs_work_handler(struct work_struct *work)
@@ -234,7 +234,7 @@ static void dbs_work_handler(struct work_struct *work)
         * up using a stale sample delay value.
         */
        smp_mb__before_atomic();
-       atomic_dec(&policy_dbs->skip_work);
+       atomic_dec(&policy_dbs->work_count);
 }
 
 static void dbs_irq_work(struct irq_work *irq_work)
@@ -267,7 +267,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
         * - The governor is being stopped.
         * - It is too early (too little time from the previous sample).
         */
-       if (atomic_inc_return(&policy_dbs->skip_work) == 1) {
+       if (atomic_inc_return(&policy_dbs->work_count) == 1) {
                u64 delta_ns;
 
                delta_ns = time - policy_dbs->last_sample_time;
@@ -277,7 +277,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
                        return;
                }
        }
-       atomic_dec(&policy_dbs->skip_work);
+       atomic_dec(&policy_dbs->work_count);
 }
 
 static void set_sampling_rate(struct dbs_data *dbs_data,
@@ -305,7 +305,7 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
                return NULL;
 
        mutex_init(&policy_dbs->timer_mutex);
-       atomic_set(&policy_dbs->skip_work, 0);
+       atomic_set(&policy_dbs->work_count, 0);
        init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
        INIT_WORK(&policy_dbs->work, dbs_work_handler);
 
index 95e6834d36a831c8a43701aa84c023e3b79a2c1b..37537220e48cc7421399db34b5b09e66f13cdf7b 100644 (file)
@@ -149,7 +149,7 @@ struct policy_dbs_info {
 
        u64 last_sample_time;
        s64 sample_delay_ns;
-       atomic_t skip_work;
+       atomic_t work_count;
        struct irq_work irq_work;
        struct work_struct work;
        /* dbs_data may be shared between multiple policy objects */
This page took 0.028422 seconds and 5 git commands to generate.