cpufreq: Make cpufreq_quick_get() safe to call
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f65553dc48c997ea6a31fc167c57ef6182cb103b..f870399f00b1c38a0d604bc7d1a2d2dc4c552a4f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -38,48 +38,10 @@ static inline bool policy_is_inactive(struct cpufreq_policy *policy)
        return cpumask_empty(policy->cpus);
 }
 
-static bool suitable_policy(struct cpufreq_policy *policy, bool active)
-{
-       return active == !policy_is_inactive(policy);
-}
-
-/* Finds Next Acive/Inactive policy */
-static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
-                                         bool active)
-{
-       do {
-               /* No more policies in the list */
-               if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
-                       return NULL;
-
-               policy = list_next_entry(policy, policy_list);
-       } while (!suitable_policy(policy, active));
-
-       return policy;
-}
-
-static struct cpufreq_policy *first_policy(bool active)
-{
-       struct cpufreq_policy *policy;
-
-       /* No policies in the list */
-       if (list_empty(&cpufreq_policy_list))
-               return NULL;
-
-       policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
-                                 policy_list);
-
-       if (!suitable_policy(policy, active))
-               policy = next_policy(policy, active);
-
-       return policy;
-}
-
 /* Macros to iterate over CPU policies */
-#define for_each_suitable_policy(__policy, __active)   \
-       for (__policy = first_policy(__active);         \
-            __policy;                                  \
-            __policy = next_policy(__policy, __active))
+#define for_each_suitable_policy(__policy, __active)                    \
+       list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
+               if ((__active) == !policy_is_inactive(__policy))
 
 #define for_each_active_policy(__policy)               \
        for_each_suitable_policy(__policy, true)
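
The new for_each_suitable_policy() above replaces the first_policy()/next_policy() helpers with a plain list_for_each_entry() walk followed by a filtering if. A minimal sketch of the same idiom, applied to a hypothetical device list rather than cpufreq's policy list (struct my_dev, my_dev_list and the enabled flag are illustrative only), assuming the kernel's <linux/list.h>:

#include <linux/list.h>
#include <linux/types.h>

struct my_dev {                         /* hypothetical example type */
        struct list_head node;
        bool enabled;
};

static LIST_HEAD(my_dev_list);

/* Expands to a for-loop followed by an if, so the statement written
 * after the macro only runs for entries that pass the filter. */
#define for_each_enabled_dev(__dev)                             \
        list_for_each_entry(__dev, &my_dev_list, node)          \
                if ((__dev)->enabled)

static unsigned int count_enabled_devs(void)
{
        struct my_dev *dev;
        unsigned int n = 0;

        for_each_enabled_dev(dev)
                n++;

        return n;
}

Because the macro ends in an if with no else, an else written directly after the loop body would bind to that hidden if, so users of this kind of macro avoid that construction.
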
@@ -103,52 +65,6 @@ static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
 
-static DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
-
-/**
- * cpufreq_set_update_util_data - Populate the CPU's update_util_data pointer.
- * @cpu: The CPU to set the pointer for.
- * @data: New pointer value.
- *
- * Set and publish the update_util_data pointer for the given CPU.  That pointer
- * points to a struct update_util_data object containing a callback function
- * to call from cpufreq_update_util().  That function will be called from an RCU
- * read-side critical section, so it must not sleep.
- *
- * Callers must use RCU callbacks to free any memory that might be accessed
- * via the old update_util_data pointer or invoke synchronize_rcu() right after
- * this function to avoid use-after-free.
- */
-void cpufreq_set_update_util_data(int cpu, struct update_util_data *data)
-{
-       rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
-}
-EXPORT_SYMBOL_GPL(cpufreq_set_update_util_data);
-
-/**
- * cpufreq_update_util - Take a note about CPU utilization changes.
- * @time: Current time.
- * @util: Current utilization.
- * @max: Utilization ceiling.
- *
- * This function is called by the scheduler on every invocation of
- * update_load_avg() on the CPU whose utilization is being updated.
- */
-void cpufreq_update_util(u64 time, unsigned long util, unsigned long max)
-{
-       struct update_util_data *data;
-
-       rcu_read_lock();
-
-       data = rcu_dereference(*this_cpu_ptr(&cpufreq_update_util_data));
-       if (data && data->func)
-               data->func(data, time, util, max);
-
-       rcu_read_unlock();
-}
-
-DEFINE_MUTEX(cpufreq_governor_lock);
-
 /* Flag to suspend/resume CPUFreq governors */
 static bool cpufreq_suspended;
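
The block removed above is the RCU-published per-CPU callback hook that cpufreq exposed to the scheduler via cpufreq_update_util(). The underlying pattern, publishing a pointer with rcu_assign_pointer() and dereferencing it inside rcu_read_lock()/rcu_read_unlock(), is sketched below with illustrative names (struct hook_data, hook_ptr, set_hook and run_hook are not cpufreq symbols):

#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct hook_data {
        void (*func)(struct hook_data *data, u64 time);
};

static DEFINE_PER_CPU(struct hook_data *, hook_ptr);

/* Publisher: make the new pointer visible to readers. */
static void set_hook(int cpu, struct hook_data *data)
{
        rcu_assign_pointer(per_cpu(hook_ptr, cpu), data);
}

/* Consumer: runs in a read-side critical section, so it must not sleep. */
static void run_hook(u64 time)
{
        struct hook_data *data;

        rcu_read_lock();
        data = rcu_dereference(*this_cpu_ptr(&hook_ptr));
        if (data && data->func)
                data->func(data, time);
        rcu_read_unlock();
}

As the removed kerneldoc notes, memory reachable through the old pointer must only be freed after a grace period (synchronize_rcu() or an RCU callback).
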
 
@@ -158,10 +74,8 @@ static inline bool has_target(void)
 }
 
 /* internal prototypes */
-static int __cpufreq_governor(struct cpufreq_policy *policy,
-               unsigned int event);
+static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
-static void handle_update(struct work_struct *work);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -863,12 +777,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
        ssize_t ret;
 
        down_read(&policy->rwsem);
-
-       if (fattr->show)
-               ret = fattr->show(policy, buf);
-       else
-               ret = -EIO;
-
+       ret = fattr->show(policy, buf);
        up_read(&policy->rwsem);
 
        return ret;
@@ -883,18 +792,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 
        get_online_cpus();
 
-       if (!cpu_online(policy->cpu))
-               goto unlock;
-
-       down_write(&policy->rwsem);
-
-       if (fattr->store)
+       if (cpu_online(policy->cpu)) {
+               down_write(&policy->rwsem);
                ret = fattr->store(policy, buf, count);
-       else
-               ret = -EIO;
+               up_write(&policy->rwsem);
+       }
 
-       up_write(&policy->rwsem);
-unlock:
        put_online_cpus();
 
        return ret;
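
The store() rework above keeps one ordering for every sysfs write: the CPU hotplug read lock pins cpu_online(policy->cpu), and only then is the policy rwsem taken for the actual attribute update. A condensed sketch of that ordering with a hypothetical attribute handler (example_store and the -EINVAL default are illustrative, not part of the patch):

#include <linux/cpu.h>
#include <linux/cpufreq.h>

static ssize_t example_store(struct cpufreq_policy *policy,
                             const char *buf, size_t count)
{
        ssize_t ret = -EINVAL;

        get_online_cpus();              /* keep CPUs from going away */

        if (cpu_online(policy->cpu)) {
                down_write(&policy->rwsem);
                /* parse buf and update the policy here */
                ret = count;
                up_write(&policy->rwsem);
        }

        put_online_cpus();

        return ret;
}
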
@@ -1049,36 +952,45 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
        if (cpumask_test_cpu(cpu, policy->cpus))
                return 0;
 
+       down_write(&policy->rwsem);
        if (has_target()) {
-               ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+               ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
-                       return ret;
+                       goto unlock;
                }
        }
 
-       down_write(&policy->rwsem);
        cpumask_set_cpu(cpu, policy->cpus);
-       up_write(&policy->rwsem);
 
        if (has_target()) {
-               ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+               ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
-                       ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+                       ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 
-               if (ret) {
+               if (ret)
                        pr_err("%s: Failed to start governor\n", __func__);
-                       return ret;
-               }
        }
 
-       return 0;
+unlock:
+       up_write(&policy->rwsem);
+       return ret;
+}
+
+static void handle_update(struct work_struct *work)
+{
+       struct cpufreq_policy *policy =
+               container_of(work, struct cpufreq_policy, update);
+       unsigned int cpu = policy->cpu;
+       pr_debug("handle_update for cpu %u called\n", cpu);
+       cpufreq_update_policy(cpu);
 }
 
 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 {
        struct device *dev = get_cpu_device(cpu);
        struct cpufreq_policy *policy;
+       int ret;
 
        if (WARN_ON(!dev))
                return NULL;
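
handle_update(), moved up here so its definition precedes its first user and the forward declaration removed earlier can go away, is the standard deferred-work shape: a work_struct embedded in a larger object, recovered in the handler with container_of(). A self-contained sketch of that shape with illustrative names (struct foo, foo_update_work):

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct foo {                            /* illustrative container object */
        int id;
        struct work_struct update;
};

/* Work handler: recover the containing object from the work_struct. */
static void foo_update_work(struct work_struct *work)
{
        struct foo *f = container_of(work, struct foo, update);

        pr_debug("update for foo %d\n", f->id);
}

static void foo_init(struct foo *f)
{
        INIT_WORK(&f->update, foo_update_work);
}

/* Later, from a context that cannot do the update synchronously:
 * schedule_work(&f->update); */
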
@@ -1096,7 +1008,13 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
        if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
                goto err_free_rcpumask;
 
-       kobject_init(&policy->kobj, &ktype_cpufreq);
+       ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+                                  cpufreq_global_kobject, "policy%u", cpu);
+       if (ret) {
+               pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+               goto err_free_real_cpus;
+       }
+
        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
@@ -1107,6 +1025,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
        policy->cpu = cpu;
        return policy;
 
+err_free_real_cpus:
+       free_cpumask_var(policy->real_cpus);
 err_free_rcpumask:
        free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
@@ -1211,16 +1131,6 @@ static int cpufreq_online(unsigned int cpu)
                cpumask_copy(policy->related_cpus, policy->cpus);
                /* Remember CPUs present at the policy creation time. */
                cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
-
-               /* Name and add the kobject */
-               ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
-                                 "policy%u",
-                                 cpumask_first(policy->related_cpus));
-               if (ret) {
-                       pr_err("%s: failed to add policy->kobj: %d\n", __func__,
-                              ret);
-                       goto out_exit_policy;
-               }
        }
 
        /*
@@ -1375,13 +1285,13 @@ static void cpufreq_offline(unsigned int cpu)
                return;
        }
 
+       down_write(&policy->rwsem);
        if (has_target()) {
-               ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+               ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret)
                        pr_err("%s: Failed to stop governor\n", __func__);
        }
 
-       down_write(&policy->rwsem);
        cpumask_clear_cpu(cpu, policy->cpus);
 
        if (policy_is_inactive(policy)) {
@@ -1394,20 +1304,19 @@ static void cpufreq_offline(unsigned int cpu)
                /* Nominate new CPU */
                policy->cpu = cpumask_any(policy->cpus);
        }
-       up_write(&policy->rwsem);
 
        /* Start governor again for active policy */
        if (!policy_is_inactive(policy)) {
                if (has_target()) {
-                       ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+                       ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
                        if (!ret)
-                               ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+                               ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 
                        if (ret)
                                pr_err("%s: Failed to start governor\n", __func__);
                }
 
-               return;
+               goto unlock;
        }
 
        if (cpufreq_driver->stop_cpu)
@@ -1415,7 +1324,7 @@ static void cpufreq_offline(unsigned int cpu)
 
        /* If cpu is last user of policy, free policy */
        if (has_target()) {
-               ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                if (ret)
                        pr_err("%s: Failed to exit governor\n", __func__);
        }
@@ -1429,6 +1338,9 @@ static void cpufreq_offline(unsigned int cpu)
                cpufreq_driver->exit(policy);
                policy->freq_table = NULL;
        }
+
+unlock:
+       up_write(&policy->rwsem);
 }
 
 /**
@@ -1454,15 +1366,6 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
                cpufreq_policy_free(policy, true);
 }
 
-static void handle_update(struct work_struct *work)
-{
-       struct cpufreq_policy *policy =
-               container_of(work, struct cpufreq_policy, update);
-       unsigned int cpu = policy->cpu;
-       pr_debug("handle_update for cpu %u called\n", cpu);
-       cpufreq_update_policy(cpu);
-}
-
 /**
  *     cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
  *     in deep trouble.
@@ -1498,9 +1401,17 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 {
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;
+       unsigned long flags;
 
-       if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
-               return cpufreq_driver->get(cpu);
+       read_lock_irqsave(&cpufreq_driver_lock, flags);
+
+       if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
+               ret_freq = cpufreq_driver->get(cpu);
+               read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+               return ret_freq;
+       }
+
+       read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        policy = cpufreq_cpu_get(cpu);
        if (policy) {
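
This hunk is the change the subject line refers to: the setpolicy-driver fast path in cpufreq_quick_get() now reads cpufreq_driver under cpufreq_driver_lock instead of dereferencing it unlocked, so the function is safe to call while a driver is being registered or torn down. The pattern, reduced to its essentials with illustrative names (struct example_driver, example_quick_get):

#include <linux/spinlock.h>

struct example_driver {                 /* illustrative stand-in */
        unsigned int (*get)(unsigned int cpu);
};

static DEFINE_RWLOCK(example_driver_lock);
static struct example_driver *example_driver;

unsigned int example_quick_get(unsigned int cpu)
{
        unsigned int freq = 0;
        unsigned long flags;

        /* The unregister path takes this lock for writing before it
         * clears example_driver, so ->get() cannot be invoked on a
         * half-torn-down driver. */
        read_lock_irqsave(&example_driver_lock, flags);

        if (example_driver && example_driver->get)
                freq = example_driver->get(cpu);

        read_unlock_irqrestore(&example_driver_lock, flags);

        return freq;
}
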
@@ -1625,6 +1536,7 @@ EXPORT_SYMBOL(cpufreq_generic_suspend);
 void cpufreq_suspend(void)
 {
        struct cpufreq_policy *policy;
+       int ret;
 
        if (!cpufreq_driver)
                return;
@@ -1635,7 +1547,11 @@ void cpufreq_suspend(void)
        pr_debug("%s: Suspending Governors\n", __func__);
 
        for_each_active_policy(policy) {
-               if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+               down_write(&policy->rwsem);
+               ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+               up_write(&policy->rwsem);
+
+               if (ret)
                        pr_err("%s: Failed to stop governor for policy: %p\n",
                                __func__, policy);
                else if (cpufreq_driver->suspend
@@ -1657,6 +1573,7 @@ suspend:
 void cpufreq_resume(void)
 {
        struct cpufreq_policy *policy;
+       int ret;
 
        if (!cpufreq_driver)
                return;
@@ -1669,13 +1586,20 @@ void cpufreq_resume(void)
        pr_debug("%s: Resuming Governors\n", __func__);
 
        for_each_active_policy(policy) {
-               if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+               if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                                policy);
-               else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
-                   || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
-                       pr_err("%s: Failed to start governor for policy: %p\n",
-                               __func__, policy);
+               } else {
+                       down_write(&policy->rwsem);
+                       ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
+                       if (!ret)
+                               cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+                       up_write(&policy->rwsem);
+
+                       if (ret)
+                               pr_err("%s: Failed to start governor for policy: %p\n",
+                                      __func__, policy);
+               }
        }
 
        /*
@@ -1887,7 +1811,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int relation)
 {
        unsigned int old_target_freq = target_freq;
-       int retval = -EINVAL;
+       struct cpufreq_frequency_table *freq_table;
+       int index, retval;
 
        if (cpufreq_disabled())
                return -ENODEV;
@@ -1914,34 +1839,28 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
        policy->restore_freq = policy->cur;
 
        if (cpufreq_driver->target)
-               retval = cpufreq_driver->target(policy, target_freq, relation);
-       else if (cpufreq_driver->target_index) {
-               struct cpufreq_frequency_table *freq_table;
-               int index;
-
-               freq_table = cpufreq_frequency_get_table(policy->cpu);
-               if (unlikely(!freq_table)) {
-                       pr_err("%s: Unable to find freq_table\n", __func__);
-                       goto out;
-               }
+               return cpufreq_driver->target(policy, target_freq, relation);
 
-               retval = cpufreq_frequency_table_target(policy, freq_table,
-                               target_freq, relation, &index);
-               if (unlikely(retval)) {
-                       pr_err("%s: Unable to find matching freq\n", __func__);
-                       goto out;
-               }
+       if (!cpufreq_driver->target_index)
+               return -EINVAL;
 
-               if (freq_table[index].frequency == policy->cur) {
-                       retval = 0;
-                       goto out;
-               }
+       freq_table = cpufreq_frequency_get_table(policy->cpu);
+       if (unlikely(!freq_table)) {
+               pr_err("%s: Unable to find freq_table\n", __func__);
+               return -EINVAL;
+       }
 
-               retval = __target_index(policy, freq_table, index);
+       retval = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+                                               relation, &index);
+       if (unlikely(retval)) {
+               pr_err("%s: Unable to find matching freq\n", __func__);
+               return retval;
        }
 
-out:
-       return retval;
+       if (freq_table[index].frequency == policy->cur)
+               return 0;
+
+       return __target_index(policy, freq_table, index);
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
 
@@ -1966,8 +1885,7 @@ __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
        return NULL;
 }
 
-static int __cpufreq_governor(struct cpufreq_policy *policy,
-                                       unsigned int event)
+static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 {
        int ret;
 
@@ -2001,21 +1919,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 
        pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
 
-       mutex_lock(&cpufreq_governor_lock);
-       if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
-           || (!policy->governor_enabled
-           && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
-               mutex_unlock(&cpufreq_governor_lock);
-               return -EBUSY;
-       }
-
-       if (event == CPUFREQ_GOV_STOP)
-               policy->governor_enabled = false;
-       else if (event == CPUFREQ_GOV_START)
-               policy->governor_enabled = true;
-
-       mutex_unlock(&cpufreq_governor_lock);
-
        ret = policy->governor->governor(policy, event);
 
        if (!ret) {
@@ -2023,14 +1926,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
                        policy->governor->initialized++;
                else if (event == CPUFREQ_GOV_POLICY_EXIT)
                        policy->governor->initialized--;
-       } else {
-               /* Restore original values */
-               mutex_lock(&cpufreq_governor_lock);
-               if (event == CPUFREQ_GOV_STOP)
-                       policy->governor_enabled = true;
-               else if (event == CPUFREQ_GOV_START)
-                       policy->governor_enabled = false;
-               mutex_unlock(&cpufreq_governor_lock);
        }
 
        if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
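
The governor_enabled bookkeeping and cpufreq_governor_lock deleted above existed to catch governor start/stop calls racing with one another; the surrounding hunks now take policy->rwsem for writing around the cpufreq_governor() calls, so that serialization comes from the rwsem instead. A hedged sketch of stating that expectation as a lockdep check rather than a second lock (the assertion is illustrative, not something this patch adds):

#include <linux/cpufreq.h>
#include <linux/lockdep.h>

/* Illustrative only: document and enforce "caller holds policy->rwsem". */
static int example_governor_call(struct cpufreq_policy *policy,
                                 unsigned int event)
{
        lockdep_assert_held(&policy->rwsem);

        return policy->governor->governor(policy, event);
}
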
@@ -2185,7 +2080,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
        old_gov = policy->governor;
        /* end old governor */
        if (old_gov) {
-               ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+               ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        /* This can happen due to race with other operations */
                        pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
@@ -2193,7 +2088,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
                        return ret;
                }
 
-               ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                if (ret) {
                        pr_err("%s: Failed to Exit Governor: %s (%d)\n",
                               __func__, old_gov->name, ret);
@@ -2203,30 +2098,30 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 
        /* start new governor */
        policy->governor = new_policy->governor;
-       ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+       ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
        if (!ret) {
-               ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+               ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        goto out;
 
-               __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
        }
 
        /* new governor failed, so re-start old one */
        pr_debug("starting governor %s failed\n", policy->governor->name);
        if (old_gov) {
                policy->governor = old_gov;
-               if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+               if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
                        policy->governor = NULL;
                else
-                       __cpufreq_governor(policy, CPUFREQ_GOV_START);
+                       cpufreq_governor(policy, CPUFREQ_GOV_START);
        }
 
        return ret;
 
  out:
        pr_debug("governor: change or update limits\n");
-       return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+       return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }
 
 /**
@@ -2326,8 +2221,11 @@ static int cpufreq_boost_set_sw(int state)
                                       __func__);
                                break;
                        }
+
+                       down_write(&policy->rwsem);
                        policy->user_policy.max = policy->max;
-                       __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+                       cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+                       up_write(&policy->rwsem);
                }
        }
 
@@ -2413,7 +2311,7 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
  * submitted by the CPU Frequency driver.
  *
  * Registers a CPU Frequency driver to this core code. This code
- * returns zero on success, -EBUSY when another driver got here first
+ * returns zero on success, -EEXIST when another driver got here first
  * (and isn't unregistered in the meantime).
  *
  */