Merge back cpufreq changes for v4.7.
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 25 Apr 2016 13:44:01 +0000 (15:44 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 25 Apr 2016 13:44:01 +0000 (15:44 +0200)
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/intel_pstate.c

index e93405f0eac46565d18c25873d1113c3a1aa65fd,2f1ae568f74b16c107e0d899b09c04128c6ad35a..a48b998b3304db5eff1ebc9401274c5467b18533
@@@ -78,6 -78,11 +78,11 @@@ static int cpufreq_governor(struct cpuf
  static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
  static int cpufreq_start_governor(struct cpufreq_policy *policy);
  
+ static inline int cpufreq_exit_governor(struct cpufreq_policy *policy)
+ {
+       return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ }
+
  /**
   * Two notifier lists: the "policy" list is involved in the
   * validation process for a new CPU frequency policy; the
@@@ -429,6 -434,73 +434,73 @@@ void cpufreq_freq_transition_end(struc
  }
  EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
  
+ /*
+  * Fast frequency switching status count.  Positive means "enabled", negative
+  * means "disabled" and 0 means "not decided yet".
+  */
+ static int cpufreq_fast_switch_count;
+ static DEFINE_MUTEX(cpufreq_fast_switch_lock);
+
+ static void cpufreq_list_transition_notifiers(void)
+ {
+       struct notifier_block *nb;
+
+       pr_info("Registered transition notifiers:\n");
+
+       mutex_lock(&cpufreq_transition_notifier_list.mutex);
+
+       for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
+               pr_info("%pF\n", nb->notifier_call);
+
+       mutex_unlock(&cpufreq_transition_notifier_list.mutex);
+ }
+
+ /**
+  * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
+  * @policy: cpufreq policy to enable fast frequency switching for.
+  *
+  * Try to enable fast frequency switching for @policy.
+  *
+  * The attempt will fail if there is at least one transition notifier registered
+  * at this point, as fast frequency switching is quite fundamentally at odds
+  * with transition notifiers.  Thus if successful, it will make registration of
+  * transition notifiers fail going forward.
+  */
+ void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
+ {
+       lockdep_assert_held(&policy->rwsem);
+
+       if (!policy->fast_switch_possible)
+               return;
+
+       mutex_lock(&cpufreq_fast_switch_lock);
+       if (cpufreq_fast_switch_count >= 0) {
+               cpufreq_fast_switch_count++;
+               policy->fast_switch_enabled = true;
+       } else {
+               pr_warn("CPU%u: Fast frequency switching not enabled\n",
+                       policy->cpu);
+               cpufreq_list_transition_notifiers();
+       }
+       mutex_unlock(&cpufreq_fast_switch_lock);
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
+
+ /**
+  * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
+  * @policy: cpufreq policy to disable fast frequency switching for.
+  */
+ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
+ {
+       mutex_lock(&cpufreq_fast_switch_lock);
+       if (policy->fast_switch_enabled) {
+               policy->fast_switch_enabled = false;
+               if (!WARN_ON(cpufreq_fast_switch_count <= 0))
+                       cpufreq_fast_switch_count--;
+       }
+       mutex_unlock(&cpufreq_fast_switch_lock);
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
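
For context, fast switching is doubly opt-in: the driver marks the policy as capable and supplies a non-sleeping ->fast_switch() callback, after which a governor may call cpufreq_enable_fast_switch() under policy->rwsem (note the lockdep assertion above). A minimal sketch of the driver side; the my_*() names are hypothetical, not from this merge:

	/* Hypothetical driver sketch; illustrates the opt-in only. */
	static unsigned int my_fast_switch(struct cpufreq_policy *policy,
					   unsigned int target_freq)
	{
		/*
		 * Runs in RCU-sched read-side context: must not sleep.
		 * my_write_perf_ctl() stands in for a non-blocking MSR or
		 * register write.
		 */
		if (my_write_perf_ctl(policy->cpu, target_freq))
			return CPUFREQ_ENTRY_INVALID;	/* hardware unchanged */

		return target_freq;			/* frequency actually set */
	}

	static int my_cpufreq_init(struct cpufreq_policy *policy)
	{
		policy->fast_switch_possible = true;	/* per-policy capability flag */
		return 0;
	}
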
  
  /*********************************************************************
   *                          SYSFS INTERFACE                          *
@@@ -1248,26 -1320,24 +1320,24 @@@ out_free_policy
   */
  static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
  {
+       struct cpufreq_policy *policy;
        unsigned cpu = dev->id;
-       int ret;
  
        dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
  
-       if (cpu_online(cpu)) {
-               ret = cpufreq_online(cpu);
-       } else {
-               /*
-                * A hotplug notifier will follow and we will handle it as CPU
-                * online then.  For now, just create the sysfs link, unless
-                * there is no policy or the link is already present.
-                */
-               struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+       if (cpu_online(cpu))
+               return cpufreq_online(cpu);
  
-               ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
-                       ? add_cpu_dev_symlink(policy, cpu) : 0;
-       }
+       /*
+        * A hotplug notifier will follow and we will handle it as CPU online
+        * then.  For now, just create the sysfs link, unless there is no policy
+        * or the link is already present.
+        */
+       policy = per_cpu(cpufreq_cpu_data, cpu);
+       if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
+               return 0;
  
-       return ret;
+       return add_cpu_dev_symlink(policy, cpu);
  }
  
  static void cpufreq_offline(unsigned int cpu)
  
        /* If cpu is last user of policy, free policy */
        if (has_target()) {
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               ret = cpufreq_exit_governor(policy);
                if (ret)
                        pr_err("%s: Failed to exit governor\n", __func__);
        }
@@@ -1447,8 -1517,12 +1517,12 @@@ static unsigned int __cpufreq_get(struc
  
        ret_freq = cpufreq_driver->get(policy->cpu);
  
-       /* Updating inactive policies is invalid, so avoid doing that. */
-       if (unlikely(policy_is_inactive(policy)))
+       /*
+        * Updating inactive policies is invalid, so avoid doing that.  Also
+        * if fast frequency switching is used with the given policy, the check
+        * against policy->cur is pointless, so skip it in that case too.
+        */
+       if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
                return ret_freq;
  
        if (ret_freq && policy->cur &&
@@@ -1491,9 -1565,6 +1565,9 @@@ static unsigned int cpufreq_update_curr
  {
        unsigned int new_freq;
  
 +      if (cpufreq_suspended)
 +              return 0;
 +
        new_freq = cpufreq_driver->get(policy->cpu);
        if (!new_freq)
                return 0;
@@@ -1675,8 -1746,18 +1749,18 @@@ int cpufreq_register_notifier(struct no
  
        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
+               mutex_lock(&cpufreq_fast_switch_lock);
+               if (cpufreq_fast_switch_count > 0) {
+                       mutex_unlock(&cpufreq_fast_switch_lock);
+                       return -EBUSY;
+               }
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
+               if (!ret)
+                       cpufreq_fast_switch_count--;
+               mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
@@@ -1709,8 -1790,14 +1793,14 @@@ int cpufreq_unregister_notifier(struct 
  
        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
+               mutex_lock(&cpufreq_fast_switch_lock);
                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
+               if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
+                       cpufreq_fast_switch_count++;
+               mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
@@@ -1729,6 -1816,37 +1819,37 @@@ EXPORT_SYMBOL(cpufreq_unregister_notifi
   *                              GOVERNORS                            *
   *********************************************************************/
  
+ /**
+  * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
+  * @policy: cpufreq policy to switch the frequency for.
+  * @target_freq: New frequency to set (may be approximate).
+  *
+  * Carry out a fast frequency switch without sleeping.
+  *
+  * The driver's ->fast_switch() callback invoked by this function must be
+  * suitable for being called from within RCU-sched read-side critical sections
+  * and it is expected to select the minimum available frequency greater than or
+  * equal to @target_freq (CPUFREQ_RELATION_L).
+  *
+  * This function must not be called if policy->fast_switch_enabled is unset.
+  *
+  * Governors calling this function must guarantee that it will never be invoked
+  * twice in parallel for the same policy and that it will never be called in
+  * parallel with either ->target() or ->target_index() for the same policy.
+  *
+  * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
+  * callback to indicate an error condition, the hardware configuration must be
+  * preserved.
+  */
+ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+                                       unsigned int target_freq)
+ {
+       target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+       return cpufreq_driver->fast_switch(policy, target_freq);
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
+
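The expected caller pattern follows from the kernel-doc above: a governor invokes this from a scheduler context, treats CPUFREQ_ENTRY_INVALID as "nothing changed", and updates policy->cur itself, since no transition notifiers run on this path. A hedged sketch; the names are illustrative, not from this merge:

	/* Illustrative governor-side caller. */
	static void my_gov_adjust(struct cpufreq_policy *policy,
				  unsigned int next_freq)
	{
		unsigned int freq = cpufreq_driver_fast_switch(policy, next_freq);

		if (freq == CPUFREQ_ENTRY_INVALID)
			return;			/* hardware state preserved */

		policy->cur = freq;		/* no notifiers fire on this path */
	}
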
  /* Must set freqs->new to intermediate frequency */
  static int __target_intermediate(struct cpufreq_policy *policy,
                                 struct cpufreq_freqs *freqs, int index)
@@@ -2104,7 -2222,7 +2225,7 @@@ static int cpufreq_set_policy(struct cp
                        return ret;
                }
  
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               ret = cpufreq_exit_governor(policy);
                if (ret) {
                        pr_err("%s: Failed to Exit Governor: %s (%d)\n",
                               __func__, old_gov->name, ret);
                        pr_debug("cpufreq: governor change\n");
                        return 0;
                }
-               cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               cpufreq_exit_governor(policy);
        }
  
        /* new governor failed, so re-start old one */
@@@ -2189,16 -2307,13 +2310,13 @@@ static int cpufreq_cpu_callback(struct 
  
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
+       case CPU_DOWN_FAILED:
                cpufreq_online(cpu);
                break;
  
        case CPU_DOWN_PREPARE:
                cpufreq_offline(cpu);
                break;
-       case CPU_DOWN_FAILED:
-               cpufreq_online(cpu);
-               break;
        }
        return NOTIFY_OK;
  }
index 5f1147fa9239cdc325eb8fae871d151a4b36337f,20f0a4e114d13c5252fdabc84c77498c2f041480..db649c6d86c957b2683fbf50078ee785ba9c2308
@@@ -43,9 -43,10 +43,10 @@@ static DEFINE_MUTEX(gov_dbs_data_mutex)
   * This must be called with dbs_data->mutex held, otherwise traversing
   * policy_dbs_list isn't safe.
   */
- ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
                            size_t count)
  {
+       struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct policy_dbs_info *policy_dbs;
        unsigned int rate;
        int ret;
@@@ -59,7 -60,7 +60,7 @@@
         * We are operating under dbs_data->mutex and so the list and its
         * entries can't be freed concurrently.
         */
-       list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+       list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
                mutex_lock(&policy_dbs->timer_mutex);
                /*
                 * On 32-bit architectures this may race with the
@@@ -96,7 -97,7 +97,7 @@@ void gov_update_cpu_data(struct dbs_dat
  {
        struct policy_dbs_info *policy_dbs;
  
-       list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+       list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
                unsigned int j;
  
                for_each_cpu(j, policy_dbs->policy->cpus) {
  }
  EXPORT_SYMBOL_GPL(gov_update_cpu_data);
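
The conversions above, store_sampling_rate() taking a struct gov_attr_set and the lists being walked via attr_set.policy_list, hinge on struct dbs_data now embedding a struct gov_attr_set, so the containing dbs_data can be recovered with container_of(). This mirrors the reworked helper in the governor header (exact location is an assumption):

	/* New form of to_dbs_data(): container_of() on the embedded attr_set. */
	static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
	{
		return container_of(attr_set, struct dbs_data, attr_set);
	}
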
  
- static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
- {
-       return container_of(kobj, struct dbs_data, kobj);
- }
-
- static inline struct governor_attr *to_gov_attr(struct attribute *attr)
- {
-       return container_of(attr, struct governor_attr, attr);
- }
-
- static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
-                            char *buf)
- {
-       struct dbs_data *dbs_data = to_dbs_data(kobj);
-       struct governor_attr *gattr = to_gov_attr(attr);
-
-       return gattr->show(dbs_data, buf);
- }
-
- static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
-                             const char *buf, size_t count)
- {
-       struct dbs_data *dbs_data = to_dbs_data(kobj);
-       struct governor_attr *gattr = to_gov_attr(attr);
-       int ret = -EBUSY;
-
-       mutex_lock(&dbs_data->mutex);
-
-       if (dbs_data->usage_count)
-               ret = gattr->store(dbs_data, buf, count);
-
-       mutex_unlock(&dbs_data->mutex);
-
-       return ret;
- }
-
- /*
-  * Sysfs Ops for accessing governor attributes.
-  *
-  * All show/store invocations for governor specific sysfs attributes, will first
-  * call the below show/store callbacks and the attribute specific callback will
-  * be called from within it.
-  */
- static const struct sysfs_ops governor_sysfs_ops = {
-       .show   = governor_show,
-       .store  = governor_store,
- };
-
  unsigned int dbs_update(struct cpufreq_policy *policy)
  {
        struct policy_dbs_info *policy_dbs = policy->governor_data;
                wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
                j_cdbs->prev_cpu_wall = cur_wall_time;
  
 -              if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
 -                      idle_time = 0;
 -              } else {
 -                      idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
 -                      j_cdbs->prev_cpu_idle = cur_idle_time;
 -              }
 +              idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
 +              j_cdbs->prev_cpu_idle = cur_idle_time;
  
                if (ignore_nice) {
                        u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
  }
  EXPORT_SYMBOL_GPL(dbs_update);
  
- static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
-                               unsigned int delay_us)
- {
-       struct cpufreq_policy *policy = policy_dbs->policy;
-       int cpu;
-
-       gov_update_sample_delay(policy_dbs, delay_us);
-       policy_dbs->last_sample_time = 0;
-
-       for_each_cpu(cpu, policy->cpus) {
-               struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
-
-               cpufreq_set_update_util_data(cpu, &cdbs->update_util);
-       }
- }
-
- static inline void gov_clear_update_util(struct cpufreq_policy *policy)
- {
-       int i;
-
-       for_each_cpu(i, policy->cpus)
-               cpufreq_set_update_util_data(i, NULL);
-
-       synchronize_sched();
- }
-
- static void gov_cancel_work(struct cpufreq_policy *policy)
- {
-       struct policy_dbs_info *policy_dbs = policy->governor_data;
-
-       gov_clear_update_util(policy_dbs->policy);
-       irq_work_sync(&policy_dbs->irq_work);
-       cancel_work_sync(&policy_dbs->work);
-       atomic_set(&policy_dbs->work_count, 0);
-       policy_dbs->work_in_progress = false;
- }
-
  static void dbs_work_handler(struct work_struct *work)
  {
        struct policy_dbs_info *policy_dbs;
@@@ -378,6 -298,44 +294,44 @@@ static void dbs_update_util_handler(str
        irq_work_queue(&policy_dbs->irq_work);
  }
  
+ static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
+                               unsigned int delay_us)
+ {
+       struct cpufreq_policy *policy = policy_dbs->policy;
+       int cpu;
+
+       gov_update_sample_delay(policy_dbs, delay_us);
+       policy_dbs->last_sample_time = 0;
+
+       for_each_cpu(cpu, policy->cpus) {
+               struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
+
+               cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
+                                            dbs_update_util_handler);
+       }
+ }
+
+ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
+ {
+       int i;
+
+       for_each_cpu(i, policy->cpus)
+               cpufreq_remove_update_util_hook(i);
+
+       synchronize_sched();
+ }
+
+ static void gov_cancel_work(struct cpufreq_policy *policy)
+ {
+       struct policy_dbs_info *policy_dbs = policy->governor_data;
+
+       gov_clear_update_util(policy_dbs->policy);
+       irq_work_sync(&policy_dbs->irq_work);
+       cancel_work_sync(&policy_dbs->work);
+       atomic_set(&policy_dbs->work_count, 0);
+       policy_dbs->work_in_progress = false;
+ }
+
  static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
                                                     struct dbs_governor *gov)
  {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
  
                j_cdbs->policy_dbs = policy_dbs;
-               j_cdbs->update_util.func = dbs_update_util_handler;
        }
        return policy_dbs;
  }
@@@ -449,10 -406,7 +402,7 @@@ static int cpufreq_governor_init(struc
                policy_dbs->dbs_data = dbs_data;
                policy->governor_data = policy_dbs;
  
-               mutex_lock(&dbs_data->mutex);
-               dbs_data->usage_count++;
-               list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
-               mutex_unlock(&dbs_data->mutex);
+               gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
                goto out;
        }
  
                goto free_policy_dbs_info;
        }
  
-       INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
-       mutex_init(&dbs_data->mutex);
+       gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
  
        ret = gov->init(dbs_data, !policy->governor->initialized);
        if (ret)
        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;
  
-       policy->governor_data = policy_dbs;
        policy_dbs->dbs_data = dbs_data;
-       dbs_data->usage_count = 1;
-       list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
+       policy->governor_data = policy_dbs;
  
        gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
-       ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
+       ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
                                   get_governor_parent_kobj(policy),
                                   "%s", gov->gov.name);
        if (!ret)
@@@ -519,29 -469,21 +465,21 @@@ static int cpufreq_governor_exit(struc
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
-       int count;
+       unsigned int count;
  
        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);
  
-       mutex_lock(&dbs_data->mutex);
-       list_del(&policy_dbs->list);
-       count = --dbs_data->usage_count;
-       mutex_unlock(&dbs_data->mutex);
-       if (!count) {
-               kobject_put(&dbs_data->kobj);
+       count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);
  
-               policy->governor_data = NULL;
+       policy->governor_data = NULL;
  
+       if (!count) {
                if (!have_governor_per_policy())
                        gov->gdbs_data = NULL;
  
                gov->exit(dbs_data, policy->governor->initialized == 1);
-               mutex_destroy(&dbs_data->mutex);
                kfree(dbs_data);
-       } else {
-               policy->governor_data = NULL;
        }
  
        free_policy_dbs_info(policy_dbs, gov);
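
For reference, the gov_attr_set_init/get/put() helpers used above factor out the usage_count and policy-list bookkeeping that the deleted lines open-coded. A hedged sketch of the put side, under the assumption that struct gov_attr_set carries kobj, policy_list, usage_count and update_lock fields; this is not a verbatim copy of the common code:

	/* Sketch: last caller tears down the kobject, like the old !count path. */
	unsigned int gov_attr_set_put(struct gov_attr_set *attr_set,
				      struct list_head *list_node)
	{
		unsigned int count;

		mutex_lock(&attr_set->update_lock);
		list_del(list_node);
		count = --attr_set->usage_count;
		mutex_unlock(&attr_set->update_lock);
		if (count)
			return count;

		kobject_put(&attr_set->kobj);		/* drops the sysfs directory */
		mutex_destroy(&attr_set->update_lock);
		return 0;
	}
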
index 30fe323c4551b4628de4c5878500d29eab8d57ca,1866705ee5da48861bc88b18013f952a47b3f057..12ae2e602e797f0bcb1eb99951ff26a57725f4a5
@@@ -10,6 -10,8 +10,8 @@@
   * of the License.
   */
  
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
  #include <linux/kernel.h>
  #include <linux/kernel_stat.h>
  #include <linux/module.h>
@@@ -341,17 -343,17 +343,17 @@@ static inline void pid_reset(struct _pi
  
  static inline void pid_p_gain_set(struct _pid *pid, int percent)
  {
-       pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
+       pid->p_gain = div_fp(percent, 100);
  }
  
  static inline void pid_i_gain_set(struct _pid *pid, int percent)
  {
-       pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
+       pid->i_gain = div_fp(percent, 100);
  }
  
  static inline void pid_d_gain_set(struct _pid *pid, int percent)
  {
-       pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
+       pid->d_gain = div_fp(percent, 100);
  }
  
  static signed int pid_calc(struct _pid *pid, int32_t busy)
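
These conversions, and the similar ones later in this file, rely on a scale-factor cancellation in intel_pstate's 24.8 fixed-point helpers: div_fp() already shifts its numerator up by FRAC_BITS, so pre-converting both operands with int_tofp() multiplied numerator and denominator by the same constant. A sketch with the helper definitions (mirroring the file's macros; treat exact types as assumptions) and one worked value:

	#define FRAC_BITS 8
	#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
	#define fp_toint(X) ((X) >> FRAC_BITS)

	static inline int32_t div_fp(int32_t x, int32_t y)
	{
		return div_s64((int64_t)x << FRAC_BITS, y);	/* (x << 8) / y */
	}

	/*
	 * percent = 75:
	 *   old: div_fp(int_tofp(75), int_tofp(100)) = ((75 << 8) << 8) / (100 << 8)
	 *                                            = (75 << 8) / 100 = 192
	 *   new: div_fp(75, 100)                     = (75 << 8) / 100 = 192
	 * Both encode 0.75 (192/256), so dropping int_tofp() changes nothing.
	 */
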
@@@ -529,7 -531,7 +531,7 @@@ static ssize_t show_turbo_pct(struct ko
  
        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
        no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
-       turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
+       turbo_fp = div_fp(no_turbo, total);
        turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
        return sprintf(buf, "%u\n", turbo_pct);
  }
@@@ -571,7 -573,7 +573,7 @@@ static ssize_t store_no_turbo(struct ko
  
        update_turbo_state();
        if (limits->turbo_disabled) {
-               pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
+               pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
                return -EPERM;
        }
  
@@@ -600,8 -602,7 +602,7 @@@ static ssize_t store_max_perf_pct(struc
                                   limits->max_perf_pct);
        limits->max_perf_pct = max(limits->min_perf_pct,
                                   limits->max_perf_pct);
-       limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
-                                 int_tofp(100));
+       limits->max_perf = div_fp(limits->max_perf_pct, 100);
  
        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
@@@ -625,8 -626,7 +626,7 @@@ static ssize_t store_min_perf_pct(struc
                                   limits->min_perf_pct);
        limits->min_perf_pct = min(limits->max_perf_pct,
                                   limits->min_perf_pct);
-       limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
-                                 int_tofp(100));
+       limits->min_perf = div_fp(limits->min_perf_pct, 100);
  
        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
@@@ -1011,8 -1011,8 +1011,8 @@@ static inline void intel_pstate_calc_bu
        struct sample *sample = &cpu->sample;
        int64_t core_pct;
  
-       core_pct = int_tofp(sample->aperf) * int_tofp(100);
-       core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
+       core_pct = sample->aperf * int_tofp(100);
+       core_pct = div64_u64(core_pct, sample->mperf);
  
        sample->core_pct_busy = (int32_t)core_pct;
  }
@@@ -1115,8 -1115,8 +1115,8 @@@ static inline int32_t get_target_pstate
         * specified pstate.
         */
        core_busy = cpu->sample.core_pct_busy;
-       max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
-       current_pstate = int_tofp(cpu->pstate.current_pstate);
+       max_pstate = cpu->pstate.max_pstate_physical;
+       current_pstate = cpu->pstate.current_pstate;
        core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
  
        /*
         */
        duration_ns = cpu->sample.time - cpu->last_sample_time;
        if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
-               sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
-                                     int_tofp(duration_ns));
+               sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
                core_busy = mul_fp(core_busy, sample_ratio);
 +      } else {
 +              sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
 +              if (sample_ratio < int_tofp(1))
 +                      core_busy = 0;
        }
  
        cpu->sample.busy_scaled = core_busy;
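
The new else branch above filters out mostly-idle intervals: mperf only counts cycles while the CPU is unhalted, so a small mperf/tsc ratio means the core barely ran. A worked example with illustrative deltas:

	/*
	 * mperf = 5,000 unhalted cycles, tsc = 1,000,000 cycles:
	 *   sample_ratio = div_fp(100 * 5000, 1000000)
	 *                = (500000 << 8) / 1000000 = 128, i.e. 0.5 in 24.8 fp
	 *   int_tofp(1) = 256, so sample_ratio < int_tofp(1) and core_busy is
	 *   forced to 0: a CPU unhalted for under 1% of the interval is idle.
	 */
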
@@@ -1246,9 -1241,7 +1245,7 @@@ static int intel_pstate_init_cpu(unsign
  
        intel_pstate_busy_pid_reset(cpu);
  
-       cpu->update_util.func = intel_pstate_update_util;
-       pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
+       pr_debug("controlling: cpu %d\n", cpunum);
  
        return 0;
  }
@@@ -1271,12 -1264,13 +1268,13 @@@ static void intel_pstate_set_update_uti
  
        /* Prevent intel_pstate_update_util() from using stale data. */
        cpu->sample.time = 0;
-       cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
+       cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
+                                    intel_pstate_update_util);
  }
  
  static void intel_pstate_clear_update_util_hook(unsigned int cpu)
  {
-       cpufreq_set_update_util_data(cpu, NULL);
+       cpufreq_remove_update_util_hook(cpu);
        synchronize_sched();
  }
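
The add/remove pair used above replaces cpufreq_set_update_util_data(); the callback now travels with the registration instead of living in the per-CPU data. A hedged sketch of a consumer of this API (the my_*() names are illustrative; the callback signature follows this series):

	static DEFINE_PER_CPU(struct update_util_data, my_util_data);

	static void my_update_util(struct update_util_data *data, u64 time,
				   unsigned long util, unsigned long max)
	{
		/* Called from scheduler paths with preemption off: must not sleep. */
	}

	static void my_start(unsigned int cpu)
	{
		cpufreq_add_update_util_hook(cpu, &per_cpu(my_util_data, cpu),
					     my_update_util);
	}

	static void my_stop(unsigned int cpu)
	{
		cpufreq_remove_update_util_hook(cpu);
		synchronize_sched();	/* wait for in-flight callbacks to finish */
	}
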
  
@@@ -1304,12 -1298,12 +1302,12 @@@ static int intel_pstate_set_policy(stru
        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits = &performance_limits;
                if (policy->max >= policy->cpuinfo.max_freq) {
-                       pr_debug("intel_pstate: set performance\n");
+                       pr_debug("set performance\n");
                        intel_pstate_set_performance_limits(limits);
                        goto out;
                }
        } else {
-               pr_debug("intel_pstate: set powersave\n");
+               pr_debug("set powersave\n");
                limits = &powersave_limits;
        }
  
        /* Make sure min_perf_pct <= max_perf_pct */
        limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
  
-       limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
-                                 int_tofp(100));
-       limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
-                                 int_tofp(100));
+       limits->min_perf = div_fp(limits->min_perf_pct, 100);
+       limits->max_perf = div_fp(limits->max_perf_pct, 100);
  
   out:
        intel_pstate_set_update_util_hook(policy->cpu);
@@@ -1363,7 -1355,7 +1359,7 @@@ static void intel_pstate_stop_cpu(struc
        int cpu_num = policy->cpu;
        struct cpudata *cpu = all_cpu_data[cpu_num];
  
-       pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
+       pr_debug("CPU %d exiting\n", cpu_num);
  
        intel_pstate_clear_update_util_hook(cpu_num);
  
@@@ -1608,7 -1600,7 +1604,7 @@@ hwp_cpu_matched
        if (intel_pstate_platform_pwr_mgmt_exists())
                return -ENODEV;
  
-       pr_info("Intel P-state driver initializing.\n");
+       pr_info("Intel P-state driver initializing\n");
  
        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
        intel_pstate_sysfs_expose_params();
  
        if (hwp_active)
-               pr_info("intel_pstate: HWP enabled\n");
+               pr_info("HWP enabled\n");
  
        return rc;
  out:
@@@ -1651,7 -1643,7 +1647,7 @@@ static int __init intel_pstate_setup(ch
        if (!strcmp(str, "disable"))
                no_load = 1;
        if (!strcmp(str, "no_hwp")) {
-               pr_info("intel_pstate: HWP disabled\n");
+               pr_info("HWP disabled\n");
                no_hwp = 1;
        }
        if (!strcmp(str, "force"))