Merge cpufreq fixes going into v4.6.
author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Fri, 6 May 2016 20:01:14 +0000 (22:01 +0200)
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Fri, 6 May 2016 20:01:14 +0000 (22:01 +0200)
* pm-cpufreq-fixes:
  intel_pstate: Fix intel_pstate_get()
  cpufreq: intel_pstate: Fix HWP on boot CPU after system resume
  cpufreq: st: enable selective initialization based on the platform
  cpufreq: intel_pstate: Fix processing for turbo activation ratio

1  2 
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c

index a48b998b3304db5eff1ebc9401274c5467b18533,c4acfc5273b3c1f8c5efc321f5ea8407a4b7f02b..035513b012eebf049cffbe713680c556993f944f
@@@ -78,11 -78,6 +78,11 @@@ static int cpufreq_governor(struct cpuf
  static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
  static int cpufreq_start_governor(struct cpufreq_policy *policy);
  
 +static inline int cpufreq_exit_governor(struct cpufreq_policy *policy)
 +{
 +      return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 +}
 +
  /**
   * Two notifier lists: the "policy" list is involved in the
   * validation process for a new CPU frequency policy; the
@@@ -434,73 -429,6 +434,73 @@@ void cpufreq_freq_transition_end(struc
  }
  EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
  
 +/*
 + * Fast frequency switching status count.  Positive means "enabled", negative
 + * means "disabled" and 0 means "not decided yet".
 + */
 +static int cpufreq_fast_switch_count;
 +static DEFINE_MUTEX(cpufreq_fast_switch_lock);
 +
 +static void cpufreq_list_transition_notifiers(void)
 +{
 +      struct notifier_block *nb;
 +
 +      pr_info("Registered transition notifiers:\n");
 +
 +      mutex_lock(&cpufreq_transition_notifier_list.mutex);
 +
 +      for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
 +              pr_info("%pF\n", nb->notifier_call);
 +
 +      mutex_unlock(&cpufreq_transition_notifier_list.mutex);
 +}
 +
 +/**
 + * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 + * @policy: cpufreq policy to enable fast frequency switching for.
 + *
 + * Try to enable fast frequency switching for @policy.
 + *
 + * The attempt will fail if there is at least one transition notifier registered
 + * at this point, as fast frequency switching is quite fundamentally at odds
 + * with transition notifiers.  Thus if successful, it will make registration of
 + * transition notifiers fail going forward.
 + */
 +void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
 +{
 +      lockdep_assert_held(&policy->rwsem);
 +
 +      if (!policy->fast_switch_possible)
 +              return;
 +
 +      mutex_lock(&cpufreq_fast_switch_lock);
 +      if (cpufreq_fast_switch_count >= 0) {
 +              cpufreq_fast_switch_count++;
 +              policy->fast_switch_enabled = true;
 +      } else {
 +              pr_warn("CPU%u: Fast frequency switching not enabled\n",
 +                      policy->cpu);
 +              cpufreq_list_transition_notifiers();
 +      }
 +      mutex_unlock(&cpufreq_fast_switch_lock);
 +}
 +EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
 +
 +/**
 + * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 + * @policy: cpufreq policy to disable fast frequency switching for.
 + */
 +void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
 +{
 +      mutex_lock(&cpufreq_fast_switch_lock);
 +      if (policy->fast_switch_enabled) {
 +              policy->fast_switch_enabled = false;
 +              if (!WARN_ON(cpufreq_fast_switch_count <= 0))
 +                      cpufreq_fast_switch_count--;
 +      }
 +      mutex_unlock(&cpufreq_fast_switch_lock);
 +}
 +EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
  
  /*********************************************************************
   *                          SYSFS INTERFACE                          *
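To illustrate how the enable/disable pair added above is meant to be used, here is a minimal governor-side sketch (not code from this series; the my_gov_* names are hypothetical). A governor enables fast switching while the core holds policy->rwsem, then checks policy->fast_switch_enabled to pick its switching path:

    static int my_gov_init(struct cpufreq_policy *policy)
    {
            /* The core holds policy->rwsem here, which satisfies the
             * lockdep assertion in cpufreq_enable_fast_switch(). */
            cpufreq_enable_fast_switch(policy);

            if (!policy->fast_switch_enabled)
                    pr_info("my_gov: CPU%u on the slow switching path\n",
                            policy->cpu);
            return 0;
    }

    static void my_gov_exit(struct cpufreq_policy *policy)
    {
            cpufreq_disable_fast_switch(policy);
    }

If a transition notifier was registered first, cpufreq_fast_switch_count is negative, the enable attempt fails, and the governor simply falls back to the existing ->target()/->target_index() path.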
@@@ -1320,24 -1248,26 +1320,24 @@@ out_free_policy
   */
  static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
  {
 +      struct cpufreq_policy *policy;
        unsigned cpu = dev->id;
 -      int ret;
  
        dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
  
 -      if (cpu_online(cpu)) {
 -              ret = cpufreq_online(cpu);
 -      } else {
 -              /*
 -               * A hotplug notifier will follow and we will handle it as CPU
 -               * online then.  For now, just create the sysfs link, unless
 -               * there is no policy or the link is already present.
 -               */
 -              struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 +      if (cpu_online(cpu))
 +              return cpufreq_online(cpu);
  
 -              ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
 -                      ? add_cpu_dev_symlink(policy, cpu) : 0;
 -      }
 +      /*
 +       * A hotplug notifier will follow and we will handle it as CPU online
 +       * then.  For now, just create the sysfs link, unless there is no policy
 +       * or the link is already present.
 +       */
 +      policy = per_cpu(cpufreq_cpu_data, cpu);
 +      if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
 +              return 0;
  
 -      return ret;
 +      return add_cpu_dev_symlink(policy, cpu);
  }
  
  static void cpufreq_offline(unsigned int cpu)
  
        /* If cpu is last user of policy, free policy */
        if (has_target()) {
 -              ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 +              ret = cpufreq_exit_governor(policy);
                if (ret)
                        pr_err("%s: Failed to exit governor\n", __func__);
        }
@@@ -1517,12 -1447,8 +1517,12 @@@ static unsigned int __cpufreq_get(struc
  
        ret_freq = cpufreq_driver->get(policy->cpu);
  
 -      /* Updating inactive policies is invalid, so avoid doing that. */
 -      if (unlikely(policy_is_inactive(policy)))
 +      /*
 +       * Updating inactive policies is invalid, so avoid doing that.  Also
 +       * if fast frequency switching is used with the given policy, the check
 +       * against policy->cur is pointless, so skip it in that case too.
 +       */
 +      if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
                return ret_freq;
  
        if (ret_freq && policy->cur &&
@@@ -1631,21 -1557,25 +1631,25 @@@ void cpufreq_suspend(void
        if (!cpufreq_driver)
                return;
  
-       if (!has_target())
+       if (!has_target() && !cpufreq_driver->suspend)
                goto suspend;
  
        pr_debug("%s: Suspending Governors\n", __func__);
  
        for_each_active_policy(policy) {
-               down_write(&policy->rwsem);
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-               up_write(&policy->rwsem);
+               if (has_target()) {
+                       down_write(&policy->rwsem);
+                       ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                       up_write(&policy->rwsem);
  
-               if (ret)
-                       pr_err("%s: Failed to stop governor for policy: %p\n",
-                               __func__, policy);
-               else if (cpufreq_driver->suspend
-                   && cpufreq_driver->suspend(policy))
+                       if (ret) {
+                               pr_err("%s: Failed to stop governor for policy: %p\n",
+                                       __func__, policy);
+                               continue;
+                       }
+               }
+               if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
                                policy);
        }
@@@ -1670,7 -1600,7 +1674,7 @@@ void cpufreq_resume(void
  
        cpufreq_suspended = false;
  
-       if (!has_target())
+       if (!has_target() && !cpufreq_driver->resume)
                return;
  
        pr_debug("%s: Resuming Governors\n", __func__);
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                                policy);
-               } else {
+               } else if (has_target()) {
                        down_write(&policy->rwsem);
                        ret = cpufreq_start_governor(policy);
                        up_write(&policy->rwsem);
@@@ -1749,18 -1679,8 +1753,18 @@@ int cpufreq_register_notifier(struct no
  
        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
 +              mutex_lock(&cpufreq_fast_switch_lock);
 +
 +              if (cpufreq_fast_switch_count > 0) {
 +                      mutex_unlock(&cpufreq_fast_switch_lock);
 +                      return -EBUSY;
 +              }
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
 +              if (!ret)
 +                      cpufreq_fast_switch_count--;
 +
 +              mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
@@@ -1793,14 -1713,8 +1797,14 @@@ int cpufreq_unregister_notifier(struct 
  
        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
 +              mutex_lock(&cpufreq_fast_switch_lock);
 +
                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
 +              if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
 +                      cpufreq_fast_switch_count++;
 +
 +              mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
@@@ -1819,37 -1733,6 +1823,37 @@@ EXPORT_SYMBOL(cpufreq_unregister_notifi
   *                              GOVERNORS                            *
   *********************************************************************/
  
 +/**
 + * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 + * @policy: cpufreq policy to switch the frequency for.
 + * @target_freq: New frequency to set (may be approximate).
 + *
 + * Carry out a fast frequency switch without sleeping.
 + *
 + * The driver's ->fast_switch() callback invoked by this function must be
 + * suitable for being called from within RCU-sched read-side critical sections
 + * and it is expected to select the minimum available frequency greater than or
 + * equal to @target_freq (CPUFREQ_RELATION_L).
 + *
 + * This function must not be called if policy->fast_switch_enabled is unset.
 + *
 + * Governors calling this function must guarantee that it will never be invoked
 + * twice in parallel for the same policy and that it will never be called in
 + * parallel with either ->target() or ->target_index() for the same policy.
 + *
 + * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
 + * callback to indicate an error condition, the hardware configuration must be
 + * preserved.
 + */
 +unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 +                                      unsigned int target_freq)
 +{
 +      target_freq = clamp_val(target_freq, policy->min, policy->max);
 +
 +      return cpufreq_driver->fast_switch(policy, target_freq);
 +}
 +EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
 +
  /* Must set freqs->new to intermediate frequency */
  static int __target_intermediate(struct cpufreq_policy *policy,
                                 struct cpufreq_freqs *freqs, int index)
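For reference, a driver-side sketch of the ->fast_switch() contract documented above (hypothetical: the my_drv_* names, the my_drv_write_freq() helper, and the ascending-sorted frequency table are assumptions, not part of this patch). It must not sleep, selects the lowest frequency at or above target_freq (CPUFREQ_RELATION_L), and leaves the hardware untouched when reporting failure:

    static struct cpufreq_frequency_table *my_drv_table;  /* sorted ascending */

    static unsigned int my_drv_fast_switch(struct cpufreq_policy *policy,
                                           unsigned int target_freq)
    {
            struct cpufreq_frequency_table *pos;
            unsigned int best = 0;

            cpufreq_for_each_valid_entry(pos, my_drv_table) {
                    best = pos->frequency;
                    if (best >= target_freq)
                            break;  /* lowest entry at or above the target */
            }

            /* Runs in an RCU-sched read-side section: my_drv_write_freq()
             * stands in for a non-sleeping register write. */
            if (my_drv_write_freq(policy->cpu, best))
                    return CPUFREQ_ENTRY_INVALID;  /* hw state preserved */

            return best;
    }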
@@@ -2225,7 -2108,7 +2229,7 @@@ static int cpufreq_set_policy(struct cp
                        return ret;
                }
  
 -              ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 +              ret = cpufreq_exit_governor(policy);
                if (ret) {
                        pr_err("%s: Failed to Exit Governor: %s (%d)\n",
                               __func__, old_gov->name, ret);
                        pr_debug("cpufreq: governor change\n");
                        return 0;
                }
 -              cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 +              cpufreq_exit_governor(policy);
        }
  
        /* new governor failed, so re-start old one */
@@@ -2310,13 -2193,16 +2314,13 @@@ static int cpufreq_cpu_callback(struct 
  
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
 +      case CPU_DOWN_FAILED:
                cpufreq_online(cpu);
                break;
  
        case CPU_DOWN_PREPARE:
                cpufreq_offline(cpu);
                break;
 -
 -      case CPU_DOWN_FAILED:
 -              cpufreq_online(cpu);
 -              break;
        }
        return NOTIFY_OK;
  }
index 74453fe546c1ace49658fc478de3625e6e02edfd,b230ebaae66cb7ee0def1d228ff33c778343f344..3addb22725e122c8aff3d0a1012b3f285f294fd8
@@@ -10,8 -10,6 +10,8 @@@
   * of the License.
   */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <linux/kernel.h>
  #include <linux/kernel_stat.h>
  #include <linux/module.h>
  #define ATOM_TURBO_RATIOS     0x66c
  #define ATOM_TURBO_VIDS               0x66d
  
 +#ifdef CONFIG_ACPI
 +#include <acpi/processor.h>
 +#endif
 +
  #define FRAC_BITS 8
  #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
  #define fp_toint(X) ((X) >> FRAC_BITS)
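These macros implement 24.8 fixed-point arithmetic, and several hunks below drop int_tofp() conversions around div_fp() arguments, which presumes div_fp() now scales its integer numerator internally. A standalone sketch of that arithmetic (the div_fp()/mul_fp() definitions are assumptions inferred from the callers, not shown in this diff):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 8
    #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
    #define fp_toint(X) ((X) >> FRAC_BITS)

    /* Assumed post-series shapes: div_fp() scales the integer numerator
     * itself, which is why callers below drop the int_tofp() wrappers. */
    static int64_t mul_fp(int64_t x, int64_t y) { return (x * y) >> FRAC_BITS; }
    static int64_t div_fp(int64_t x, int64_t y) { return int_tofp(x) / y; }

    int main(void)
    {
            int64_t min_perf = div_fp(75, 100);     /* 75% -> 0.75 -> 192/256 */

            printf("raw=%lld, back to percent=%lld\n",
                   (long long)min_perf,
                   (long long)fp_toint(mul_fp(min_perf, int_tofp(100))));
            /* prints: raw=192, back to percent=75 */
            return 0;
    }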
@@@ -178,8 -172,6 +178,8 @@@ struct _pid 
  * @prev_cummulative_iowait: IO wait time difference between the last and
  *                    current samples
   * @sample:           Storage for storing last Sample data
 + * @acpi_perf_data:   Stores ACPI perf information read from _PSS
 + * @valid_pss_table:  Set to true if valid ACPI _PSS entries are found
   *
   * This structure stores per CPU instance data for all CPUs.
   */
@@@ -198,10 -190,6 +198,10 @@@ struct cpudata 
        u64     prev_tsc;
        u64     prev_cummulative_iowait;
        struct sample sample;
 +#ifdef CONFIG_ACPI
 +      struct acpi_processor_performance acpi_perf_data;
 +      bool valid_pss_table;
 +#endif
  };
  
  static struct cpudata **all_cpu_data;
@@@ -270,9 -258,6 +270,9 @@@ static struct pstate_adjust_policy pid_
  static struct pstate_funcs pstate_funcs;
  static int hwp_active;
  
 +#ifdef CONFIG_ACPI
 +static bool acpi_ppc;
 +#endif
  
  /**
   * struct perf_limits - Store user and policy limits
@@@ -346,124 -331,6 +346,124 @@@ static struct perf_limits *limits = &pe
  static struct perf_limits *limits = &powersave_limits;
  #endif
  
 +#ifdef CONFIG_ACPI
 +
 +static bool intel_pstate_get_ppc_enable_status(void)
 +{
 +      if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
 +          acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
 +              return true;
 +
 +      return acpi_ppc;
 +}
 +
 +/*
 + * The max target P-state ratio is an 8-bit value in both the PLATFORM_INFO
 + * MSR and the TURBO_RATIO_LIMIT MSR, which this driver stores in the
 + * max_pstate and max_turbo_pstate fields.  The PERF_CTL MSR holds a 16-bit
 + * value for the P-state ratio, of which only the high 8 bits are used; for
 + * example, 0x1700 requests target ratio 0x17.  The _PSS control value is
 + * stored in a format that can be written directly to the PERF_CTL MSR, but
 + * in the intel_pstate driver that shift happens during the write to PERF_CTL
 + * (e.g. in core_set_pstate() for core processors).  This function converts
 + * the _PSS control value to the intel_pstate driver format for comparison
 + * and assignment.
 + */
 +static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
 +{
 +      return cpu->acpi_perf_data.states[index].control >> 8;
 +}
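 +
 +/*
 + * Worked example (illustrative value, not from a real table): a _PSS
 + * control value of 0x1700 programs PERF_CTL for target ratio 0x17, so
 + * the conversion above returns 0x1700 >> 8 == 0x17.
 + */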
 +
 +static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 +{
 +      struct cpudata *cpu;
 +      int turbo_pss_ctl;
 +      int ret;
 +      int i;
 +
 +      if (hwp_active)
 +              return;
 +
 +      if (!intel_pstate_get_ppc_enable_status())
 +              return;
 +
 +      cpu = all_cpu_data[policy->cpu];
 +
 +      ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
 +                                                policy->cpu);
 +      if (ret)
 +              return;
 +
 +      /*
 +       * Check that the control value in _PSS is for the PERF_CTL MSR,
 +       * which guarantees that the states it returns map directly to the
 +       * states in our list.
 +       */
 +      if (cpu->acpi_perf_data.control_register.space_id !=
 +                                              ACPI_ADR_SPACE_FIXED_HARDWARE)
 +              goto err;
 +
 +      /*
 +       * If there is only one entry in _PSS, simply ignore it and continue
 +       * as usual, without taking _PSS into account.
 +       */
 +      if (cpu->acpi_perf_data.state_count < 2)
 +              goto err;
 +
 +      pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
 +      for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
 +              pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
 +                       (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
 +                       (u32) cpu->acpi_perf_data.states[i].core_frequency,
 +                       (u32) cpu->acpi_perf_data.states[i].power,
 +                       (u32) cpu->acpi_perf_data.states[i].control);
 +      }
 +
 +      /*
 +       * The _PSS table doesn't cover the whole turbo frequency range;
 +       * its first entry is just 1 MHz above the max non-turbo frequency,
 +       * with a control value corresponding to the max turbo ratio.  When
 +       * cpufreq set_policy is then called with that frequency as the
 +       * maximum, performance is reduced, because this driver uses the
 +       * real max turbo frequency as the max frequency.  So correct the
 +       * frequency in the first _PSS entry to the real max turbo
 +       * frequency, converting to MHz since _PSS frequencies are in MHz.
 +       */
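 +      /*
 +       * Illustration (made-up numbers): with the max non-turbo frequency
 +       * at 2.3 GHz and cpuinfo.max_freq at 3200000 kHz, _PSS[0] reports
 +       * 2301 MHz and is rewritten below to 3200000 / 1000 = 3200 MHz.
 +       */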
 +      turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
 +      if (turbo_pss_ctl > cpu->pstate.max_pstate)
 +              cpu->acpi_perf_data.states[0].core_frequency =
 +                                      policy->cpuinfo.max_freq / 1000;
 +      cpu->valid_pss_table = true;
 +      pr_info("_PPC limits will be enforced\n");
 +
 +      return;
 +
 + err:
 +      cpu->valid_pss_table = false;
 +      acpi_processor_unregister_performance(policy->cpu);
 +}
 +
 +static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 +{
 +      struct cpudata *cpu;
 +
 +      cpu = all_cpu_data[policy->cpu];
 +      if (!cpu->valid_pss_table)
 +              return;
 +
 +      acpi_processor_unregister_performance(policy->cpu);
 +}
 +
 +#else
 +static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 +{
 +}
 +
 +static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 +{
 +}
 +#endif
 +
  static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                             int deadband, int integral) {
        pid->setpoint = int_tofp(setpoint);
  
  static inline void pid_p_gain_set(struct _pid *pid, int percent)
  {
 -      pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
 +      pid->p_gain = div_fp(percent, 100);
  }
  
  static inline void pid_i_gain_set(struct _pid *pid, int percent)
  {
 -      pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
 +      pid->i_gain = div_fp(percent, 100);
  }
  
  static inline void pid_d_gain_set(struct _pid *pid, int percent)
  {
 -      pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
 +      pid->d_gain = div_fp(percent, 100);
  }
  
  static signed int pid_calc(struct _pid *pid, int32_t busy)
@@@ -586,6 -453,14 +586,14 @@@ static void intel_pstate_hwp_set(const 
        }
  }
  
+ static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
+ {
+       if (hwp_active)
+               intel_pstate_hwp_set(policy->cpus);
+ 
+       return 0;
+ }
+ 
  static void intel_pstate_hwp_set_online_cpus(void)
  {
        get_online_cpus();
@@@ -662,7 -537,7 +670,7 @@@ static ssize_t show_turbo_pct(struct ko
  
        total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
        no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
 -      turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
 +      turbo_fp = div_fp(no_turbo, total);
        turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
        return sprintf(buf, "%u\n", turbo_pct);
  }
@@@ -704,7 -579,7 +712,7 @@@ static ssize_t store_no_turbo(struct ko
  
        update_turbo_state();
        if (limits->turbo_disabled) {
 -              pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
 +              pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
                return -EPERM;
        }
  
@@@ -733,7 -608,8 +741,7 @@@ static ssize_t store_max_perf_pct(struc
                                   limits->max_perf_pct);
        limits->max_perf_pct = max(limits->min_perf_pct,
                                   limits->max_perf_pct);
 -      limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
 -                                int_tofp(100));
 +      limits->max_perf = div_fp(limits->max_perf_pct, 100);
  
        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
@@@ -757,7 -633,8 +765,7 @@@ static ssize_t store_min_perf_pct(struc
                                   limits->min_perf_pct);
        limits->min_perf_pct = min(limits->max_perf_pct,
                                   limits->min_perf_pct);
 -      limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
 -                                int_tofp(100));
 +      limits->min_perf = div_fp(limits->min_perf_pct, 100);
  
        if (hwp_active)
                intel_pstate_hwp_set_online_cpus();
@@@ -944,6 -821,11 +952,11 @@@ static int core_get_max_pstate(void
                        if (err)
                                goto skip_tar;
  
+                       /* For level 1 and 2, bits[23:16] contain the ratio */
+                       if (tdp_ctrl)
+                               tdp_ratio >>= 16;
+                       tdp_ratio &= 0xff; /* ratios are only 8 bits long */
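+                       /*
+                        * Example (made-up value): for config TDP level 1,
+                        * a raw tdp_ratio of 0x00230000 becomes
+                        * (0x00230000 >> 16) & 0xff == 0x23.
+                        */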
                        if (tdp_ratio - 1 == tar) {
                                max_pstate = tar;
                                pr_debug("max_pstate=TAC %x\n", max_pstate);
@@@ -1142,8 -1024,8 +1155,8 @@@ static inline void intel_pstate_calc_bu
        struct sample *sample = &cpu->sample;
        int64_t core_pct;
  
 -      core_pct = int_tofp(sample->aperf) * int_tofp(100);
 -      core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
 +      core_pct = sample->aperf * int_tofp(100);
 +      core_pct = div64_u64(core_pct, sample->mperf);
  
        sample->core_pct_busy = (int32_t)core_pct;
  }
@@@ -1188,16 -1070,11 +1201,17 @@@ static inline bool intel_pstate_sample(
  
  static inline int32_t get_avg_frequency(struct cpudata *cpu)
  {
-       return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
-               cpu->pstate.scaling, cpu->sample.mperf);
+       return fp_toint(mul_fp(cpu->sample.core_pct_busy,
+                              int_tofp(cpu->pstate.max_pstate_physical *
+                                               cpu->pstate.scaling / 100)));
  }
  
 +static inline int32_t get_avg_pstate(struct cpudata *cpu)
 +{
 +      return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf,
 +                       cpu->sample.mperf);
 +}
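 +
 +/*
 + * Example (illustrative): with max_pstate_physical == 32 and an APERF
 + * delta equal to half the MPERF delta since the last sample,
 + * get_avg_pstate() returns 32 * aperf / mperf == 16.
 + */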
 +
  static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
  {
        struct sample *sample = &cpu->sample;
        cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
        cpu->sample.busy_scaled = cpu_load;
  
 -      return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
 +      return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load);
  }
  
  static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
        int32_t core_busy, max_pstate, current_pstate, sample_ratio;
        u64 duration_ns;
  
-       intel_pstate_calc_busy(cpu);
        /*
         * core_busy is the ratio of actual performance to max
         * max_pstate is the max non turbo pstate available
         * specified pstate.
         */
        core_busy = cpu->sample.core_pct_busy;
 -      max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
 -      current_pstate = int_tofp(cpu->pstate.current_pstate);
 +      max_pstate = cpu->pstate.max_pstate_physical;
 +      current_pstate = cpu->pstate.current_pstate;
        core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
  
        /*
         */
        duration_ns = cpu->sample.time - cpu->last_sample_time;
        if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
 -              sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
 -                                    int_tofp(duration_ns));
 +              sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
                core_busy = mul_fp(core_busy, sample_ratio);
        } else {
                sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
@@@ -1322,8 -1198,11 +1334,11 @@@ static void intel_pstate_update_util(st
        if ((s64)delta_ns >= pid_params.sample_rate_ns) {
                bool sample_taken = intel_pstate_sample(cpu, time);
  
-               if (sample_taken && !hwp_active)
-                       intel_pstate_adjust_busy_pstate(cpu);
+               if (sample_taken) {
+                       intel_pstate_calc_busy(cpu);
+                       if (!hwp_active)
+                               intel_pstate_adjust_busy_pstate(cpu);
+               }
        }
  }
  
@@@ -1382,7 -1261,9 +1397,7 @@@ static int intel_pstate_init_cpu(unsign
  
        intel_pstate_busy_pid_reset(cpu);
  
 -      cpu->update_util.func = intel_pstate_update_util;
 -
 -      pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
 +      pr_debug("controlling: cpu %d\n", cpunum);
  
        return 0;
  }
@@@ -1405,13 -1286,12 +1420,13 @@@ static void intel_pstate_set_update_uti
  
        /* Prevent intel_pstate_update_util() from using stale data. */
        cpu->sample.time = 0;
 -      cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
 +      cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
 +                                   intel_pstate_update_util);
  }
  
  static void intel_pstate_clear_update_util_hook(unsigned int cpu)
  {
 -      cpufreq_set_update_util_data(cpu, NULL);
 +      cpufreq_remove_update_util_hook(cpu);
        synchronize_sched();
  }
  
@@@ -1431,31 -1311,20 +1446,31 @@@ static void intel_pstate_set_performanc
  
  static int intel_pstate_set_policy(struct cpufreq_policy *policy)
  {
 +      struct cpudata *cpu;
 +
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
  
        intel_pstate_clear_update_util_hook(policy->cpu);
  
 +      cpu = all_cpu_data[0];
 +      if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate) {
 +              if (policy->max < policy->cpuinfo.max_freq &&
 +                  policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
 +                      pr_debug("policy->max > max non turbo frequency\n");
 +                      policy->max = policy->cpuinfo.max_freq;
 +              }
 +      }
 +
        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits = &performance_limits;
                if (policy->max >= policy->cpuinfo.max_freq) {
 -                      pr_debug("intel_pstate: set performance\n");
 +                      pr_debug("set performance\n");
                        intel_pstate_set_performance_limits(limits);
                        goto out;
                }
        } else {
 -              pr_debug("intel_pstate: set powersave\n");
 +              pr_debug("set powersave\n");
                limits = &powersave_limits;
        }
  
        /* Make sure min_perf_pct <= max_perf_pct */
        limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
  
 -      limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
 -                                int_tofp(100));
 -      limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
 -                                int_tofp(100));
 +      limits->min_perf = div_fp(limits->min_perf_pct, 100);
 +      limits->max_perf = div_fp(limits->max_perf_pct, 100);
  
   out:
        intel_pstate_set_update_util_hook(policy->cpu);
  
-       if (hwp_active)
-               intel_pstate_hwp_set(policy->cpus);
+       intel_pstate_hwp_set_policy(policy);
  
        return 0;
  }
@@@ -1507,7 -1377,7 +1521,7 @@@ static void intel_pstate_stop_cpu(struc
        int cpu_num = policy->cpu;
        struct cpudata *cpu = all_cpu_data[cpu_num];
  
 -      pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
 +      pr_debug("CPU %d exiting\n", cpu_num);
  
        intel_pstate_clear_update_util_hook(cpu_num);
  
@@@ -1540,27 -1410,19 +1554,28 @@@ static int intel_pstate_cpu_init(struc
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->cpuinfo.max_freq =
                cpu->pstate.turbo_pstate * cpu->pstate.scaling;
 +      intel_pstate_init_acpi_perf_limits(policy);
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);
  
        return 0;
  }
  
 +static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
 +{
 +      intel_pstate_exit_perf_limits(policy);
 +
 +      return 0;
 +}
 +
  static struct cpufreq_driver intel_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
+       .resume         = intel_pstate_hwp_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
 +      .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_pstate_stop_cpu,
        .name           = "intel_pstate",
  };
@@@ -1604,7 -1466,8 +1619,7 @@@ static void copy_cpu_funcs(struct pstat
  
  }
  
 -#if IS_ENABLED(CONFIG_ACPI)
 -#include <acpi/processor.h>
 +#ifdef CONFIG_ACPI
  
  static bool intel_pstate_no_acpi_pss(void)
  {
@@@ -1760,7 -1623,7 +1775,7 @@@ hwp_cpu_matched
        if (intel_pstate_platform_pwr_mgmt_exists())
                return -ENODEV;
  
 -      pr_info("Intel P-state driver initializing.\n");
 +      pr_info("Intel P-state driver initializing\n");
  
        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
        intel_pstate_sysfs_expose_params();
  
        if (hwp_active)
 -              pr_info("intel_pstate: HWP enabled\n");
 +              pr_info("HWP enabled\n");
  
        return rc;
  out:
@@@ -1803,19 -1666,13 +1818,19 @@@ static int __init intel_pstate_setup(ch
        if (!strcmp(str, "disable"))
                no_load = 1;
        if (!strcmp(str, "no_hwp")) {
 -              pr_info("intel_pstate: HWP disabled\n");
 +              pr_info("HWP disabled\n");
                no_hwp = 1;
        }
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
                hwp_only = 1;
 +
 +#ifdef CONFIG_ACPI
 +      if (!strcmp(str, "support_acpi_ppc"))
 +              acpi_ppc = true;
 +#endif
 +
        return 0;
  }
  early_param("intel_pstate", intel_pstate_setup);
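For reference, these tokens are parsed from the kernel command line via early_param(), so booting with, e.g., intel_pstate=support_acpi_ppc sets acpi_ppc (under CONFIG_ACPI) and makes the driver honor _PPC limits even on platforms whose FADT preferred profile is not a server one.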