Diffstat (limited to 'drivers/cpufreq')
 drivers/cpufreq/Kconfig.arm            |   2
 drivers/cpufreq/armada-8k-cpufreq.c    |   6
 drivers/cpufreq/cppc_cpufreq.c         | 163
 drivers/cpufreq/cpufreq-dt-platdev.c   |   2
 drivers/cpufreq/cpufreq-dt.c           | 155
 drivers/cpufreq/cpufreq.c              |  85
 drivers/cpufreq/cpufreq_stats.c        |  16
 drivers/cpufreq/highbank-cpufreq.c     |   7
 drivers/cpufreq/intel_pstate.c         |   9
 drivers/cpufreq/loongson1-cpufreq.c    |   1
 drivers/cpufreq/mediatek-cpufreq.c     |   3
 drivers/cpufreq/qcom-cpufreq-nvmem.c   |  16
 drivers/cpufreq/scmi-cpufreq.c         |   5
 drivers/cpufreq/scpi-cpufreq.c         |   1
 drivers/cpufreq/sti-cpufreq.c          |  14
 drivers/cpufreq/sun50i-cpufreq-nvmem.c |   1
 drivers/cpufreq/tegra186-cpufreq.c     | 122
 drivers/cpufreq/tegra194-cpufreq.c     |  72
 drivers/cpufreq/vexpress-spc-cpufreq.c |   1
19 files changed, 360 insertions, 321 deletions
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 015ec0c02835..1f73fa75b1a0 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -94,7 +94,7 @@ config ARM_IMX6Q_CPUFREQ tristate "Freescale i.MX6 cpufreq support" depends on ARCH_MXC depends on REGULATOR_ANATOP - select NVMEM_IMX_OCOTP + depends on NVMEM_IMX_OCOTP || COMPILE_TEST select PM_OPP help This adds cpufreq driver support for Freescale i.MX6 series SoCs. diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c index 39e34f5066d3..b0fc5e84f857 100644 --- a/drivers/cpufreq/armada-8k-cpufreq.c +++ b/drivers/cpufreq/armada-8k-cpufreq.c @@ -204,6 +204,12 @@ static void __exit armada_8k_cpufreq_exit(void) } module_exit(armada_8k_cpufreq_exit); +static const struct of_device_id __maybe_unused armada_8k_cpufreq_of_match[] = { + { .compatible = "marvell,ap806-cpu-clock" }, + { }, +}; +MODULE_DEVICE_TABLE(of, armada_8k_cpufreq_of_match); + MODULE_AUTHOR("Gregory Clement <gregory.clement@bootlin.com>"); MODULE_DESCRIPTION("Armada 8K cpufreq driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index f29e8d0553a8..7cc9bd8568de 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -26,8 +26,8 @@ /* Minimum struct length needed for the DMI processor entry we want */ #define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48 -/* Offest in the DMI processor structure for the max frequency */ -#define DMI_PROCESSOR_MAX_SPEED 0x14 +/* Offset in the DMI processor structure for the max frequency */ +#define DMI_PROCESSOR_MAX_SPEED 0x14 /* * These structs contain information parsed from per CPU @@ -96,11 +96,11 @@ static u64 cppc_get_dmi_max_khz(void) * and extrapolate the rest * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion */ -static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu, - unsigned int perf) +static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data, + unsigned int perf) { + struct cppc_perf_caps *caps = &cpu_data->perf_caps; static u64 max_khz; - struct cppc_perf_caps *caps = &cpu->perf_caps; u64 mul, div; if (caps->lowest_freq && caps->nominal_freq) { @@ -120,11 +120,11 @@ static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu, return (u64)perf * mul / div; } -static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu, - unsigned int freq) +static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data, + unsigned int freq) { + struct cppc_perf_caps *caps = &cpu_data->perf_caps; static u64 max_khz; - struct cppc_perf_caps *caps = &cpu->perf_caps; u64 mul, div; if (caps->lowest_freq && caps->nominal_freq) { @@ -146,32 +146,30 @@ static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu, } static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int target_freq, + unsigned int relation) { - struct cppc_cpudata *cpu; + struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu]; struct cpufreq_freqs freqs; u32 desired_perf; int ret = 0; - cpu = all_cpu_data[policy->cpu]; - - desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq); + desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq); /* Return if it is exactly the same perf */ - if (desired_perf == cpu->perf_ctrls.desired_perf) + if (desired_perf == cpu_data->perf_ctrls.desired_perf) return ret; - cpu->perf_ctrls.desired_perf = desired_perf; + 
cpu_data->perf_ctrls.desired_perf = desired_perf; freqs.old = policy->cur; freqs.new = target_freq; cpufreq_freq_transition_begin(policy, &freqs); - ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls); + ret = cppc_set_perf(cpu_data->cpu, &cpu_data->perf_ctrls); cpufreq_freq_transition_end(policy, &freqs, ret != 0); if (ret) pr_debug("Failed to set target on CPU:%d. ret:%d\n", - cpu->cpu, ret); + cpu_data->cpu, ret); return ret; } @@ -184,28 +182,29 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy) static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) { - int cpu_num = policy->cpu; - struct cppc_cpudata *cpu = all_cpu_data[cpu_num]; + struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu]; + struct cppc_perf_caps *caps = &cpu_data->perf_caps; + unsigned int cpu = policy->cpu; int ret; - cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf; + cpu_data->perf_ctrls.desired_perf = caps->lowest_perf; - ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls); + ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); if (ret) pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n", - cpu->perf_caps.lowest_perf, cpu_num, ret); + caps->lowest_perf, cpu, ret); } /* * The PCC subspace describes the rate at which platform can accept commands * on the shared PCC channel (including READs which do not count towards freq - * trasition requests), so ideally we need to use the PCC values as a fallback + * transition requests), so ideally we need to use the PCC values as a fallback * if we don't have a platform specific transition_delay_us */ #ifdef CONFIG_ARM64 #include <asm/cputype.h> -static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) +static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) { unsigned long implementor = read_cpuid_implementor(); unsigned long part_num = read_cpuid_part_number(); @@ -233,7 +232,7 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) #else -static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) +static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) { return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; } @@ -241,54 +240,57 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) { - struct cppc_cpudata *cpu; - unsigned int cpu_num = policy->cpu; + struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu]; + struct cppc_perf_caps *caps = &cpu_data->perf_caps; + unsigned int cpu = policy->cpu; int ret = 0; - cpu = all_cpu_data[policy->cpu]; - - cpu->cpu = cpu_num; - ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps); + cpu_data->cpu = cpu; + ret = cppc_get_perf_caps(cpu, caps); if (ret) { pr_debug("Err reading CPU%d perf capabilities. 
ret:%d\n", - cpu_num, ret); + cpu, ret); return ret; } /* Convert the lowest and nominal freq from MHz to KHz */ - cpu->perf_caps.lowest_freq *= 1000; - cpu->perf_caps.nominal_freq *= 1000; + caps->lowest_freq *= 1000; + caps->nominal_freq *= 1000; /* * Set min to lowest nonlinear perf to avoid any efficiency penalty (see * Section 8.4.7.1.1.5 of ACPI 6.1 spec) */ - policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf); - policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf); + policy->min = cppc_cpufreq_perf_to_khz(cpu_data, + caps->lowest_nonlinear_perf); + policy->max = cppc_cpufreq_perf_to_khz(cpu_data, + caps->nominal_perf); /* * Set cpuinfo.min_freq to Lowest to make the full range of performance * available if userspace wants to use any perf between lowest & lowest * nonlinear perf */ - policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf); - policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf); + policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data, + caps->lowest_perf); + policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data, + caps->nominal_perf); - policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num); - policy->shared_type = cpu->shared_type; + policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu); + policy->shared_type = cpu_data->shared_type; if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { int i; - cpumask_copy(policy->cpus, cpu->shared_cpu_map); + cpumask_copy(policy->cpus, cpu_data->shared_cpu_map); for_each_cpu(i, policy->cpus) { - if (unlikely(i == policy->cpu)) + if (unlikely(i == cpu)) continue; - memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps, - sizeof(cpu->perf_caps)); + memcpy(&all_cpu_data[i]->perf_caps, caps, + sizeof(cpu_data->perf_caps)); } } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { /* Support only SW_ANY for now. */ @@ -296,24 +298,23 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) return -EFAULT; } - cpu->cur_policy = policy; + cpu_data->cur_policy = policy; /* * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost * is supported. */ - if (cpu->perf_caps.highest_perf > cpu->perf_caps.nominal_perf) + if (caps->highest_perf > caps->nominal_perf) boost_supported = true; /* Set policy->cur to max now. The governors will adjust later. */ - policy->cur = cppc_cpufreq_perf_to_khz(cpu, - cpu->perf_caps.highest_perf); - cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf; + policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf); + cpu_data->perf_ctrls.desired_perf = caps->highest_perf; - ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls); + ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); if (ret) pr_debug("Err setting perf value:%d on CPU:%d. 
ret:%d\n", - cpu->perf_caps.highest_perf, cpu_num, ret); + caps->highest_perf, cpu, ret); return ret; } @@ -326,7 +327,7 @@ static inline u64 get_delta(u64 t1, u64 t0) return (u32)t1 - (u32)t0; } -static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu, +static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data, struct cppc_perf_fb_ctrs fb_ctrs_t0, struct cppc_perf_fb_ctrs fb_ctrs_t1) { @@ -345,33 +346,34 @@ static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu, delivered_perf = (reference_perf * delta_delivered) / delta_reference; else - delivered_perf = cpu->perf_ctrls.desired_perf; + delivered_perf = cpu_data->perf_ctrls.desired_perf; - return cppc_cpufreq_perf_to_khz(cpu, delivered_perf); + return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf); } -static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum) +static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) { struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0}; - struct cppc_cpudata *cpu = all_cpu_data[cpunum]; + struct cppc_cpudata *cpu_data = all_cpu_data[cpu]; int ret; - ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0); + ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0); if (ret) return ret; udelay(2); /* 2usec delay between sampling */ - ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t1); + ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1); if (ret) return ret; - return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1); + return cppc_get_rate_from_fbctrs(cpu_data, fb_ctrs_t0, fb_ctrs_t1); } static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state) { - struct cppc_cpudata *cpudata; + struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu]; + struct cppc_perf_caps *caps = &cpu_data->perf_caps; int ret; if (!boost_supported) { @@ -379,13 +381,12 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state) return -EINVAL; } - cpudata = all_cpu_data[policy->cpu]; if (state) - policy->max = cppc_cpufreq_perf_to_khz(cpudata, - cpudata->perf_caps.highest_perf); + policy->max = cppc_cpufreq_perf_to_khz(cpu_data, + caps->highest_perf); else - policy->max = cppc_cpufreq_perf_to_khz(cpudata, - cpudata->perf_caps.nominal_perf); + policy->max = cppc_cpufreq_perf_to_khz(cpu_data, + caps->nominal_perf); policy->cpuinfo.max_freq = policy->max; ret = freq_qos_update_request(policy->max_freq_req, policy->max); @@ -412,17 +413,17 @@ static struct cpufreq_driver cppc_cpufreq_driver = { * platform specific mechanism. We reuse the desired performance register to * store the real performance calculated by the platform. 
*/ -static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum) +static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu) { - struct cppc_cpudata *cpudata = all_cpu_data[cpunum]; + struct cppc_cpudata *cpu_data = all_cpu_data[cpu]; u64 desired_perf; int ret; - ret = cppc_get_desired_perf(cpunum, &desired_perf); + ret = cppc_get_desired_perf(cpu, &desired_perf); if (ret < 0) return -EIO; - return cppc_cpufreq_perf_to_khz(cpudata, desired_perf); + return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf); } static void cppc_check_hisi_workaround(void) @@ -450,8 +451,8 @@ static void cppc_check_hisi_workaround(void) static int __init cppc_cpufreq_init(void) { + struct cppc_cpudata *cpu_data; int i, ret = 0; - struct cppc_cpudata *cpu; if (acpi_disabled) return -ENODEV; @@ -466,8 +467,8 @@ static int __init cppc_cpufreq_init(void) if (!all_cpu_data[i]) goto out; - cpu = all_cpu_data[i]; - if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL)) + cpu_data = all_cpu_data[i]; + if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL)) goto out; } @@ -487,11 +488,11 @@ static int __init cppc_cpufreq_init(void) out: for_each_possible_cpu(i) { - cpu = all_cpu_data[i]; - if (!cpu) + cpu_data = all_cpu_data[i]; + if (!cpu_data) break; - free_cpumask_var(cpu->shared_cpu_map); - kfree(cpu); + free_cpumask_var(cpu_data->shared_cpu_map); + kfree(cpu_data); } kfree(all_cpu_data); @@ -500,15 +501,15 @@ out: static void __exit cppc_cpufreq_exit(void) { - struct cppc_cpudata *cpu; + struct cppc_cpudata *cpu_data; int i; cpufreq_unregister_driver(&cppc_cpufreq_driver); for_each_possible_cpu(i) { - cpu = all_cpu_data[i]; - free_cpumask_var(cpu->shared_cpu_map); - kfree(cpu); + cpu_data = all_cpu_data[i]; + free_cpumask_var(cpu_data->shared_cpu_map); + kfree(cpu_data); } kfree(all_cpu_data); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 3776d960f405..bd2db0188cbb 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -119,10 +119,12 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "mediatek,mt2712", }, { .compatible = "mediatek,mt7622", }, { .compatible = "mediatek,mt7623", }, + { .compatible = "mediatek,mt8167", }, { .compatible = "mediatek,mt817x", }, { .compatible = "mediatek,mt8173", }, { .compatible = "mediatek,mt8176", }, { .compatible = "mediatek,mt8183", }, + { .compatible = "mediatek,mt8516", }, { .compatible = "nvidia,tegra20", }, { .compatible = "nvidia,tegra30", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index e363ae04aac6..ad4234518ef6 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -30,7 +30,7 @@ struct private_data { cpumask_var_t cpus; struct device *cpu_dev; struct opp_table *opp_table; - struct opp_table *reg_opp_table; + struct cpufreq_frequency_table *freq_table; bool have_static_opps; }; @@ -102,7 +102,6 @@ node_put: static int cpufreq_init(struct cpufreq_policy *policy) { - struct cpufreq_frequency_table *freq_table; struct private_data *priv; struct device *cpu_dev; struct clk *cpu_clk; @@ -114,9 +113,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) pr_err("failed to find data for cpu%d\n", policy->cpu); return -ENODEV; } - cpu_dev = priv->cpu_dev; - cpumask_copy(policy->cpus, priv->cpus); cpu_clk = clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { @@ -125,67 +122,32 @@ static int cpufreq_init(struct cpufreq_policy *policy) return ret; } - /* - * Initialize OPP tables for all 
policy->cpus. They will be shared by - * all CPUs which have marked their CPUs shared with OPP bindings. - * - * For platforms not using operating-points-v2 bindings, we do this - * before updating policy->cpus. Otherwise, we will end up creating - * duplicate OPPs for policy->cpus. - * - * OPPs might be populated at runtime, don't check for error here - */ - if (!dev_pm_opp_of_cpumask_add_table(policy->cpus)) - priv->have_static_opps = true; - - /* - * But we need OPP table to function so if it is not there let's - * give platform code chance to provide it for us. - */ - ret = dev_pm_opp_get_opp_count(cpu_dev); - if (ret <= 0) { - dev_err(cpu_dev, "OPP table can't be empty\n"); - ret = -ENODEV; - goto out_free_opp; - } - - ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); - if (ret) { - dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); - goto out_free_opp; - } + transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); + if (!transition_latency) + transition_latency = CPUFREQ_ETERNAL; + cpumask_copy(policy->cpus, priv->cpus); policy->driver_data = priv; policy->clk = cpu_clk; - policy->freq_table = freq_table; - + policy->freq_table = priv->freq_table; policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000; + policy->cpuinfo.transition_latency = transition_latency; + policy->dvfs_possible_from_any_cpu = true; /* Support turbo/boost mode */ if (policy_has_boost_freq(policy)) { /* This gets disabled by core on driver unregister */ ret = cpufreq_enable_boost_support(); if (ret) - goto out_free_cpufreq_table; + goto out_clk_put; cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; } - transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); - if (!transition_latency) - transition_latency = CPUFREQ_ETERNAL; - - policy->cpuinfo.transition_latency = transition_latency; - policy->dvfs_possible_from_any_cpu = true; - dev_pm_opp_of_register_em(cpu_dev, policy->cpus); return 0; -out_free_cpufreq_table: - dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); -out_free_opp: - if (priv->have_static_opps) - dev_pm_opp_of_cpumask_remove_table(policy->cpus); +out_clk_put: clk_put(cpu_clk); return ret; @@ -208,11 +170,6 @@ static int cpufreq_offline(struct cpufreq_policy *policy) static int cpufreq_exit(struct cpufreq_policy *policy) { - struct private_data *priv = policy->driver_data; - - dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); - if (priv->have_static_opps) - dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); clk_put(policy->clk); return 0; } @@ -236,6 +193,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu) { struct private_data *priv; struct device *cpu_dev; + bool fallback = false; const char *reg_name; int ret; @@ -254,68 +212,86 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu) if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL)) return -ENOMEM; + cpumask_set_cpu(cpu, priv->cpus); priv->cpu_dev = cpu_dev; - /* Try to get OPP table early to ensure resources are available */ - priv->opp_table = dev_pm_opp_get_opp_table(cpu_dev); - if (IS_ERR(priv->opp_table)) { - ret = PTR_ERR(priv->opp_table); - if (ret != -EPROBE_DEFER) - dev_err(cpu_dev, "failed to get OPP table: %d\n", ret); - goto free_cpumask; - } - /* * OPP layer will be taking care of regulators now, but it needs to know * the name of the regulator first. 
*/ reg_name = find_supply_name(cpu_dev); if (reg_name) { - priv->reg_opp_table = dev_pm_opp_set_regulators(cpu_dev, - ®_name, 1); - if (IS_ERR(priv->reg_opp_table)) { - ret = PTR_ERR(priv->reg_opp_table); + priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, ®_name, + 1); + if (IS_ERR(priv->opp_table)) { + ret = PTR_ERR(priv->opp_table); if (ret != -EPROBE_DEFER) dev_err(cpu_dev, "failed to set regulators: %d\n", ret); - goto put_table; + goto free_cpumask; } } - /* Find OPP sharing information so we can fill pri->cpus here */ /* Get OPP-sharing information from "operating-points-v2" bindings */ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus); if (ret) { if (ret != -ENOENT) - goto put_reg; + goto out; /* * operating-points-v2 not supported, fallback to all CPUs share * OPP for backward compatibility if the platform hasn't set * sharing CPUs. */ - if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) { - cpumask_setall(priv->cpus); - - /* - * OPP tables are initialized only for cpu, do it for - * others as well. - */ - ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus); - if (ret) - dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", - __func__, ret); - } + if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) + fallback = true; + } + + /* + * Initialize OPP tables for all priv->cpus. They will be shared by + * all CPUs which have marked their CPUs shared with OPP bindings. + * + * For platforms not using operating-points-v2 bindings, we do this + * before updating priv->cpus. Otherwise, we will end up creating + * duplicate OPPs for the CPUs. + * + * OPPs might be populated at runtime, don't check for error here. + */ + if (!dev_pm_opp_of_cpumask_add_table(priv->cpus)) + priv->have_static_opps = true; + + /* + * The OPP table must be initialized, statically or dynamically, by this + * point. 
+ */ + ret = dev_pm_opp_get_opp_count(cpu_dev); + if (ret <= 0) { + dev_err(cpu_dev, "OPP table can't be empty\n"); + ret = -ENODEV; + goto out; + } + + if (fallback) { + cpumask_setall(priv->cpus); + ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus); + if (ret) + dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", + __func__, ret); + } + + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table); + if (ret) { + dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); + goto out; } list_add(&priv->node, &priv_list); return 0; -put_reg: - if (priv->reg_opp_table) - dev_pm_opp_put_regulators(priv->reg_opp_table); -put_table: - dev_pm_opp_put_opp_table(priv->opp_table); +out: + if (priv->have_static_opps) + dev_pm_opp_of_cpumask_remove_table(priv->cpus); + dev_pm_opp_put_regulators(priv->opp_table); free_cpumask: free_cpumask_var(priv->cpus); return ret; @@ -326,9 +302,10 @@ static void dt_cpufreq_release(void) struct private_data *priv, *tmp; list_for_each_entry_safe(priv, tmp, &priv_list, node) { - if (priv->reg_opp_table) - dev_pm_opp_put_regulators(priv->reg_opp_table); - dev_pm_opp_put_opp_table(priv->opp_table); + dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table); + if (priv->have_static_opps) + dev_pm_opp_of_cpumask_remove_table(priv->cpus); + dev_pm_opp_put_regulators(priv->opp_table); free_cpumask_var(priv->cpus); list_del(&priv->node); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 1e7e3f2ff09f..c17aa2973c44 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -298,8 +298,10 @@ struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu) * EXTERNALLY AFFECTING FREQUENCY CHANGES * *********************************************************************/ -/* - * adjust_jiffies - adjust the system "loops_per_jiffy" +/** + * adjust_jiffies - Adjust the system "loops_per_jiffy". + * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE. + * @ci: Frequency change information. * * This function alters the system "loops_per_jiffy" for the clock * speed change. Note that loops_per_jiffy cannot be updated on SMP @@ -331,14 +333,14 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) } /** - * cpufreq_notify_transition - Notify frequency transition and adjust_jiffies. + * cpufreq_notify_transition - Notify frequency transition and adjust jiffies. * @policy: cpufreq policy to enable fast frequency switching for. * @freqs: contain details of the frequency update. * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE. * - * This function calls the transition notifiers and the "adjust_jiffies" - * function. It is called twice on all CPU frequency changes that have - * external effects. + * This function calls the transition notifiers and adjust_jiffies(). + * + * It is called twice on all CPU frequency changes that have external effects. 
*/ static void cpufreq_notify_transition(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs, @@ -1391,8 +1393,10 @@ static int cpufreq_online(unsigned int cpu) policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req), GFP_KERNEL); - if (!policy->min_freq_req) + if (!policy->min_freq_req) { + ret = -ENOMEM; goto out_destroy_policy; + } ret = freq_qos_add_request(&policy->constraints, policy->min_freq_req, FREQ_QOS_MIN, @@ -1429,6 +1433,7 @@ static int cpufreq_online(unsigned int cpu) if (cpufreq_driver->get && has_target()) { policy->cur = cpufreq_driver->get(policy->cpu); if (!policy->cur) { + ret = -EIO; pr_err("%s: ->get() failed\n", __func__); goto out_destroy_policy; } @@ -1646,13 +1651,12 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) } /** - * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're - * in deep trouble. - * @policy: policy managing CPUs - * @new_freq: CPU frequency the CPU actually runs at + * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference. + * @policy: Policy managing CPUs. + * @new_freq: New CPU frequency. * - * We adjust to current frequency first, and need to clean up later. - * So either call to cpufreq_update_policy() or schedule handle_update()). + * Adjust to the current frequency first and clean up later by either calling + * cpufreq_update_policy(), or scheduling handle_update(). */ static void cpufreq_out_of_sync(struct cpufreq_policy *policy, unsigned int new_freq) @@ -1832,7 +1836,7 @@ int cpufreq_generic_suspend(struct cpufreq_policy *policy) EXPORT_SYMBOL(cpufreq_generic_suspend); /** - * cpufreq_suspend() - Suspend CPUFreq governors + * cpufreq_suspend() - Suspend CPUFreq governors. * * Called during system wide Suspend/Hibernate cycles for suspending governors * as some platforms can't change frequency after this point in suspend cycle. @@ -1868,7 +1872,7 @@ suspend: } /** - * cpufreq_resume() - Resume CPUFreq governors + * cpufreq_resume() - Resume CPUFreq governors. * * Called during system wide Suspend/Hibernate cycle for resuming governors that * are suspended with cpufreq_suspend(). @@ -1920,10 +1924,10 @@ bool cpufreq_driver_test_flags(u16 flags) } /** - * cpufreq_get_current_driver - return current driver's name + * cpufreq_get_current_driver - Return the current driver's name. * - * Return the name string of the currently loaded cpufreq driver - * or NULL, if none. + * Return the name string of the currently registered cpufreq driver or NULL if + * none. */ const char *cpufreq_get_current_driver(void) { @@ -1935,10 +1939,10 @@ const char *cpufreq_get_current_driver(void) EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); /** - * cpufreq_get_driver_data - return current driver data + * cpufreq_get_driver_data - Return current driver data. * - * Return the private data of the currently loaded cpufreq - * driver, or NULL if no cpufreq driver is loaded. + * Return the private data of the currently registered cpufreq driver, or NULL + * if no cpufreq driver has been registered. */ void *cpufreq_get_driver_data(void) { @@ -1954,17 +1958,16 @@ EXPORT_SYMBOL_GPL(cpufreq_get_driver_data); *********************************************************************/ /** - * cpufreq_register_notifier - register a driver with cpufreq - * @nb: notifier function to register - * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER + * cpufreq_register_notifier - Register a notifier with cpufreq. + * @nb: notifier function to register. 
+ * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER. * - * Add a driver to one of two lists: either a list of drivers that - * are notified about clock rate changes (once before and once after - * the transition), or a list of drivers that are notified about - * changes in cpufreq policy. + * Add a notifier to one of two lists: either a list of notifiers that run on + * clock rate changes (once before and once after every transition), or a list + * of notifiers that ron on cpufreq policy changes. * - * This function may sleep, and has the same return conditions as - * blocking_notifier_chain_register. + * This function may sleep and it has the same return values as + * blocking_notifier_chain_register(). */ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) { @@ -2001,14 +2004,14 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) EXPORT_SYMBOL(cpufreq_register_notifier); /** - * cpufreq_unregister_notifier - unregister a driver with cpufreq - * @nb: notifier block to be unregistered - * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER + * cpufreq_unregister_notifier - Unregister a notifier from cpufreq. + * @nb: notifier block to be unregistered. + * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER. * - * Remove a driver from the CPU frequency notifier list. + * Remove a notifier from one of the cpufreq notifier lists. * - * This function may sleep, and has the same return conditions as - * blocking_notifier_chain_unregister. + * This function may sleep and it has the same return values as + * blocking_notifier_chain_unregister(). */ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) { @@ -2123,7 +2126,7 @@ static int __target_intermediate(struct cpufreq_policy *policy, static int __target_index(struct cpufreq_policy *policy, int index) { struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; - unsigned int intermediate_freq = 0; + unsigned int restore_freq, intermediate_freq = 0; unsigned int newfreq = policy->freq_table[index].frequency; int retval = -EINVAL; bool notify; @@ -2131,6 +2134,9 @@ static int __target_index(struct cpufreq_policy *policy, int index) if (newfreq == policy->cur) return 0; + /* Save last value to restore later on errors */ + restore_freq = policy->cur; + notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); if (notify) { /* Handle switching to intermediate frequency */ @@ -2168,7 +2174,7 @@ static int __target_index(struct cpufreq_policy *policy, int index) */ if (unlikely(retval && intermediate_freq)) { freqs.old = intermediate_freq; - freqs.new = policy->restore_freq; + freqs.new = restore_freq; cpufreq_freq_transition_begin(policy, &freqs); cpufreq_freq_transition_end(policy, &freqs, 0); } @@ -2203,9 +2209,6 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS)) return 0; - /* Save last value to restore later on errors */ - policy->restore_freq = policy->cur; - if (cpufreq_driver->target) return cpufreq_driver->target(policy, target_freq, relation); diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 6cd5c8ab5d49..da717f7cd9a9 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -9,9 +9,9 @@ #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/module.h> +#include <linux/sched/clock.h> #include <linux/slab.h> - struct cpufreq_stats { unsigned int total_trans; unsigned long long 
last_time; @@ -30,7 +30,7 @@ struct cpufreq_stats { static void cpufreq_stats_update(struct cpufreq_stats *stats, unsigned long long time) { - unsigned long long cur_time = get_jiffies_64(); + unsigned long long cur_time = local_clock(); stats->time_in_state[stats->last_index] += cur_time - time; stats->last_time = cur_time; @@ -42,7 +42,7 @@ static void cpufreq_stats_reset_table(struct cpufreq_stats *stats) memset(stats->time_in_state, 0, count * sizeof(u64)); memset(stats->trans_table, 0, count * count * sizeof(int)); - stats->last_time = get_jiffies_64(); + stats->last_time = local_clock(); stats->total_trans = 0; /* Adjust for the time elapsed since reset was requested */ @@ -82,18 +82,18 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) * before the reset_pending read above. */ smp_rmb(); - time = get_jiffies_64() - READ_ONCE(stats->reset_time); + time = local_clock() - READ_ONCE(stats->reset_time); } else { time = 0; } } else { time = stats->time_in_state[i]; if (i == stats->last_index) - time += get_jiffies_64() - stats->last_time; + time += local_clock() - stats->last_time; } len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i], - jiffies_64_to_clock_t(time)); + nsec_to_clock_t(time)); } return len; } @@ -109,7 +109,7 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf, * Defer resetting of stats to cpufreq_stats_record_transition() to * avoid races. */ - WRITE_ONCE(stats->reset_time, get_jiffies_64()); + WRITE_ONCE(stats->reset_time, local_clock()); /* * The memory barrier below is to prevent the readers of reset_time from * seeing a stale or partially updated value. @@ -249,7 +249,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy) stats->freq_table[i++] = pos->frequency; stats->state_num = i; - stats->last_time = get_jiffies_64(); + stats->last_time = local_clock(); stats->last_index = freq_table_get_index(stats, policy->cur); policy->stats = stats; diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index 5a7f6dafcddb..ac57cddc5f2f 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c @@ -101,6 +101,13 @@ out_put_node: } module_init(hb_cpufreq_driver_init); +static const struct of_device_id __maybe_unused hb_cpufreq_of_match[] = { + { .compatible = "calxeda,highbank" }, + { .compatible = "calxeda,ecx-2000" }, + { }, +}; +MODULE_DEVICE_TABLE(of, hb_cpufreq_of_match); + MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>"); MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 36a3ccfe6d3d..2a4db856222f 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2569,14 +2569,13 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, int old_pstate = cpu->pstate.current_pstate; target_pstate = intel_pstate_prepare_request(cpu, target_pstate); - if (hwp_active) { + if (hwp_active) intel_cpufreq_adjust_hwp(cpu, target_pstate, policy->strict_target, fast_switch); - cpu->pstate.current_pstate = target_pstate; - } else if (target_pstate != old_pstate) { + else if (target_pstate != old_pstate) intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch); - cpu->pstate.current_pstate = target_pstate; - } + + cpu->pstate.current_pstate = target_pstate; intel_cpufreq_trace(cpu, fast_switch ? 
INTEL_PSTATE_TRACE_FAST_SWITCH : INTEL_PSTATE_TRACE_TARGET, old_pstate); diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c index 0ea88778882a..86f612593e49 100644 --- a/drivers/cpufreq/loongson1-cpufreq.c +++ b/drivers/cpufreq/loongson1-cpufreq.c @@ -216,6 +216,7 @@ static struct platform_driver ls1x_cpufreq_platdrv = { module_platform_driver(ls1x_cpufreq_platdrv); +MODULE_ALIAS("platform:ls1x-cpufreq"); MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>"); MODULE_DESCRIPTION("Loongson1 CPUFreq driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 7d1212c9b7c8..022e3e966e71 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -532,6 +532,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = { { .compatible = "mediatek,mt2712", }, { .compatible = "mediatek,mt7622", }, { .compatible = "mediatek,mt7623", }, + { .compatible = "mediatek,mt8167", }, { .compatible = "mediatek,mt817x", }, { .compatible = "mediatek,mt8173", }, { .compatible = "mediatek,mt8176", }, @@ -540,6 +541,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = { { } }; +MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines); static int __init mtk_cpufreq_driver_init(void) { @@ -572,6 +574,7 @@ static int __init mtk_cpufreq_driver_init(void) pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0); if (IS_ERR(pdev)) { pr_err("failed to register mtk-cpufreq platform device\n"); + platform_driver_unregister(&mtk_cpufreq_platdrv); return PTR_ERR(pdev); } diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index d06b37822c3d..d1744b5d9619 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -397,19 +397,19 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) free_genpd_opp: for_each_possible_cpu(cpu) { - if (IS_ERR_OR_NULL(drv->genpd_opp_tables[cpu])) + if (IS_ERR(drv->genpd_opp_tables[cpu])) break; dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]); } kfree(drv->genpd_opp_tables); free_opp: for_each_possible_cpu(cpu) { - if (IS_ERR_OR_NULL(drv->names_opp_tables[cpu])) + if (IS_ERR(drv->names_opp_tables[cpu])) break; dev_pm_opp_put_prop_name(drv->names_opp_tables[cpu]); } for_each_possible_cpu(cpu) { - if (IS_ERR_OR_NULL(drv->hw_opp_tables[cpu])) + if (IS_ERR(drv->hw_opp_tables[cpu])) break; dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]); } @@ -430,12 +430,9 @@ static int qcom_cpufreq_remove(struct platform_device *pdev) platform_device_unregister(cpufreq_dt_pdev); for_each_possible_cpu(cpu) { - if (drv->names_opp_tables[cpu]) - dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]); - if (drv->hw_opp_tables[cpu]) - dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]); - if (drv->genpd_opp_tables[cpu]) - dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]); + dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]); + dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]); + dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]); } kfree(drv->names_opp_tables); @@ -464,6 +461,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst = { { .compatible = "qcom,msm8960", .data = &match_data_krait }, {}, }; +MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list); /* * Since the driver depends on smem and nvmem drivers, which may diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 8286205c7165..491a0a24fb1e 100644 
--- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -126,6 +126,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) struct scmi_data *priv; struct cpufreq_frequency_table *freq_table; struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power); + bool power_scale_mw; cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { @@ -189,7 +190,9 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) policy->fast_switch_possible = handle->perf_ops->fast_switch_possible(handle, cpu_dev); - em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus); + power_scale_mw = handle->perf_ops->power_scale_mw_get(handle); + em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus, + power_scale_mw); return 0; diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 43db05b949d9..e5140ad63db8 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -233,6 +233,7 @@ static struct platform_driver scpi_cpufreq_platdrv = { }; module_platform_driver(scpi_cpufreq_platdrv); +MODULE_ALIAS("platform:scpi-cpufreq"); MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); MODULE_DESCRIPTION("ARM SCPI CPUFreq interface driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index 4ac6fb23792a..fdb0a722d881 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -223,7 +223,8 @@ use_defaults: opp_table = dev_pm_opp_set_supported_hw(dev, version, VERSION_ELEMENTS); if (IS_ERR(opp_table)) { dev_err(dev, "Failed to set supported hardware\n"); - return PTR_ERR(opp_table); + ret = PTR_ERR(opp_table); + goto err_put_prop_name; } dev_dbg(dev, "pcode: %d major: %d minor: %d substrate: %d\n", @@ -232,6 +233,10 @@ use_defaults: version[0], version[1], version[2]); return 0; + +err_put_prop_name: + dev_pm_opp_put_prop_name(opp_table); + return ret; } static int sti_cpufreq_fetch_syscon_registers(void) @@ -292,6 +297,13 @@ register_cpufreq_dt: } module_init(sti_cpufreq_init); +static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = { + { .compatible = "st,stih407" }, + { .compatible = "st,stih410" }, + { }, +}; +MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match); + MODULE_DESCRIPTION("STMicroelectronics CPUFreq/OPP driver"); MODULE_AUTHOR("Ajitpal Singh <ajitpal.singh@st.com>"); MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>"); diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 9907a165135b..2deed8d8773f 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -167,6 +167,7 @@ static const struct of_device_id sun50i_cpufreq_match_list[] = { { .compatible = "allwinner,sun50i-h6" }, {} }; +MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list); static const struct of_device_id *sun50i_cpufreq_match_node(void) { diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 7eb2c56c65de..e566ea298b59 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -12,35 +12,52 @@ #include <soc/tegra/bpmp.h> #include <soc/tegra/bpmp-abi.h> -#define EDVD_CORE_VOLT_FREQ(core) (0x20 + (core) * 0x4) -#define EDVD_CORE_VOLT_FREQ_F_SHIFT 0 -#define EDVD_CORE_VOLT_FREQ_F_MASK 0xffff -#define EDVD_CORE_VOLT_FREQ_V_SHIFT 16 - -struct tegra186_cpufreq_cluster_info { - unsigned long offset; - int cpus[4]; +#define TEGRA186_NUM_CLUSTERS 2 +#define EDVD_OFFSET_A57(core) ((SZ_64K * 6) + (0x20 + (core) * 
0x4)) +#define EDVD_OFFSET_DENVER(core) ((SZ_64K * 7) + (0x20 + (core) * 0x4)) +#define EDVD_CORE_VOLT_FREQ_F_SHIFT 0 +#define EDVD_CORE_VOLT_FREQ_F_MASK 0xffff +#define EDVD_CORE_VOLT_FREQ_V_SHIFT 16 + +struct tegra186_cpufreq_cpu { unsigned int bpmp_cluster_id; + unsigned int edvd_offset; }; -#define NO_CPU -1 -static const struct tegra186_cpufreq_cluster_info tegra186_clusters[] = { - /* Denver cluster */ +static const struct tegra186_cpufreq_cpu tegra186_cpus[] = { + /* CPU0 - A57 Cluster */ + { + .bpmp_cluster_id = 1, + .edvd_offset = EDVD_OFFSET_A57(0) + }, + /* CPU1 - Denver Cluster */ { - .offset = SZ_64K * 7, - .cpus = { 1, 2, NO_CPU, NO_CPU }, .bpmp_cluster_id = 0, + .edvd_offset = EDVD_OFFSET_DENVER(0) + }, + /* CPU2 - Denver Cluster */ + { + .bpmp_cluster_id = 0, + .edvd_offset = EDVD_OFFSET_DENVER(1) + }, + /* CPU3 - A57 Cluster */ + { + .bpmp_cluster_id = 1, + .edvd_offset = EDVD_OFFSET_A57(1) }, - /* A57 cluster */ + /* CPU4 - A57 Cluster */ { - .offset = SZ_64K * 6, - .cpus = { 0, 3, 4, 5 }, .bpmp_cluster_id = 1, + .edvd_offset = EDVD_OFFSET_A57(2) + }, + /* CPU5 - A57 Cluster */ + { + .bpmp_cluster_id = 1, + .edvd_offset = EDVD_OFFSET_A57(3) }, }; struct tegra186_cpufreq_cluster { - const struct tegra186_cpufreq_cluster_info *info; struct cpufreq_frequency_table *table; u32 ref_clk_khz; u32 div; @@ -48,36 +65,18 @@ struct tegra186_cpufreq_cluster { struct tegra186_cpufreq_data { void __iomem *regs; - - size_t num_clusters; struct tegra186_cpufreq_cluster *clusters; + const struct tegra186_cpufreq_cpu *cpus; }; static int tegra186_cpufreq_init(struct cpufreq_policy *policy) { struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); - unsigned int i; - - for (i = 0; i < data->num_clusters; i++) { - struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; - const struct tegra186_cpufreq_cluster_info *info = - cluster->info; - int core; - - for (core = 0; core < ARRAY_SIZE(info->cpus); core++) { - if (info->cpus[core] == policy->cpu) - break; - } - if (core == ARRAY_SIZE(info->cpus)) - continue; - - policy->driver_data = - data->regs + info->offset + EDVD_CORE_VOLT_FREQ(core); - policy->freq_table = cluster->table; - break; - } + unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id; + policy->freq_table = data->clusters[cluster].table; policy->cpuinfo.transition_latency = 300 * 1000; + policy->driver_data = NULL; return 0; } @@ -85,11 +84,12 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy) static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) { + struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); struct cpufreq_frequency_table *tbl = policy->freq_table + index; - void __iomem *edvd_reg = policy->driver_data; + unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset; u32 edvd_val = tbl->driver_data; - writel(edvd_val, edvd_reg); + writel(edvd_val, data->regs + edvd_offset); return 0; } @@ -97,35 +97,22 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy, static unsigned int tegra186_cpufreq_get(unsigned int cpu) { struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); + struct tegra186_cpufreq_cluster *cluster; struct cpufreq_policy *policy; - void __iomem *edvd_reg; - unsigned int i, freq = 0; + unsigned int edvd_offset, cluster_id; u32 ndiv; policy = cpufreq_cpu_get(cpu); if (!policy) return 0; - edvd_reg = policy->driver_data; - ndiv = readl(edvd_reg) & EDVD_CORE_VOLT_FREQ_F_MASK; - - for (i = 0; i < data->num_clusters; i++) { - struct 
tegra186_cpufreq_cluster *cluster = &data->clusters[i]; - int core; - - for (core = 0; core < ARRAY_SIZE(cluster->info->cpus); core++) { - if (cluster->info->cpus[core] != policy->cpu) - continue; - - freq = (cluster->ref_clk_khz * ndiv) / cluster->div; - goto out; - } - } - -out: + edvd_offset = data->cpus[policy->cpu].edvd_offset; + ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK; + cluster_id = data->cpus[policy->cpu].bpmp_cluster_id; + cluster = &data->clusters[cluster_id]; cpufreq_cpu_put(policy); - return freq; + return (cluster->ref_clk_khz * ndiv) / cluster->div; } static struct cpufreq_driver tegra186_cpufreq_driver = { @@ -141,7 +128,7 @@ static struct cpufreq_driver tegra186_cpufreq_driver = { static struct cpufreq_frequency_table *init_vhint_table( struct platform_device *pdev, struct tegra_bpmp *bpmp, - struct tegra186_cpufreq_cluster *cluster) + struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id) { struct cpufreq_frequency_table *table; struct mrq_cpu_vhint_request req; @@ -160,7 +147,7 @@ static struct cpufreq_frequency_table *init_vhint_table( memset(&req, 0, sizeof(req)); req.addr = phys; - req.cluster_id = cluster->info->bpmp_cluster_id; + req.cluster_id = cluster_id; memset(&msg, 0, sizeof(msg)); msg.mrq = MRQ_CPU_VHINT; @@ -234,12 +221,12 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) if (!data) return -ENOMEM; - data->clusters = devm_kcalloc(&pdev->dev, ARRAY_SIZE(tegra186_clusters), + data->clusters = devm_kcalloc(&pdev->dev, TEGRA186_NUM_CLUSTERS, sizeof(*data->clusters), GFP_KERNEL); if (!data->clusters) return -ENOMEM; - data->num_clusters = ARRAY_SIZE(tegra186_clusters); + data->cpus = tegra186_cpus; bpmp = tegra_bpmp_get(&pdev->dev); if (IS_ERR(bpmp)) @@ -251,11 +238,10 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) goto put_bpmp; } - for (i = 0; i < data->num_clusters; i++) { + for (i = 0; i < TEGRA186_NUM_CLUSTERS; i++) { struct tegra186_cpufreq_cluster *cluster = &data->clusters[i]; - cluster->info = &tegra186_clusters[i]; - cluster->table = init_vhint_table(pdev, bpmp, cluster); + cluster->table = init_vhint_table(pdev, bpmp, cluster, i); if (IS_ERR(cluster->table)) { err = PTR_ERR(cluster->table); goto put_bpmp; diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index e1d931c457a7..6a67f36f3b80 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -21,7 +21,6 @@ #define KHZ 1000 #define REF_CLK_MHZ 408 /* 408 MHz */ #define US_DELAY 500 -#define US_DELAY_MIN 2 #define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ) #define MAX_CNT ~0U @@ -44,7 +43,6 @@ struct tegra194_cpufreq_data { struct tegra_cpu_ctr { u32 cpu; - u32 delay; u32 coreclk_cnt, last_coreclk_cnt; u32 refclk_cnt, last_refclk_cnt; }; @@ -112,7 +110,7 @@ static void tegra_read_counters(struct work_struct *work) val = read_freq_feedback(); c->last_refclk_cnt = lower_32_bits(val); c->last_coreclk_cnt = upper_32_bits(val); - udelay(c->delay); + udelay(US_DELAY); val = read_freq_feedback(); c->refclk_cnt = lower_32_bits(val); c->coreclk_cnt = upper_32_bits(val); @@ -139,7 +137,7 @@ static void tegra_read_counters(struct work_struct *work) * @cpu - logical cpu whose freq to be updated * Returns freq in KHz on success, 0 if cpu is offline */ -static unsigned int tegra194_get_speed_common(u32 cpu, u32 delay) +static unsigned int tegra194_calculate_speed(u32 cpu) { struct read_counters_work read_counters_work; struct tegra_cpu_ctr c; @@ -153,7 +151,6 @@ static unsigned 
int tegra194_get_speed_common(u32 cpu, u32 delay) * interrupts enabled. */ read_counters_work.c.cpu = cpu; - read_counters_work.c.delay = delay; INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters); queue_work_on(cpu, read_counters_wq, &read_counters_work.work); flush_work(&read_counters_work.work); @@ -180,9 +177,61 @@ static unsigned int tegra194_get_speed_common(u32 cpu, u32 delay) return (rate_mhz * KHZ); /* in KHz */ } +static void get_cpu_ndiv(void *ndiv) +{ + u64 ndiv_val; + + asm volatile("mrs %0, s3_0_c15_c0_4" : "=r" (ndiv_val) : ); + + *(u64 *)ndiv = ndiv_val; +} + +static void set_cpu_ndiv(void *data) +{ + struct cpufreq_frequency_table *tbl = data; + u64 ndiv_val = (u64)tbl->driver_data; + + asm volatile("msr s3_0_c15_c0_4, %0" : : "r" (ndiv_val)); +} + static unsigned int tegra194_get_speed(u32 cpu) { - return tegra194_get_speed_common(cpu, US_DELAY); + struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); + struct cpufreq_frequency_table *pos; + unsigned int rate; + u64 ndiv; + int ret; + u32 cl; + + smp_call_function_single(cpu, get_cpu_cluster, &cl, true); + + /* reconstruct actual cpu freq using counters */ + rate = tegra194_calculate_speed(cpu); + + /* get last written ndiv value */ + ret = smp_call_function_single(cpu, get_cpu_ndiv, &ndiv, true); + if (WARN_ON_ONCE(ret)) + return rate; + + /* + * If the reconstructed frequency has acceptable delta from + * the last written value, then return freq corresponding + * to the last written ndiv value from freq_table. This is + * done to return consistent value. + */ + cpufreq_for_each_valid_entry(pos, data->tables[cl]) { + if (pos->driver_data != ndiv) + continue; + + if (abs(pos->frequency - rate) > 115200) { + pr_warn("cpufreq: cpu%d,cur:%u,set:%u,set ndiv:%llu\n", + cpu, rate, pos->frequency, ndiv); + } else { + rate = pos->frequency; + } + break; + } + return rate; } static int tegra194_cpufreq_init(struct cpufreq_policy *policy) @@ -196,9 +245,6 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy) if (cl >= data->num_clusters) return -EINVAL; - /* boot freq */ - policy->cur = tegra194_get_speed_common(policy->cpu, US_DELAY_MIN); - /* set same policy for all cpus in a cluster */ for (cpu = (cl * 2); cpu < ((cl + 1) * 2); cpu++) cpumask_set_cpu(cpu, policy->cpus); @@ -209,14 +255,6 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy) return 0; } -static void set_cpu_ndiv(void *data) -{ - struct cpufreq_frequency_table *tbl = data; - u64 ndiv_val = (u64)tbl->driver_data; - - asm volatile("msr s3_0_c15_c0_4, %0" : : "r" (ndiv_val)); -} - static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) { diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index e89b905754d2..f711d8eaea6a 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -591,6 +591,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = { }; module_platform_driver(ve_spc_cpufreq_platdrv); +MODULE_ALIAS("platform:vexpress-spc-cpufreq"); MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver"); |