Diffstat (limited to 'drivers/thermal')
-rw-r--r--  drivers/thermal/Kconfig                                        68
-rw-r--r--  drivers/thermal/Makefile                                        5
-rw-r--r--  drivers/thermal/cpu_cooling.c                                 624
-rw-r--r--  drivers/thermal/db8500_thermal.c                                2
-rw-r--r--  drivers/thermal/fair_share.c                                   41
-rw-r--r--  drivers/thermal/hisi_thermal.c                                420
-rw-r--r--  drivers/thermal/imx_thermal.c                                   3
-rw-r--r--  drivers/thermal/int340x_thermal/processor_thermal_device.c    59
-rw-r--r--  drivers/thermal/intel_powerclamp.c                              5
-rw-r--r--  drivers/thermal/intel_quark_dts_thermal.c                     473
-rw-r--r--  drivers/thermal/intel_soc_dts_iosf.c                          478
-rw-r--r--  drivers/thermal/intel_soc_dts_iosf.h                           62
-rw-r--r--  drivers/thermal/intel_soc_dts_thermal.c                       430
-rw-r--r--  drivers/thermal/of-thermal.c                                   41
-rw-r--r--  drivers/thermal/power_allocator.c                             544
-rw-r--r--  drivers/thermal/qcom-spmi-temp-alarm.c                        309
-rw-r--r--  drivers/thermal/samsung/Kconfig                                 2
-rw-r--r--  drivers/thermal/samsung/exynos_tmu.c                          190
-rw-r--r--  drivers/thermal/samsung/exynos_tmu.h                            1
-rw-r--r--  drivers/thermal/thermal_core.c                                315
-rw-r--r--  drivers/thermal/thermal_core.h                                 11
-rw-r--r--  drivers/thermal/ti-soc-thermal/ti-bandgap.c                   104
-rw-r--r--  drivers/thermal/ti-soc-thermal/ti-thermal-common.c              5
-rw-r--r--  drivers/thermal/x86_pkg_temp_thermal.c                          2
24 files changed, 3647 insertions, 547 deletions
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index af40db0df..118938ee8 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -42,6 +42,17 @@ config THERMAL_OF
Say 'Y' here if you need to build thermal infrastructure
based on device tree.
+config THERMAL_WRITABLE_TRIPS
+ bool "Enable writable trip points"
+ help
+ This option allows the system integrator to choose whether
+ trip temperatures can be changed from userspace. The
+ writable trips need to be specified when setting up the
+ thermal zone but the choice here takes precedence.
+
+ Say 'Y' here if you would like to allow userspace tools to
+ change trip temperatures.
+
choice
prompt "Default Thermal governor"
default THERMAL_DEFAULT_GOV_STEP_WISE
@@ -71,6 +82,14 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
Select this if you want to let the user space manage the
platform thermals.
+config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR
+ bool "power_allocator"
+ select THERMAL_GOV_POWER_ALLOCATOR
+ help
+ Select this if you want to control temperature based on
+ system and device power allocation. This governor can only
+ operate on cooling devices that implement the power API.
+
endchoice
config THERMAL_GOV_FAIR_SHARE
@@ -99,6 +118,12 @@ config THERMAL_GOV_USER_SPACE
help
Enable this to let the user space manage the platform thermals.
+config THERMAL_GOV_POWER_ALLOCATOR
+ bool "Power allocator thermal governor"
+ help
+ Enable this to manage platform thermals by dynamically
+ allocating and limiting power to devices.
+
config CPU_THERMAL
bool "generic cpu cooling support"
depends on CPU_FREQ
@@ -136,6 +161,14 @@ config THERMAL_EMULATION
because userland can easily disable the thermal policy by simply
flooding this sysfs node with low temperature values.
+config HISI_THERMAL
+ tristate "Hisilicon thermal driver"
+ depends on ARCH_HISI && CPU_THERMAL && OF
+ help
+ Enable this to plug Hisilicon's thermal sensor driver into the Linux
+ thermal framework. cpufreq is used as the cooling device to throttle
+ CPUs when the passive trip is crossed.
+
config IMX_THERMAL
tristate "Temperature sensor driver for Freescale i.MX SoCs"
depends on CPU_THERMAL
@@ -249,9 +282,20 @@ config X86_PKG_TEMP_THERMAL
two trip points which can be set by user to get notifications via thermal
notification methods.
+config INTEL_SOC_DTS_IOSF_CORE
+ tristate
+ depends on X86
+ select IOSF_MBI
+ help
+ It is becoming common for Intel SoCs to expose additional digital
+ temperature sensors (DTSs) through the sideband interface (IOSF). This
+ option implements the common set of helper functions to register the
+ sensors and to get temperatures and get/set thresholds on the DTSs.
+
config INTEL_SOC_DTS_THERMAL
tristate "Intel SoCs DTS thermal driver"
- depends on X86 && IOSF_MBI
+ depends on X86
+ select INTEL_SOC_DTS_IOSF_CORE
help
Enable this to register Intel SoCs (e.g. Bay Trail) platform digital
temperature sensor (DTS). These SoCs have two additional DTSs in
@@ -261,12 +305,23 @@ config INTEL_SOC_DTS_THERMAL
notification methods. The other trip is a critical trip point, which
was set by the driver based on the TJ MAX temperature.
+config INTEL_QUARK_DTS_THERMAL
+ tristate "Intel Quark DTS thermal driver"
+ depends on X86_INTEL_QUARK
+ help
+ Enable this to register the Intel Quark SoC (e.g. X1000) platform digital
+ temperature sensor (DTS). The X1000 SoC has one on-die DTS.
+ The DTS will be registered as a thermal zone. There are two trip points:
+ hot & critical. The default value of the critical trip point is set by
+ the underlying BIOS/Firmware.
+
config INT340X_THERMAL
tristate "ACPI INT340X thermal drivers"
depends on X86 && ACPI
select THERMAL_GOV_USER_SPACE
select ACPI_THERMAL_REL
select ACPI_FAN
+ select INTEL_SOC_DTS_IOSF_CORE
help
Newer laptops and tablets that use ACPI may have thermal sensors and
other devices with thermal control capabilities outside the core
@@ -299,4 +354,15 @@ depends on ARCH_STI && OF
source "drivers/thermal/st/Kconfig"
endmenu
+config QCOM_SPMI_TEMP_ALARM
+ tristate "Qualcomm SPMI PMIC Temperature Alarm"
+ depends on OF && SPMI && IIO
+ select REGMAP_SPMI
+ help
+ This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP)
+ PMIC devices. It shows up in sysfs as a thermal sensor with multiple
+ trip points. The temperature reported by the thermal sensor reflects the
+ real-time die temperature if an ADC is present or an estimate of the
+ temperature based upon the over temperature stage value.
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index fa0dc4867..535dfee14 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -14,6 +14,7 @@ thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG) += gov_bang_bang.o
thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
+thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += power_allocator.o
# cpufreq cooling
thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
@@ -22,6 +23,7 @@ thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o
# platform thermal drivers
+obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
@@ -34,8 +36,11 @@ obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
+obj-$(CONFIG_INTEL_SOC_DTS_IOSF_CORE) += intel_soc_dts_iosf.o
obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
+obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_ST_THERMAL) += st/
obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
+obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index f65f0d109..620dcd405 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -26,10 +26,13 @@
#include <linux/thermal.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
+#include <trace/events/thermal.h>
+
/*
* Cooling state <-> CPUFreq frequency
*
@@ -45,6 +48,19 @@
*/
/**
+ * struct power_table - frequency to power conversion
+ * @frequency: frequency in KHz
+ * @power: power in mW
+ *
+ * This structure is built when the cooling device registers and helps
+ * in translating frequency to power and vice versa.
+ */
+struct power_table {
+ u32 frequency;
+ u32 power;
+};
+
+/**
* struct cpufreq_cooling_device - data for cooling device with cpufreq
* @id: unique integer value corresponding to each cpufreq_cooling_device
* registered.
@@ -52,12 +68,21 @@
* registered cooling device.
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
- * @cpufreq_val: integer value representing the absolute value of the clipped
+ * @clipped_freq: integer value representing the absolute value of the clipped
* frequency.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
* @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
* @node: list_head to link all cpufreq_cooling_device together.
+ * @last_load: load measured by the latest call to cpufreq_get_actual_power()
+ * @time_in_idle: previous reading of the absolute time that this cpu was idle
+ * @time_in_idle_timestamp: wall time of the last invocation of
+ * get_cpu_idle_time_us()
+ * @dyn_power_table: array of struct power_table for frequency to power
+ * conversion, sorted in ascending order.
+ * @dyn_power_table_entries: number of entries in the @dyn_power_table array
+ * @cpu_dev: the first cpu_device from @allowed_cpus that has OPPs registered
+ * @plat_get_static_power: callback to calculate the static power
*
* This structure is required for keeping information of each registered
* cpufreq_cooling_device.
@@ -66,15 +91,25 @@ struct cpufreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
- unsigned int cpufreq_val;
+ unsigned int clipped_freq;
unsigned int max_level;
unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
struct list_head node;
+ u32 last_load;
+ u64 *time_in_idle;
+ u64 *time_in_idle_timestamp;
+ struct power_table *dyn_power_table;
+ int dyn_power_table_entries;
+ struct device *cpu_dev;
+ get_static_t plat_get_static_power;
};
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
+static unsigned int cpufreq_dev_count;
+
+static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);
/**
@@ -153,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
struct cpufreq_cooling_device *cpufreq_dev;
- mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
return get_level(cpufreq_dev, freq);
}
}
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
return THERMAL_CSTATE_INVALID;
@@ -183,26 +218,246 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- unsigned long max_freq = 0;
+ unsigned long clipped_freq;
struct cpufreq_cooling_device *cpufreq_dev;
if (event != CPUFREQ_ADJUST)
- return 0;
+ return NOTIFY_DONE;
- mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
- if (!cpumask_test_cpu(policy->cpu,
- &cpufreq_dev->allowed_cpus))
+ if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
continue;
- max_freq = cpufreq_dev->cpufreq_val;
+ /*
+ * policy->max is the maximum allowed frequency defined by user
+ * and clipped_freq is the maximum that thermal constraints
+ * allow.
+ *
+ * If clipped_freq is lower than policy->max, then we need to
+ * readjust policy->max.
+ *
+ * But, if clipped_freq is greater than policy->max, we don't
+ * need to do anything.
+ */
+ clipped_freq = cpufreq_dev->clipped_freq;
+
+ if (policy->max > clipped_freq)
+ cpufreq_verify_within_limits(policy, 0, clipped_freq);
+ break;
+ }
+ mutex_unlock(&cooling_list_lock);
- if (policy->max != max_freq)
- cpufreq_verify_within_limits(policy, 0, max_freq);
+ return NOTIFY_OK;
+}
+
+/**
+ * build_dyn_power_table() - create a dynamic power to frequency table
+ * @cpufreq_device: the cpufreq cooling device in which to store the table
+ * @capacitance: dynamic power coefficient for these cpus
+ *
+ * Build a dynamic power to frequency table for this cpu and store it
+ * in @cpufreq_device. This table will be used in cpu_power_to_freq() and
+ * cpu_freq_to_power() to convert between power and frequency
+ * efficiently. Power is stored in mW, frequency in KHz. The
+ * resulting table is in ascending order.
+ *
+ * Return: 0 on success, -E* on error.
+ */
+static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
+ u32 capacitance)
+{
+ struct power_table *power_table;
+ struct dev_pm_opp *opp;
+ struct device *dev = NULL;
+ int num_opps = 0, cpu, i, ret = 0;
+ unsigned long freq;
+
+ rcu_read_lock();
+
+ for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ dev_warn(&cpufreq_device->cool_dev->device,
+ "No cpu device for cpu %d\n", cpu);
+ continue;
+ }
+
+ num_opps = dev_pm_opp_get_opp_count(dev);
+ if (num_opps > 0) {
+ break;
+ } else if (num_opps < 0) {
+ ret = num_opps;
+ goto unlock;
+ }
}
- mutex_unlock(&cooling_cpufreq_lock);
- return 0;
+ if (num_opps == 0) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
+ if (!power_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ for (freq = 0, i = 0;
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
+ freq++, i++) {
+ u32 freq_mhz, voltage_mv;
+ u64 power;
+
+ freq_mhz = freq / 1000000;
+ voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
+
+ /*
+ * Do the multiplication with MHz and millivolt so as
+ * to not overflow.
+ */
+ power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
+ do_div(power, 1000000000);
+
+ /* frequency is stored in power_table in KHz */
+ power_table[i].frequency = freq / 1000;
+
+ /* power is stored in mW */
+ power_table[i].power = power;
+ }
+
+ if (i == 0) {
+ ret = PTR_ERR(opp);
+ goto unlock;
+ }
+
+ cpufreq_device->cpu_dev = dev;
+ cpufreq_device->dyn_power_table = power_table;
+ cpufreq_device->dyn_power_table_entries = i;
+
+unlock:
+ rcu_read_unlock();
+ return ret;
+}
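For reference, each table entry above reduces to P_dyn = C * f[MHz] * V[mV]^2 / 10^9, in mW. The following is a minimal standalone userspace sketch of that arithmetic, with an invented capacitance and invented OPP values; it is an illustration only, not part of the kernel code:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical OPP list: frequency in Hz and voltage in uV, as the OPP
 * library reports them; the values below are made up for illustration. */
static const struct { uint64_t freq_hz; uint64_t volt_uv; } opps[] = {
	{  500000000,  900000 },
	{ 1000000000, 1000000 },
	{ 1500000000, 1150000 },
};

int main(void)
{
	uint32_t capacitance = 1000;	/* example dynamic power coefficient */
	unsigned int i;

	for (i = 0; i < sizeof(opps) / sizeof(opps[0]); i++) {
		uint32_t freq_mhz = opps[i].freq_hz / 1000000;
		uint32_t volt_mv = opps[i].volt_uv / 1000;
		/* same scaling as build_dyn_power_table(): result in mW */
		uint64_t power = (uint64_t)capacitance * freq_mhz * volt_mv * volt_mv;

		power /= 1000000000;
		printf("%u KHz -> %llu mW\n",
		       (unsigned int)(opps[i].freq_hz / 1000),
		       (unsigned long long)power);
	}
	return 0;
}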
+
+static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_device,
+ u32 freq)
+{
+ int i;
+ struct power_table *pt = cpufreq_device->dyn_power_table;
+
+ for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
+ if (freq < pt[i].frequency)
+ break;
+
+ return pt[i - 1].power;
+}
+
+static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
+ u32 power)
+{
+ int i;
+ struct power_table *pt = cpufreq_device->dyn_power_table;
+
+ for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++)
+ if (power < pt[i].power)
+ break;
+
+ return pt[i - 1].frequency;
+}
+
+/**
+ * get_load() - get load for a cpu since last updated
+ * @cpufreq_device: &struct cpufreq_cooling_device for this cpu
+ * @cpu: cpu number
+ *
+ * Return: The average load of cpu @cpu in percentage since this
+ * function was last called.
+ */
+static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu)
+{
+ u32 load;
+ u64 now, now_idle, delta_time, delta_idle;
+
+ now_idle = get_cpu_idle_time(cpu, &now, 0);
+ delta_idle = now_idle - cpufreq_device->time_in_idle[cpu];
+ delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu];
+
+ if (delta_time <= delta_idle)
+ load = 0;
+ else
+ load = div64_u64(100 * (delta_time - delta_idle), delta_time);
+
+ cpufreq_device->time_in_idle[cpu] = now_idle;
+ cpufreq_device->time_in_idle_timestamp[cpu] = now;
+
+ return load;
+}
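get_load() above reduces to load = 100 * (delta_time - delta_idle) / delta_time over the interval since the previous call. A standalone sketch of that calculation with invented snapshot values:

#include <stdint.h>
#include <stdio.h>

/* Toy load calculation mirroring get_load(); the timestamps are made up. */
int main(void)
{
	uint64_t prev_idle = 1000, prev_time = 2000;	/* previous snapshot (us) */
	uint64_t now_idle = 1600, now = 3000;		/* current snapshot (us) */
	uint64_t delta_idle = now_idle - prev_idle;
	uint64_t delta_time = now - prev_time;
	uint32_t load;

	if (delta_time <= delta_idle)
		load = 0;
	else
		load = 100 * (delta_time - delta_idle) / delta_time;

	printf("load = %u%%\n", load);	/* 40% for these numbers */
	return 0;
}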
+
+/**
+ * get_static_power() - calculate the static power consumed by the cpus
+ * @cpufreq_device: struct &cpufreq_cooling_device for this cpu cdev
+ * @tz: thermal zone device in which we're operating
+ * @freq: frequency in KHz
+ * @power: pointer in which to store the calculated static power
+ *
+ * Calculate the static power consumed by the cpus described by
+ * @cpufreq_device running at frequency @freq. This function relies on a
+ * platform specific function that should have been provided when the
+ * cooling device was registered. If it wasn't, the static power is assumed to
+ * be negligible. The calculated static power is stored in @power.
+ *
+ * Return: 0 on success, -E* on failure.
+ */
+static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
+ struct thermal_zone_device *tz, unsigned long freq,
+ u32 *power)
+{
+ struct dev_pm_opp *opp;
+ unsigned long voltage;
+ struct cpumask *cpumask = &cpufreq_device->allowed_cpus;
+ unsigned long freq_hz = freq * 1000;
+
+ if (!cpufreq_device->plat_get_static_power ||
+ !cpufreq_device->cpu_dev) {
+ *power = 0;
+ return 0;
+ }
+
+ rcu_read_lock();
+
+ opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
+ true);
+ voltage = dev_pm_opp_get_voltage(opp);
+
+ rcu_read_unlock();
+
+ if (voltage == 0) {
+ dev_warn_ratelimited(cpufreq_device->cpu_dev,
+ "Failed to get voltage for frequency %lu: %ld\n",
+ freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0);
+ return -EINVAL;
+ }
+
+ return cpufreq_device->plat_get_static_power(cpumask, tz->passive_delay,
+ voltage, power);
+}
+
+/**
+ * get_dynamic_power() - calculate the dynamic power
+ * @cpufreq_device: &cpufreq_cooling_device for this cdev
+ * @freq: current frequency
+ *
+ * Return: the dynamic power consumed by the cpus described by
+ * @cpufreq_device.
+ */
+static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device,
+ unsigned long freq)
+{
+ u32 raw_cpu_power;
+
+ raw_cpu_power = cpu_freq_to_power(cpufreq_device, freq);
+ return (raw_cpu_power * cpufreq_device->last_load) / 100;
}
/* cpufreq cooling device callback functions are defined below */
@@ -273,15 +528,212 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
clip_freq = cpufreq_device->freq_table[state];
cpufreq_device->cpufreq_state = state;
- cpufreq_device->cpufreq_val = clip_freq;
+ cpufreq_device->clipped_freq = clip_freq;
cpufreq_update_policy(cpu);
return 0;
}
+/**
+ * cpufreq_get_requested_power() - get the current power
+ * @cdev: &thermal_cooling_device pointer
+ * @tz: a valid thermal zone device pointer
+ * @power: pointer in which to store the resulting power
+ *
+ * Calculate the current power consumption of the cpus in milliwatts
+ * and store it in @power. This function should actually calculate
+ * the requested power, but it's hard to get the frequency that
+ * cpufreq would have assigned if there were no thermal limits.
+ * Instead, we calculate the current power on the assumption that the
+ * immediate future will look like the immediate past.
+ *
+ * We use the current frequency and the average load since this
+ * function was last called. In reality, there could have been
+ * multiple opps since this function was last called and that affects
+ * the load calculation. While it's not perfectly accurate, this
+ * simplification is good enough and works. REVISIT this, as more
+ * complex code may be needed if experiments show that it's not
+ * accurate enough.
+ *
+ * Return: 0 on success, -E* if getting the static power failed.
+ */
+static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz,
+ u32 *power)
+{
+ unsigned long freq;
+ int i = 0, cpu, ret;
+ u32 static_power, dynamic_power, total_load = 0;
+ struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ u32 *load_cpu = NULL;
+
+ cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
+
+ /*
+ * All the CPUs are offline, thus the requested power by
+ * the cdev is 0
+ */
+ if (cpu >= nr_cpu_ids) {
+ *power = 0;
+ return 0;
+ }
+
+ freq = cpufreq_quick_get(cpu);
+
+ if (trace_thermal_power_cpu_get_power_enabled()) {
+ u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus);
+
+ load_cpu = devm_kcalloc(&cdev->device, ncpus, sizeof(*load_cpu),
+ GFP_KERNEL);
+ }
+
+ for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
+ u32 load;
+
+ if (cpu_online(cpu))
+ load = get_load(cpufreq_device, cpu);
+ else
+ load = 0;
+
+ total_load += load;
+ if (trace_thermal_power_cpu_limit_enabled() && load_cpu)
+ load_cpu[i] = load;
+
+ i++;
+ }
+
+ cpufreq_device->last_load = total_load;
+
+ dynamic_power = get_dynamic_power(cpufreq_device, freq);
+ ret = get_static_power(cpufreq_device, tz, freq, &static_power);
+ if (ret) {
+ if (load_cpu)
+ devm_kfree(&cdev->device, load_cpu);
+ return ret;
+ }
+
+ if (load_cpu) {
+ trace_thermal_power_cpu_get_power(
+ &cpufreq_device->allowed_cpus,
+ freq, load_cpu, i, dynamic_power, static_power);
+
+ devm_kfree(&cdev->device, load_cpu);
+ }
+
+ *power = static_power + dynamic_power;
+ return 0;
+}
+
+/**
+ * cpufreq_state2power() - convert a cpu cdev state to power consumed
+ * @cdev: &thermal_cooling_device pointer
+ * @tz: a valid thermal zone device pointer
+ * @state: cooling device state to be converted
+ * @power: pointer in which to store the resulting power
+ *
+ * Convert cooling device state @state into power consumption in
+ * milliwatts assuming 100% load. Store the calculated power in
+ * @power.
+ *
+ * Return: 0 on success, -EINVAL if the cooling device state could not
+ * be converted into a frequency or other -E* if there was an error
+ * when calculating the static power.
+ */
+static int cpufreq_state2power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz,
+ unsigned long state, u32 *power)
+{
+ unsigned int freq, num_cpus;
+ cpumask_t cpumask;
+ u32 static_power, dynamic_power;
+ int ret;
+ struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+
+ cpumask_and(&cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask);
+ num_cpus = cpumask_weight(&cpumask);
+
+ /* None of our cpus are online, so no power */
+ if (num_cpus == 0) {
+ *power = 0;
+ return 0;
+ }
+
+ freq = cpufreq_device->freq_table[state];
+ if (!freq)
+ return -EINVAL;
+
+ dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus;
+ ret = get_static_power(cpufreq_device, tz, freq, &static_power);
+ if (ret)
+ return ret;
+
+ *power = static_power + dynamic_power;
+ return 0;
+}
+
+/**
+ * cpufreq_power2state() - convert power to a cooling device state
+ * @cdev: &thermal_cooling_device pointer
+ * @tz: a valid thermal zone device pointer
+ * @power: power in milliwatts to be converted
+ * @state: pointer in which to store the resulting state
+ *
+ * Calculate a cooling device state for the cpus described by @cdev
+ * that would allow them to consume at most @power mW and store it in
+ * @state. Note that this calculation depends on external factors
+ * such as the cpu load or the current static power. Calling this
+ * function with the same power as input can yield different cooling
+ * device states depending on those external factors.
+ *
+ * Return: 0 on success, -ENODEV if no cpus are online or -EINVAL if
+ * the calculated frequency could not be converted to a valid state.
+ * The latter should not happen unless the frequencies available to
+ * cpufreq have changed since the initialization of the cpu cooling
+ * device.
+ */
+static int cpufreq_power2state(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz, u32 power,
+ unsigned long *state)
+{
+ unsigned int cpu, cur_freq, target_freq;
+ int ret;
+ s32 dyn_power;
+ u32 last_load, normalised_power, static_power;
+ struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+
+ cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask);
+
+ /* None of our cpus are online */
+ if (cpu >= nr_cpu_ids)
+ return -ENODEV;
+
+ cur_freq = cpufreq_quick_get(cpu);
+ ret = get_static_power(cpufreq_device, tz, cur_freq, &static_power);
+ if (ret)
+ return ret;
+
+ dyn_power = power - static_power;
+ dyn_power = dyn_power > 0 ? dyn_power : 0;
+ last_load = cpufreq_device->last_load ?: 1;
+ normalised_power = (dyn_power * 100) / last_load;
+ target_freq = cpu_power_to_freq(cpufreq_device, normalised_power);
+
+ *state = cpufreq_cooling_get_level(cpu, target_freq);
+ if (*state == THERMAL_CSTATE_INVALID) {
+ dev_warn_ratelimited(&cdev->device,
+ "Failed to convert %dKHz for cpu %d into a cdev state\n",
+ target_freq, cpu);
+ return -EINVAL;
+ }
+
+ trace_thermal_power_cpu_limit(&cpufreq_device->allowed_cpus,
+ target_freq, *state, power);
+ return 0;
+}
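The conversion above first subtracts the static power from the granted budget, then scales the remainder by the last observed load to get the power the cpus would draw at 100% load, which is what the frequency table is indexed by. A standalone sketch of that normalisation, with invented budget, static power and load values:

#include <stdint.h>
#include <stdio.h>

/* Toy version of the power2state normalisation; values are made up. */
int main(void)
{
	uint32_t power = 1500;		/* granted budget in mW */
	uint32_t static_power = 200;	/* mW */
	uint32_t last_load = 60;	/* % busy over the last period */
	int32_t dyn_power = power - static_power;
	uint32_t normalised_power;

	if (dyn_power < 0)
		dyn_power = 0;
	normalised_power = (dyn_power * 100) / last_load;

	/* normalised_power (~2166 mW here) is what the cpus would draw at
	 * 100% load, and is what gets passed to cpu_power_to_freq(). */
	printf("dyn=%d mW, normalised=%u mW\n", dyn_power, normalised_power);
	return 0;
}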
+
/* Bind cpufreq callbacks to thermal cooling device ops */
-static struct thermal_cooling_device_ops const cpufreq_cooling_ops = {
+static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
.get_max_state = cpufreq_get_max_state,
.get_cur_state = cpufreq_get_cur_state,
.set_cur_state = cpufreq_set_cur_state,
@@ -311,6 +763,9 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
* @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
* Normally this should be same as cpufreq policy->related_cpus.
+ * @capacitance: dynamic power coefficient for these cpus
+ * @plat_static_func: function to calculate the static power consumed by these
+ * cpus (optional)
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -322,13 +777,14 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
*/
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
- const struct cpumask *clip_cpus)
+ const struct cpumask *clip_cpus, u32 capacitance,
+ get_static_t plat_static_func)
{
struct thermal_cooling_device *cool_dev;
struct cpufreq_cooling_device *cpufreq_dev;
char dev_name[THERMAL_NAME_LENGTH];
struct cpufreq_frequency_table *pos, *table;
- unsigned int freq, i;
+ unsigned int freq, i, num_cpus;
int ret;
table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
@@ -341,6 +797,23 @@ __cpufreq_cooling_register(struct device_node *np,
if (!cpufreq_dev)
return ERR_PTR(-ENOMEM);
+ num_cpus = cpumask_weight(clip_cpus);
+ cpufreq_dev->time_in_idle = kcalloc(num_cpus,
+ sizeof(*cpufreq_dev->time_in_idle),
+ GFP_KERNEL);
+ if (!cpufreq_dev->time_in_idle) {
+ cool_dev = ERR_PTR(-ENOMEM);
+ goto free_cdev;
+ }
+
+ cpufreq_dev->time_in_idle_timestamp =
+ kcalloc(num_cpus, sizeof(*cpufreq_dev->time_in_idle_timestamp),
+ GFP_KERNEL);
+ if (!cpufreq_dev->time_in_idle_timestamp) {
+ cool_dev = ERR_PTR(-ENOMEM);
+ goto free_time_in_idle;
+ }
+
/* Find max levels */
cpufreq_for_each_valid_entry(pos, table)
cpufreq_dev->max_level++;
@@ -349,7 +822,7 @@ __cpufreq_cooling_register(struct device_node *np,
cpufreq_dev->max_level, GFP_KERNEL);
if (!cpufreq_dev->freq_table) {
cool_dev = ERR_PTR(-ENOMEM);
- goto free_cdev;
+ goto free_time_in_idle_timestamp;
}
/* max_level is an index, not a counter */
@@ -357,6 +830,20 @@ __cpufreq_cooling_register(struct device_node *np,
cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
+ if (capacitance) {
+ cpufreq_cooling_ops.get_requested_power =
+ cpufreq_get_requested_power;
+ cpufreq_cooling_ops.state2power = cpufreq_state2power;
+ cpufreq_cooling_ops.power2state = cpufreq_power2state;
+ cpufreq_dev->plat_get_static_power = plat_static_func;
+
+ ret = build_dyn_power_table(cpufreq_dev, capacitance);
+ if (ret) {
+ cool_dev = ERR_PTR(ret);
+ goto free_table;
+ }
+ }
+
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
if (ret) {
cool_dev = ERR_PTR(ret);
@@ -383,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
- cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
+ cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
+ list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+ mutex_unlock(&cooling_list_lock);
+
/* Register the notifier for first cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ if (!cpufreq_dev_count++)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- list_add(&cpufreq_dev->node, &cpufreq_dev_list);
-
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
@@ -402,6 +891,10 @@ remove_idr:
release_idr(&cpufreq_idr, cpufreq_dev->id);
free_table:
kfree(cpufreq_dev->freq_table);
+free_time_in_idle_timestamp:
+ kfree(cpufreq_dev->time_in_idle_timestamp);
+free_time_in_idle:
+ kfree(cpufreq_dev->time_in_idle);
free_cdev:
kfree(cpufreq_dev);
@@ -422,7 +915,7 @@ free_cdev:
struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
- return __cpufreq_cooling_register(NULL, clip_cpus);
+ return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
@@ -446,11 +939,78 @@ of_cpufreq_cooling_register(struct device_node *np,
if (!np)
return ERR_PTR(-EINVAL);
- return __cpufreq_cooling_register(np, clip_cpus);
+ return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
/**
+ * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @capacitance: dynamic power coefficient for these cpus
+ * @plat_static_func: function to calculate the static power consumed by these
+ * cpus (optional)
+ *
+ * This interface function registers the cpufreq cooling device with
+ * the name "thermal-cpufreq-%x". This api can support multiple
+ * instances of cpufreq cooling devices. Using this function, the
+ * cooling device will implement the power extensions by using a
+ * simple cpu power model. The cpus must have registered their OPPs
+ * using the OPP library.
+ *
+ * An optional @plat_static_func may be provided to calculate the
+ * static power consumed by these cpus. If the platform's static
+ * power consumption is unknown or negligible, make it NULL.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance,
+ get_static_t plat_static_func)
+{
+ return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
+ plat_static_func);
+}
+EXPORT_SYMBOL(cpufreq_power_cooling_register);
+
+/**
+ * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
+ * @np: a valid struct device_node to the cooling device device tree node
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @capacitance: dynamic power coefficient for these cpus
+ * @plat_static_func: function to calculate the static power consumed by these
+ * cpus (optional)
+ *
+ * This interface function registers the cpufreq cooling device with
+ * the name "thermal-cpufreq-%x". This api can support multiple
+ * instances of cpufreq cooling devices. Using this API, the cpufreq
+ * cooling device will be linked to the device tree node provided.
+ * Using this function, the cooling device will implement the power
+ * extensions by using a simple cpu power model. The cpus must have
+ * registered their OPPs using the OPP library.
+ *
+ * An optional @plat_static_func may be provided to calculate the
+ * static power consumed by these cpus. If the platform's static
+ * power consumption is unknown or negligible, make it NULL.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+of_cpufreq_power_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus,
+ u32 capacitance,
+ get_static_t plat_static_func)
+{
+ if (!np)
+ return ERR_PTR(-EINVAL);
+
+ return __cpufreq_cooling_register(np, clip_cpus, capacitance,
+ plat_static_func);
+}
+EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
+
+/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
*
@@ -464,17 +1024,23 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
return;
cpufreq_dev = cdev->devdata;
- mutex_lock(&cooling_cpufreq_lock);
- list_del(&cpufreq_dev->node);
/* Unregister the notifier for the last cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ mutex_lock(&cooling_cpufreq_lock);
+ if (!--cpufreq_dev_count)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
+
+ mutex_lock(&cooling_list_lock);
+ list_del(&cpufreq_dev->node);
+ mutex_unlock(&cooling_list_lock);
+
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
release_idr(&cpufreq_idr, cpufreq_dev->id);
+ kfree(cpufreq_dev->time_in_idle_timestamp);
+ kfree(cpufreq_dev->time_in_idle);
kfree(cpufreq_dev->freq_table);
kfree(cpufreq_dev);
}
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 20adfbe27..2fb273c4b 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -76,7 +76,7 @@ static int db8500_cdev_bind(struct thermal_zone_device *thermal,
upper = lower = i > max_state ? max_state : i;
ret = thermal_zone_bind_cooling_device(thermal, i, cdev,
- upper, lower);
+ upper, lower, THERMAL_WEIGHT_DEFAULT);
dev_info(&cdev->device, "%s bind to %d: %d-%s\n", cdev->type,
i, ret, ret ? "fail" : "succeed");
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index 6e0a3fbfa..c2c10bbe2 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -59,17 +59,17 @@ static int get_trip_level(struct thermal_zone_device *tz)
}
static long get_target_state(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev, int weight, int level)
+ struct thermal_cooling_device *cdev, int percentage, int level)
{
unsigned long max_state;
cdev->ops->get_max_state(cdev, &max_state);
- return (long)(weight * level * max_state) / (100 * tz->trips);
+ return (long)(percentage * level * max_state) / (100 * tz->trips);
}
/**
- * fair_share_throttle - throttles devices asscciated with the given zone
+ * fair_share_throttle - throttles devices associated with the given zone
* @tz - thermal_zone_device
*
* Throttling Logic: This uses three parameters to calculate the new
@@ -77,7 +77,7 @@ static long get_target_state(struct thermal_zone_device *tz,
*
* Parameters used for Throttling:
* P1. max_state: Maximum throttle state exposed by the cooling device.
- * P2. weight[i]/100:
+ * P2. percentage[i]/100:
* How 'effective' the 'i'th device is, in cooling the given zone.
* P3. cur_trip_level/max_no_of_trips:
* This describes the extent to which the devices should be throttled.
@@ -88,28 +88,33 @@ static long get_target_state(struct thermal_zone_device *tz,
*/
static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
{
- const struct thermal_zone_params *tzp;
- struct thermal_cooling_device *cdev;
struct thermal_instance *instance;
- int i;
+ int total_weight = 0;
+ int total_instance = 0;
int cur_trip_level = get_trip_level(tz);
- if (!tz->tzp || !tz->tzp->tbp)
- return -EINVAL;
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != trip)
+ continue;
+
+ total_weight += instance->weight;
+ total_instance++;
+ }
- tzp = tz->tzp;
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ int percentage;
+ struct thermal_cooling_device *cdev = instance->cdev;
- for (i = 0; i < tzp->num_tbps; i++) {
- if (!tzp->tbp[i].cdev)
+ if (instance->trip != trip)
continue;
- cdev = tzp->tbp[i].cdev;
- instance = get_thermal_instance(tz, cdev, trip);
- if (!instance)
- continue;
+ if (!total_weight)
+ percentage = 100 / total_instance;
+ else
+ percentage = (instance->weight * 100) / total_weight;
- instance->target = get_target_state(tz, cdev,
- tzp->tbp[i].weight, cur_trip_level);
+ instance->target = get_target_state(tz, cdev, percentage,
+ cur_trip_level);
instance->cdev->updated = false;
thermal_cdev_update(cdev);
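With the per-instance weights replacing the old thermal_bind_params table, each cooling device's target works out to percentage * cur_trip_level * max_state / (100 * trips). A standalone sketch of that split, with invented weights, trip level and cooling-device depth:

#include <stdio.h>

/* Toy fair-share split for one trip point; all values are made up. */
int main(void)
{
	int weights[] = { 1, 3 };	/* per-instance weights for this trip */
	int total_weight = 4;
	int max_state = 10;		/* deepest throttle state of each cdev */
	int cur_trip_level = 2, trips = 4;
	unsigned int i;

	for (i = 0; i < sizeof(weights) / sizeof(weights[0]); i++) {
		int percentage = (weights[i] * 100) / total_weight;
		long target = (long)(percentage * cur_trip_level * max_state) /
			      (100 * trips);

		printf("cdev %u: %d%% -> target state %ld\n", i, percentage, target);
	}
	return 0;
}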
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
new file mode 100644
index 000000000..b49f97c73
--- /dev/null
+++ b/drivers/thermal/hisi_thermal.c
@@ -0,0 +1,420 @@
+/*
+ * Hisilicon thermal sensor driver
+ *
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ * Copyright (c) 2014-2015 Linaro Limited.
+ *
+ * Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ * Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include "thermal_core.h"
+
+#define TEMP0_TH (0x4)
+#define TEMP0_RST_TH (0x8)
+#define TEMP0_CFG (0xC)
+#define TEMP0_EN (0x10)
+#define TEMP0_INT_EN (0x14)
+#define TEMP0_INT_CLR (0x18)
+#define TEMP0_RST_MSK (0x1C)
+#define TEMP0_VALUE (0x28)
+
+#define HISI_TEMP_BASE (-60)
+#define HISI_TEMP_RESET (100000)
+
+#define HISI_MAX_SENSORS 4
+
+struct hisi_thermal_sensor {
+ struct hisi_thermal_data *thermal;
+ struct thermal_zone_device *tzd;
+
+ long sensor_temp;
+ uint32_t id;
+ uint32_t thres_temp;
+};
+
+struct hisi_thermal_data {
+ struct mutex thermal_lock; /* protects register data */
+ struct platform_device *pdev;
+ struct clk *clk;
+ struct hisi_thermal_sensor sensors[HISI_MAX_SENSORS];
+
+ int irq, irq_bind_sensor;
+ bool irq_enabled;
+
+ void __iomem *regs;
+};
+
+/* in millicelsius */
+static inline int _step_to_temp(int step)
+{
+ /*
+ * Every step equals (1 * 200) / 255 degrees Celsius, and the result
+ * finally needs converting to millicelsius.
+ */
+ return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
+}
+
+static inline long _temp_to_step(long temp)
+{
+ return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200);
+}
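The sensor value is an 8-bit step covering a 200 degree Celsius range starting at -60, so a reading converts as (step * 200 / 255 - 60) * 1000 millicelsius. A standalone sketch of the two conversions above, with an example reading:

#include <stdio.h>

#define HISI_TEMP_BASE	(-60)

/* Standalone check of the step <-> millicelsius conversions. */
static int step_to_temp(int step)
{
	return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
}

static long temp_to_step(long temp)
{
	return (temp / 1000 - HISI_TEMP_BASE) * 255 / 200;
}

int main(void)
{
	/* e.g. raw reading 0x8e (142) -> 51000 millicelsius, and back */
	printf("step 142 -> %d mC\n", step_to_temp(142));
	printf("65000 mC -> step %ld\n", temp_to_step(65000));
	return 0;
}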
+
+static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
+ struct hisi_thermal_sensor *sensor)
+{
+ long val;
+
+ mutex_lock(&data->thermal_lock);
+
+ /* disable interrupt */
+ writel(0x0, data->regs + TEMP0_INT_EN);
+ writel(0x1, data->regs + TEMP0_INT_CLR);
+
+ /* disable module firstly */
+ writel(0x0, data->regs + TEMP0_EN);
+
+ /* select sensor id */
+ writel((sensor->id << 12), data->regs + TEMP0_CFG);
+
+ /* enable module */
+ writel(0x1, data->regs + TEMP0_EN);
+
+ usleep_range(3000, 5000);
+
+ val = readl(data->regs + TEMP0_VALUE);
+ val = _step_to_temp(val);
+
+ mutex_unlock(&data->thermal_lock);
+
+ return val;
+}
+
+static void hisi_thermal_enable_bind_irq_sensor
+ (struct hisi_thermal_data *data)
+{
+ struct hisi_thermal_sensor *sensor;
+
+ mutex_lock(&data->thermal_lock);
+
+ sensor = &data->sensors[data->irq_bind_sensor];
+
+ /* setting the hdak time */
+ writel(0x0, data->regs + TEMP0_CFG);
+
+ /* disable module firstly */
+ writel(0x0, data->regs + TEMP0_RST_MSK);
+ writel(0x0, data->regs + TEMP0_EN);
+
+ /* select sensor id */
+ writel((sensor->id << 12), data->regs + TEMP0_CFG);
+
+ /* enable for interrupt */
+ writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00,
+ data->regs + TEMP0_TH);
+
+ writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH);
+
+ /* enable module */
+ writel(0x1, data->regs + TEMP0_RST_MSK);
+ writel(0x1, data->regs + TEMP0_EN);
+
+ writel(0x0, data->regs + TEMP0_INT_CLR);
+ writel(0x1, data->regs + TEMP0_INT_EN);
+
+ usleep_range(3000, 5000);
+
+ mutex_unlock(&data->thermal_lock);
+}
+
+static void hisi_thermal_disable_sensor(struct hisi_thermal_data *data)
+{
+ mutex_lock(&data->thermal_lock);
+
+ /* disable sensor module */
+ writel(0x0, data->regs + TEMP0_INT_EN);
+ writel(0x0, data->regs + TEMP0_RST_MSK);
+ writel(0x0, data->regs + TEMP0_EN);
+
+ mutex_unlock(&data->thermal_lock);
+}
+
+static int hisi_thermal_get_temp(void *_sensor, long *temp)
+{
+ struct hisi_thermal_sensor *sensor = _sensor;
+ struct hisi_thermal_data *data = sensor->thermal;
+
+ int sensor_id = 0, i;
+ long max_temp = 0;
+
+ *temp = hisi_thermal_get_sensor_temp(data, sensor);
+
+ sensor->sensor_temp = *temp;
+
+ for (i = 0; i < HISI_MAX_SENSORS; i++) {
+ if (data->sensors[i].sensor_temp >= max_temp) {
+ max_temp = data->sensors[i].sensor_temp;
+ sensor_id = i;
+ }
+ }
+
+ mutex_lock(&data->thermal_lock);
+ data->irq_bind_sensor = sensor_id;
+ mutex_unlock(&data->thermal_lock);
+
+ dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%ld, thres=%d\n",
+ sensor->id, data->irq_enabled, *temp, sensor->thres_temp);
+ /*
+ * Bind the irq to this sensor in two cases:
+ * re-enable the alarm IRQ if the temperature dropped below the threshold;
+ * if the irq is already enabled, always rebind it.
+ */
+ if (data->irq_enabled) {
+ hisi_thermal_enable_bind_irq_sensor(data);
+ return 0;
+ }
+
+ if (max_temp < sensor->thres_temp) {
+ data->irq_enabled = true;
+ hisi_thermal_enable_bind_irq_sensor(data);
+ enable_irq(data->irq);
+ }
+
+ return 0;
+}
+
+static struct thermal_zone_of_device_ops hisi_of_thermal_ops = {
+ .get_temp = hisi_thermal_get_temp,
+};
+
+static irqreturn_t hisi_thermal_alarm_irq(int irq, void *dev)
+{
+ struct hisi_thermal_data *data = dev;
+
+ disable_irq_nosync(irq);
+ data->irq_enabled = false;
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
+{
+ struct hisi_thermal_data *data = dev;
+ struct hisi_thermal_sensor *sensor;
+ int i;
+
+ mutex_lock(&data->thermal_lock);
+ sensor = &data->sensors[data->irq_bind_sensor];
+
+ dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n",
+ sensor->thres_temp / 1000);
+ mutex_unlock(&data->thermal_lock);
+
+ for (i = 0; i < HISI_MAX_SENSORS; i++)
+ thermal_zone_device_update(data->sensors[i].tzd);
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_thermal_register_sensor(struct platform_device *pdev,
+ struct hisi_thermal_data *data,
+ struct hisi_thermal_sensor *sensor,
+ int index)
+{
+ int ret, i;
+ const struct thermal_trip *trip;
+
+ sensor->id = index;
+ sensor->thermal = data;
+
+ sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, sensor->id,
+ sensor, &hisi_of_thermal_ops);
+ if (IS_ERR(sensor->tzd)) {
+ ret = PTR_ERR(sensor->tzd);
+ dev_err(&pdev->dev, "failed to register sensor id %d: %d\n",
+ sensor->id, ret);
+ return ret;
+ }
+
+ trip = of_thermal_get_trip_points(sensor->tzd);
+
+ for (i = 0; i < of_thermal_get_ntrips(sensor->tzd); i++) {
+ if (trip[i].type == THERMAL_TRIP_PASSIVE) {
+ sensor->thres_temp = trip[i].temperature;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id of_hisi_thermal_match[] = {
+ { .compatible = "hisilicon,tsensor" },
+ { /* end */ }
+};
+MODULE_DEVICE_TABLE(of, of_hisi_thermal_match);
+
+static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor,
+ bool on)
+{
+ struct thermal_zone_device *tzd = sensor->tzd;
+
+ tzd->ops->set_mode(tzd,
+ on ? THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED);
+}
+
+static int hisi_thermal_probe(struct platform_device *pdev)
+{
+ struct hisi_thermal_data *data;
+ struct resource *res;
+ int i;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->thermal_lock);
+ data->pdev = pdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->regs)) {
+ dev_err(&pdev->dev, "failed to get io address\n");
+ return PTR_ERR(data->regs);
+ }
+
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0)
+ return data->irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, data->irq,
+ hisi_thermal_alarm_irq,
+ hisi_thermal_alarm_irq_thread,
+ 0, "hisi_thermal", data);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ data->clk = devm_clk_get(&pdev->dev, "thermal_clk");
+ if (IS_ERR(data->clk)) {
+ ret = PTR_ERR(data->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get thermal clk: %d\n", ret);
+ return ret;
+ }
+
+ /* enable clock for thermal */
+ ret = clk_prepare_enable(data->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < HISI_MAX_SENSORS; ++i) {
+ ret = hisi_thermal_register_sensor(pdev, data,
+ &data->sensors[i], i);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register thermal sensor: %d\n", ret);
+ goto err_get_sensor_data;
+ }
+ }
+
+ hisi_thermal_enable_bind_irq_sensor(data);
+ data->irq_enabled = true;
+
+ for (i = 0; i < HISI_MAX_SENSORS; i++)
+ hisi_thermal_toggle_sensor(&data->sensors[i], true);
+
+ return 0;
+
+err_get_sensor_data:
+ clk_disable_unprepare(data->clk);
+
+ return ret;
+}
+
+static int hisi_thermal_remove(struct platform_device *pdev)
+{
+ struct hisi_thermal_data *data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < HISI_MAX_SENSORS; i++) {
+ struct hisi_thermal_sensor *sensor = &data->sensors[i];
+
+ hisi_thermal_toggle_sensor(sensor, false);
+ thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd);
+ }
+
+ hisi_thermal_disable_sensor(data);
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hisi_thermal_suspend(struct device *dev)
+{
+ struct hisi_thermal_data *data = dev_get_drvdata(dev);
+
+ hisi_thermal_disable_sensor(data);
+ data->irq_enabled = false;
+
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static int hisi_thermal_resume(struct device *dev)
+{
+ struct hisi_thermal_data *data = dev_get_drvdata(dev);
+
+ clk_prepare_enable(data->clk);
+
+ data->irq_enabled = true;
+ hisi_thermal_enable_bind_irq_sensor(data);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
+ hisi_thermal_suspend, hisi_thermal_resume);
+
+static struct platform_driver hisi_thermal_driver = {
+ .driver = {
+ .name = "hisi_thermal",
+ .pm = &hisi_thermal_pm_ops,
+ .of_match_table = of_hisi_thermal_match,
+ },
+ .probe = hisi_thermal_probe,
+ .remove = hisi_thermal_remove,
+};
+
+module_platform_driver(hisi_thermal_driver);
+
+MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("Hisilicon thermal driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 2ccbc0788..fde4c2876 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -306,7 +306,8 @@ static int imx_bind(struct thermal_zone_device *tz,
ret = thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev,
THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT);
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
if (ret) {
dev_err(&tz->device,
"binding zone %s with cdev %s failed:%d\n",
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 5e8d8e91e..3df3dc34b 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -16,15 +16,20 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/thermal.h>
#include "int340x_thermal_zone.h"
+#include "../intel_soc_dts_iosf.h"
/* Broadwell-U/HSB thermal reporting device */
#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
#define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03
+/* Skylake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_SKL_THERMAL 0x1903
+
/* Braswell thermal reporting device */
#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
@@ -42,6 +47,7 @@ struct proc_thermal_device {
struct acpi_device *adev;
struct power_config power_limits[2];
struct int34x_thermal_zone *int340x_zone;
+ struct intel_soc_dts_sensors *soc_dts;
};
enum proc_thermal_emum_mode_type {
@@ -308,6 +314,18 @@ static int int3401_remove(struct platform_device *pdev)
return 0;
}
+static irqreturn_t proc_thermal_pci_msi_irq(int irq, void *devid)
+{
+ struct proc_thermal_device *proc_priv;
+ struct pci_dev *pdev = devid;
+
+ proc_priv = pci_get_drvdata(pdev);
+
+ intel_soc_dts_iosf_interrupt_handler(proc_priv->soc_dts);
+
+ return IRQ_HANDLED;
+}
+
static int proc_thermal_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *unused)
{
@@ -334,18 +352,57 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, proc_priv);
proc_thermal_emum_mode = PROC_THERMAL_PCI;
+ if (pdev->device == PCI_DEVICE_ID_PROC_BSW_THERMAL) {
+ /*
+ * Enumerate additional DTS sensors available via IOSF.
+ * We do not treat it as a failure if there are no aux
+ * DTSs enabled or if their setup fails. This driver
+ * already exposes sensors, which can be accessed via
+ * ACPI/MSR, so we don't want to fail for the auxiliary DTSs.
+ */
+ proc_priv->soc_dts = intel_soc_dts_iosf_init(
+ INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
+
+ if (proc_priv->soc_dts && pdev->irq) {
+ ret = pci_enable_msi(pdev);
+ if (!ret) {
+ ret = request_threaded_irq(pdev->irq, NULL,
+ proc_thermal_pci_msi_irq,
+ IRQF_ONESHOT, "proc_thermal",
+ pdev);
+ if (ret) {
+ intel_soc_dts_iosf_exit(
+ proc_priv->soc_dts);
+ pci_disable_msi(pdev);
+ proc_priv->soc_dts = NULL;
+ }
+ }
+ } else
+ dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
+ }
+
return 0;
}
static void proc_thermal_pci_remove(struct pci_dev *pdev)
{
- proc_thermal_remove(pci_get_drvdata(pdev));
+ struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev);
+
+ if (proc_priv->soc_dts) {
+ intel_soc_dts_iosf_exit(proc_priv->soc_dts);
+ if (pdev->irq) {
+ free_irq(pdev->irq, pdev);
+ pci_disable_msi(pdev);
+ }
+ }
+ proc_thermal_remove(proc_priv);
pci_disable_device(pdev);
}
static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_SKL_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
{ 0, },
};
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 725718e97..5820e8513 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -119,7 +119,7 @@ exit:
return ret;
}
-static struct kernel_param_ops duration_ops = {
+static const struct kernel_param_ops duration_ops = {
.set = duration_set,
.get = param_get_int,
};
@@ -167,7 +167,7 @@ exit_win:
return ret;
}
-static struct kernel_param_ops window_size_ops = {
+static const struct kernel_param_ops window_size_ops = {
.set = window_size_set,
.get = param_get_int,
};
@@ -697,6 +697,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
{ X86_VENDOR_INTEL, 6, 0x4d},
{ X86_VENDOR_INTEL, 6, 0x4f},
{ X86_VENDOR_INTEL, 6, 0x56},
+ { X86_VENDOR_INTEL, 6, 0x57},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/intel_quark_dts_thermal.c b/drivers/thermal/intel_quark_dts_thermal.c
new file mode 100644
index 000000000..4434ec812
--- /dev/null
+++ b/drivers/thermal/intel_quark_dts_thermal.c
@@ -0,0 +1,473 @@
+/*
+ * intel_quark_dts_thermal.c
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Ong Boon Leong <boon.leong.ong@intel.com>
+ * Intel Malaysia, Penang
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Quark DTS thermal driver is implemented by referencing
+ * intel_soc_dts_thermal.c.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/thermal.h>
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+
+#define X86_FAMILY_QUARK 0x5
+#define X86_MODEL_QUARK_X1000 0x9
+
+/* DTS reset is programmed via QRK_MBI_UNIT_SOC */
+#define QRK_DTS_REG_OFFSET_RESET 0x34
+#define QRK_DTS_RESET_BIT BIT(0)
+
+/* DTS enable is programmed via QRK_MBI_UNIT_RMU */
+#define QRK_DTS_REG_OFFSET_ENABLE 0xB0
+#define QRK_DTS_ENABLE_BIT BIT(15)
+
+/* Temperature Register is read via QRK_MBI_UNIT_RMU */
+#define QRK_DTS_REG_OFFSET_TEMP 0xB1
+#define QRK_DTS_MASK_TEMP 0xFF
+#define QRK_DTS_OFFSET_TEMP 0
+#define QRK_DTS_OFFSET_REL_TEMP 16
+#define QRK_DTS_TEMP_BASE 50
+
+/* Programmable Trip Point Register is configured via QRK_MBI_UNIT_RMU */
+#define QRK_DTS_REG_OFFSET_PTPS 0xB2
+#define QRK_DTS_MASK_TP_THRES 0xFF
+#define QRK_DTS_SHIFT_TP 8
+#define QRK_DTS_ID_TP_CRITICAL 0
+#define QRK_DTS_SAFE_TP_THRES 105
+
+/* Thermal Sensor Register Lock */
+#define QRK_DTS_REG_OFFSET_LOCK 0x71
+#define QRK_DTS_LOCK_BIT BIT(5)
+
+/* Quark DTS has 2 trip points: hot & catastrophic */
+#define QRK_MAX_DTS_TRIPS 2
+/* If DTS not locked, all trip points are configurable */
+#define QRK_DTS_WR_MASK_SET 0x3
+/* If DTS locked, all trip points are not configurable */
+#define QRK_DTS_WR_MASK_CLR 0
+
+#define DEFAULT_POLL_DELAY 2000
+
+struct soc_sensor_entry {
+ bool locked;
+ u32 store_ptps;
+ u32 store_dts_enable;
+ enum thermal_device_mode mode;
+ struct thermal_zone_device *tzone;
+};
+
+static struct soc_sensor_entry *soc_dts;
+
+static int polling_delay = DEFAULT_POLL_DELAY;
+module_param(polling_delay, int, 0644);
+MODULE_PARM_DESC(polling_delay,
+ "Polling interval for checking trip points (in milliseconds)");
+
+static DEFINE_MUTEX(dts_update_mutex);
+
+static int soc_dts_enable(struct thermal_zone_device *tzd)
+{
+ u32 out;
+ struct soc_sensor_entry *aux_entry = tzd->devdata;
+ int ret;
+
+ ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
+ QRK_DTS_REG_OFFSET_ENABLE, &out);
+ if (ret)
+ return ret;
+
+ if (out & QRK_DTS_ENABLE_BIT) {
+ aux_entry->mode = THERMAL_DEVICE_ENABLED;
+ return 0;
+ }
+
+ if (!aux_entry->locked) {
+ out |= QRK_DTS_ENABLE_BIT;
+ ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE,
+ QRK_DTS_REG_OFFSET_ENABLE, out);
+ if (ret)
+ return ret;
+
+ aux_entry->mode = THERMAL_DEVICE_ENABLED;
+ } else {
+ aux_entry->mode = THERMAL_DEVICE_DISABLED;
+ pr_info("DTS is locked. Cannot enable DTS\n");
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
+static int soc_dts_disable(struct thermal_zone_device *tzd)
+{
+ u32 out;
+ struct soc_sensor_entry *aux_entry = tzd->devdata;
+ int ret;
+
+ ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
+ QRK_DTS_REG_OFFSET_ENABLE, &out);
+ if (ret)
+ return ret;
+
+ if (!(out & QRK_DTS_ENABLE_BIT)) {
+ aux_entry->mode = THERMAL_DEVICE_DISABLED;
+ return 0;
+ }
+
+ if (!aux_entry->locked) {
+ out &= ~QRK_DTS_ENABLE_BIT;
+ ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE,
+ QRK_DTS_REG_OFFSET_ENABLE, out);
+
+ if (ret)
+ return ret;
+
+ aux_entry->mode = THERMAL_DEVICE_DISABLED;
+ } else {
+ aux_entry->mode = THERMAL_DEVICE_ENABLED;
+ pr_info("DTS is locked. Cannot disable DTS\n");
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
+static int _get_trip_temp(int trip, unsigned long *temp)
+{
+ int status;
+ u32 out;
+
+ mutex_lock(&dts_update_mutex);
+ status = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
+ QRK_DTS_REG_OFFSET_PTPS, &out);
+ mutex_unlock(&dts_update_mutex);
+
+ if (status)
+ return status;
+
+ /*
+ * Thermal Sensor Programmable Trip Point Register has 8-bit
+ * fields for critical (catastrophic) and hot set trip point
+ * thresholds. The threshold value is always offset by its
+ * temperature base (50 degree Celsius).
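+ * For example, a stored field value of 155 decodes to a trip
+ * point of 105 degrees Celsius.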
+ */
+ *temp = (out >> (trip * QRK_DTS_SHIFT_TP)) & QRK_DTS_MASK_TP_THRES;
+ *temp -= QRK_DTS_TEMP_BASE;
+
+ return 0;
+}
+
+static inline int sys_get_trip_temp(struct thermal_zone_device *tzd,
+ int trip, unsigned long *temp)
+{
+ return _get_trip_temp(trip, temp);
+}
+
+static inline int sys_get_crit_temp(struct thermal_zone_device *tzd,
+ unsigned long *temp)
+{
+ return _get_trip_temp(QRK_DTS_ID_TP_CRITICAL, temp);
+}
+
+static int update_trip_temp(struct soc_sensor_entry *aux_entry,
+ int trip, unsigned long temp)
+{
+ u32 out;
+ u32 temp_out;
+ u32 store_ptps;
+ int ret;
+
+ mutex_lock(&dts_update_mutex);
+ if (aux_entry->locked) {
+ ret = -EPERM;
+ goto failed;
+ }
+
+ ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
+ QRK_DTS_REG_OFFSET_PTPS, &store_ptps);
+ if (ret)
+ goto failed;
+
+ /*
+ * Protection against an unsafe trip point threshold value.
+ * As the Quark X1000 data-sheet does not provide any recommendation
+ * regarding the safe trip point threshold value to use, we choose
+ * the safe value according to the threshold value set by UEFI BIOS.
+ */
+ if (temp > QRK_DTS_SAFE_TP_THRES)
+ temp = QRK_DTS_SAFE_TP_THRES;
+
+ /*
+ * Thermal Sensor Programmable Trip Point Register has 8-bit
+ * fields for critical (catastrophic) and hot set trip point
+ * thresholds. The threshold value is always offset by its
+ * temperature base (50 degree Celsius).
+ */
+ temp_out = temp + QRK_DTS_TEMP_BASE;
+ out = (store_ptps & ~(QRK_DTS_MASK_TP_THRES <<
+ (trip * QRK_DTS_SHIFT_TP)));
+ out |= (temp_out & QRK_DTS_MASK_TP_THRES) <<
+ (trip * QRK_DTS_SHIFT_TP);
+
+ ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE,
+ QRK_DTS_REG_OFFSET_PTPS, out);
+
+failed:
+ mutex_unlock(&dts_update_mutex);
+ return ret;
+}
+
+static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
+ unsigned long temp)
+{
+ return update_trip_temp(tzd->devdata, trip, temp);
+}
+
+static int sys_get_trip_type(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trip_type *type)
+{
+ if (trip)
+ *type = THERMAL_TRIP_HOT;
+ else
+ *type = THERMAL_TRIP_CRITICAL;
+
+ return 0;
+}
+
+static int sys_get_curr_temp(struct thermal_zone_device *tzd,
+ unsigned long *temp)
+{
+ u32 out;
+ int ret;
+
+ mutex_lock(&dts_update_mutex);
+ ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
+ QRK_DTS_REG_OFFSET_TEMP, &out);
+ mutex_unlock(&dts_update_mutex);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Thermal Sensor Temperature Register has 8-bit field
+ * for temperature value (offset by temperature base
+ * 50 degree Celsius).
+ */
+ out = (out >> QRK_DTS_OFFSET_TEMP) & QRK_DTS_MASK_TEMP;
+ *temp = out - QRK_DTS_TEMP_BASE;
+
+ return 0;
+}
+
+static int sys_get_mode(struct thermal_zone_device *tzd,
+ enum thermal_device_mode *mode)
+{
+ struct soc_sensor_entry *aux_entry = tzd->devdata;
+ *mode = aux_entry->mode;
+ return 0;
+}
+
+static int sys_set_mode(struct thermal_zone_device *tzd,
+ enum thermal_device_mode mode)
+{
+ int ret;
+
+ mutex_lock(&dts_update_mutex);
+ if (mode == THERMAL_DEVICE_ENABLED)
+ ret = soc_dts_enable(tzd);
+ else
+ ret = soc_dts_disable(tzd);
+ mutex_unlock(&dts_update_mutex);
+
+ return ret;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = sys_get_curr_temp,
+ .get_trip_temp = sys_get_trip_temp,
+ .get_trip_type = sys_get_trip_type,
+ .set_trip_temp = sys_set_trip_temp,
+ .get_crit_temp = sys_get_crit_temp,
+ .get_mode = sys_get_mode,
+ .set_mode = sys_set_mode,
+};
+
+static void free_soc_dts(struct soc_sensor_entry *aux_entry)
+{
+ if (aux_entry) {
+ if (!aux_entry->locked) {
+ mutex_lock(&dts_update_mutex);
+ iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE,
+ QRK_DTS_REG_OFFSET_ENABLE,
+ aux_entry->store_dts_enable);
+
+ iosf_mbi_write(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_WRITE,
+ QRK_DTS_REG_OFFSET_PTPS,
+ aux_entry->store_ptps);
+ mutex_unlock(&dts_update_mutex);
+ }
+ thermal_zone_device_unregister(aux_entry->tzone);
+ kfree(aux_entry);
+ }
+}
+
+static struct soc_sensor_entry *alloc_soc_dts(void)
+{
+ struct soc_sensor_entry *aux_entry;
+ int err;
+ u32 out;
+ int wr_mask;
+
+ aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL);
+ if (!aux_entry)
+ return ERR_PTR(-ENOMEM);
+
+ /* Check if DTS register is locked */
+ err = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
+ QRK_DTS_REG_OFFSET_LOCK,
+ &out);
+ if (err)
+ goto err_ret;
+
+ if (out & QRK_DTS_LOCK_BIT) {
+ aux_entry->locked = true;
+ wr_mask = QRK_DTS_WR_MASK_CLR;
+ } else {
+ aux_entry->locked = false;
+ wr_mask = QRK_DTS_WR_MASK_SET;
+ }
+
+ /* Store DTS default state if DTS registers are not locked */
+ if (!aux_entry->locked) {
+ /* Store DTS default enable for restore on exit */
+ err = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
+ QRK_DTS_REG_OFFSET_ENABLE,
+ &aux_entry->store_dts_enable);
+ if (err)
+ goto err_ret;
+
+ /* Store DTS default PTPS register for restore on exit */
+ err = iosf_mbi_read(QRK_MBI_UNIT_RMU, QRK_MBI_RMU_READ,
+ QRK_DTS_REG_OFFSET_PTPS,
+ &aux_entry->store_ptps);
+ if (err)
+ goto err_ret;
+ }
+
+ aux_entry->tzone = thermal_zone_device_register("quark_dts",
+ QRK_MAX_DTS_TRIPS,
+ wr_mask,
+ aux_entry, &tzone_ops, NULL, 0, polling_delay);
+ if (IS_ERR(aux_entry->tzone)) {
+ err = PTR_ERR(aux_entry->tzone);
+ goto err_ret;
+ }
+
+ mutex_lock(&dts_update_mutex);
+ err = soc_dts_enable(aux_entry->tzone);
+ mutex_unlock(&dts_update_mutex);
+ if (err)
+ goto err_aux_status;
+
+ return aux_entry;
+
+err_aux_status:
+ thermal_zone_device_unregister(aux_entry->tzone);
+err_ret:
+ kfree(aux_entry);
+ return ERR_PTR(err);
+}
+
+static const struct x86_cpu_id qrk_thermal_ids[] __initconst = {
+ { X86_VENDOR_INTEL, X86_FAMILY_QUARK, X86_MODEL_QUARK_X1000 },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids);
+
+static int __init intel_quark_thermal_init(void)
+{
+ if (!x86_match_cpu(qrk_thermal_ids) || !iosf_mbi_available())
+ return -ENODEV;
+
+ soc_dts = alloc_soc_dts();
+ /* alloc_soc_dts() returns an ERR_PTR on failure; don't hand it to free_soc_dts() */
+ if (IS_ERR(soc_dts))
+ return PTR_ERR(soc_dts);
+
+ return 0;
+}
+
+static void __exit intel_quark_thermal_exit(void)
+{
+ free_soc_dts(soc_dts);
+}
+
+module_init(intel_quark_thermal_init)
+module_exit(intel_quark_thermal_exit)
+
+MODULE_DESCRIPTION("Intel Quark DTS Thermal Driver");
+MODULE_AUTHOR("Ong Boon Leong <boon.leong.ong@intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/thermal/intel_soc_dts_iosf.c b/drivers/thermal/intel_soc_dts_iosf.c
new file mode 100644
index 000000000..42e4b6ac3
--- /dev/null
+++ b/drivers/thermal/intel_soc_dts_iosf.c
@@ -0,0 +1,478 @@
+/*
+ * intel_soc_dts_iosf.c
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <asm/iosf_mbi.h>
+#include "intel_soc_dts_iosf.h"
+
+#define SOC_DTS_OFFSET_ENABLE 0xB0
+#define SOC_DTS_OFFSET_TEMP 0xB1
+
+#define SOC_DTS_OFFSET_PTPS 0xB2
+#define SOC_DTS_OFFSET_PTTS 0xB3
+#define SOC_DTS_OFFSET_PTTSS 0xB4
+#define SOC_DTS_OFFSET_PTMC 0x80
+#define SOC_DTS_TE_AUX0 0xB5
+#define SOC_DTS_TE_AUX1 0xB6
+
+#define SOC_DTS_AUX0_ENABLE_BIT BIT(0)
+#define SOC_DTS_AUX1_ENABLE_BIT BIT(1)
+#define SOC_DTS_CPU_MODULE0_ENABLE_BIT BIT(16)
+#define SOC_DTS_CPU_MODULE1_ENABLE_BIT BIT(17)
+#define SOC_DTS_TE_SCI_ENABLE BIT(9)
+#define SOC_DTS_TE_SMI_ENABLE BIT(10)
+#define SOC_DTS_TE_MSI_ENABLE BIT(11)
+#define SOC_DTS_TE_APICA_ENABLE BIT(14)
+#define SOC_DTS_PTMC_APIC_DEASSERT_BIT BIT(4)
+
+/* DTS encoding for TJ MAX temperature */
+#define SOC_DTS_TJMAX_ENCODING 0x7F
+
+/* Only 2 out of 4 are allowed for OSPM */
+#define SOC_MAX_DTS_TRIPS 2
+
+/* Mask for two trips in status bits */
+#define SOC_DTS_TRIP_MASK 0x03
+
+/* DTS0 and DTS 1 */
+#define SOC_MAX_DTS_SENSORS 2
+
+static int get_tj_max(u32 *tj_max)
+{
+ u32 eax, edx;
+ u32 val;
+ int err;
+
+ err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+ if (err)
+ goto err_ret;
+ else {
+ val = (eax >> 16) & 0xff;
+ if (val)
+ *tj_max = val * 1000;
+ else {
+ err = -EINVAL;
+ goto err_ret;
+ }
+ }
+
+ return 0;
+err_ret:
+ *tj_max = 0;
+
+ return err;
+}
+
+static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip,
+ unsigned long *temp)
+{
+ int status;
+ u32 out;
+ struct intel_soc_dts_sensor_entry *dts;
+ struct intel_soc_dts_sensors *sensors;
+
+ dts = tzd->devdata;
+ sensors = dts->sensors;
+ mutex_lock(&sensors->dts_update_lock);
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_PTPS, &out);
+ mutex_unlock(&sensors->dts_update_lock);
+ if (status)
+ return status;
+
+ out = (out >> (trip * 8)) & SOC_DTS_TJMAX_ENCODING;
+ if (!out)
+ *temp = 0;
+ else
+ *temp = sensors->tj_max - out * 1000;
+
+ return 0;
+}
+
+static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
+ int thres_index, unsigned long temp,
+ enum thermal_trip_type trip_type)
+{
+ int status;
+ u32 temp_out;
+ u32 out;
+ u32 store_ptps;
+ u32 store_ptmc;
+ u32 store_te_out;
+ u32 te_out;
+ u32 int_enable_bit = SOC_DTS_TE_APICA_ENABLE;
+ struct intel_soc_dts_sensors *sensors = dts->sensors;
+
+ if (sensors->intr_type == INTEL_SOC_DTS_INTERRUPT_MSI)
+ int_enable_bit |= SOC_DTS_TE_MSI_ENABLE;
+
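+ /* e.g. a tj_max of 90000 (90 C) and an 85000 (85 C) trip encode as 5 */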
+ temp_out = (sensors->tj_max - temp) / 1000;
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_PTPS, &store_ptps);
+ if (status)
+ return status;
+
+ out = (store_ptps & ~(0xFF << (thres_index * 8)));
+ out |= (temp_out & 0xFF) << (thres_index * 8);
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTPS, out);
+ if (status)
+ return status;
+
+ pr_debug("update_trip_temp PTPS = %x\n", out);
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_PTMC, &out);
+ if (status)
+ goto err_restore_ptps;
+
+ store_ptmc = out;
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_TE_AUX0 + thres_index,
+ &te_out);
+ if (status)
+ goto err_restore_ptmc;
+
+ store_te_out = te_out;
+ /* Enable for CPU module 0 and module 1 */
+ out |= (SOC_DTS_CPU_MODULE0_ENABLE_BIT |
+ SOC_DTS_CPU_MODULE1_ENABLE_BIT);
+ if (temp) {
+ if (thres_index)
+ out |= SOC_DTS_AUX1_ENABLE_BIT;
+ else
+ out |= SOC_DTS_AUX0_ENABLE_BIT;
+ te_out |= int_enable_bit;
+ } else {
+ if (thres_index)
+ out &= ~SOC_DTS_AUX1_ENABLE_BIT;
+ else
+ out &= ~SOC_DTS_AUX0_ENABLE_BIT;
+ te_out &= ~int_enable_bit;
+ }
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTMC, out);
+ if (status)
+ goto err_restore_te_out;
+
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_TE_AUX0 + thres_index,
+ te_out);
+ if (status)
+ goto err_restore_te_out;
+
+ dts->trip_types[thres_index] = trip_type;
+
+ return 0;
+err_restore_te_out:
+ iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_TE_AUX0 + thres_index, store_te_out);
+err_restore_ptmc:
+ iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTMC, store_ptmc);
+err_restore_ptps:
+ iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTPS, store_ptps);
+ /* Nothing we can do if restore fails */
+
+ return status;
+}
+
+static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
+ unsigned long temp)
+{
+ struct intel_soc_dts_sensor_entry *dts = tzd->devdata;
+ struct intel_soc_dts_sensors *sensors = dts->sensors;
+ int status;
+
+ if (temp > sensors->tj_max)
+ return -EINVAL;
+
+ mutex_lock(&sensors->dts_update_lock);
+ status = update_trip_temp(tzd->devdata, trip, temp,
+ dts->trip_types[trip]);
+ mutex_unlock(&sensors->dts_update_lock);
+
+ return status;
+}
+
+static int sys_get_trip_type(struct thermal_zone_device *tzd,
+ int trip, enum thermal_trip_type *type)
+{
+ struct intel_soc_dts_sensor_entry *dts;
+
+ dts = tzd->devdata;
+
+ *type = dts->trip_types[trip];
+
+ return 0;
+}
+
+static int sys_get_curr_temp(struct thermal_zone_device *tzd,
+ unsigned long *temp)
+{
+ int status;
+ u32 out;
+ struct intel_soc_dts_sensor_entry *dts;
+ struct intel_soc_dts_sensors *sensors;
+
+ dts = tzd->devdata;
+ sensors = dts->sensors;
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_TEMP, &out);
+ if (status)
+ return status;
+
+ out = (out & dts->temp_mask) >> dts->temp_shift;
+ out -= SOC_DTS_TJMAX_ENCODING;
+ *temp = sensors->tj_max - out * 1000;
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = sys_get_curr_temp,
+ .get_trip_temp = sys_get_trip_temp,
+ .get_trip_type = sys_get_trip_type,
+ .set_trip_temp = sys_set_trip_temp,
+};
+
+static int soc_dts_enable(int id)
+{
+ u32 out;
+ int ret;
+
+ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_ENABLE, &out);
+ if (ret)
+ return ret;
+
+ if (!(out & BIT(id))) {
+ out |= BIT(id);
+ ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_ENABLE, out);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void remove_dts_thermal_zone(struct intel_soc_dts_sensor_entry *dts)
+{
+ if (dts) {
+ iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_ENABLE, dts->store_status);
+ thermal_zone_device_unregister(dts->tzone);
+ }
+}
+
+static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
+ bool notification_support, int trip_cnt,
+ int read_only_trip_cnt)
+{
+ char name[10];
+ int trip_count = 0;
+ int trip_mask = 0;
+ u32 store_ptps;
+ int ret;
+ int i;
+
+ /* Store status to restore on exit */
+ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_ENABLE,
+ &dts->store_status);
+ if (ret)
+ goto err_ret;
+
+ dts->id = id;
+ dts->temp_mask = 0x00FF << (id * 8);
+ dts->temp_shift = id * 8;
+ if (notification_support) {
+ trip_count = min(SOC_MAX_DTS_TRIPS, trip_cnt);
+ trip_mask = BIT(trip_count - read_only_trip_cnt) - 1;
+ }
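+ /* e.g. trip_cnt = 2 with one read-only trip yields trip_mask = 0x1 */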
+
+ /* Check that the writable trips we provide are not already used by BIOS */
+ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_PTPS, &store_ptps);
+ if (ret)
+ trip_mask = 0;
+ else {
+ for (i = 0; i < trip_count; ++i) {
+ if (trip_mask & BIT(i))
+ if (store_ptps & (0xff << (i * 8)))
+ trip_mask &= ~BIT(i);
+ }
+ }
+ dts->trip_mask = trip_mask;
+ dts->trip_count = trip_count;
+ snprintf(name, sizeof(name), "soc_dts%d", id);
+ dts->tzone = thermal_zone_device_register(name,
+ trip_count,
+ trip_mask,
+ dts, &tzone_ops,
+ NULL, 0, 0);
+ if (IS_ERR(dts->tzone)) {
+ ret = PTR_ERR(dts->tzone);
+ goto err_ret;
+ }
+
+ ret = soc_dts_enable(id);
+ if (ret)
+ goto err_enable;
+
+ return 0;
+err_enable:
+ thermal_zone_device_unregister(dts->tzone);
+err_ret:
+ return ret;
+}
+
+int intel_soc_dts_iosf_add_read_only_critical_trip(
+ struct intel_soc_dts_sensors *sensors, int critical_offset)
+{
+ int i, j;
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ for (j = 0; j < sensors->soc_dts[i].trip_count; ++j) {
+ if (!(sensors->soc_dts[i].trip_mask & BIT(j))) {
+ return update_trip_temp(&sensors->soc_dts[i], j,
+ sensors->tj_max - critical_offset,
+ THERMAL_TRIP_CRITICAL);
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_add_read_only_critical_trip);
+
+void intel_soc_dts_iosf_interrupt_handler(struct intel_soc_dts_sensors *sensors)
+{
+ u32 sticky_out;
+ int status;
+ u32 ptmc_out;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sensors->intr_notify_lock, flags);
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_PTMC, &ptmc_out);
+ ptmc_out |= SOC_DTS_PTMC_APIC_DEASSERT_BIT;
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTMC, ptmc_out);
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+ SOC_DTS_OFFSET_PTTSS, &sticky_out);
+ pr_debug("status %d PTTSS %x\n", status, sticky_out);
+ if (sticky_out & SOC_DTS_TRIP_MASK) {
+ int i;
+ /* reset sticky bit */
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+ SOC_DTS_OFFSET_PTTSS, sticky_out);
+ spin_unlock_irqrestore(&sensors->intr_notify_lock, flags);
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ pr_debug("TZD update for zone %d\n", i);
+ thermal_zone_device_update(sensors->soc_dts[i].tzone);
+ }
+ } else
+ spin_unlock_irqrestore(&sensors->intr_notify_lock, flags);
+}
+EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_interrupt_handler);
+
+struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
+ enum intel_soc_dts_interrupt_type intr_type, int trip_count,
+ int read_only_trip_count)
+{
+ struct intel_soc_dts_sensors *sensors;
+ bool notification;
+ u32 tj_max;
+ int ret;
+ int i;
+
+ if (!iosf_mbi_available())
+ return ERR_PTR(-ENODEV);
+
+ if (!trip_count || read_only_trip_count > trip_count)
+ return ERR_PTR(-EINVAL);
+
+ if (get_tj_max(&tj_max))
+ return ERR_PTR(-EINVAL);
+
+ sensors = kzalloc(sizeof(*sensors), GFP_KERNEL);
+ if (!sensors)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&sensors->intr_notify_lock);
+ mutex_init(&sensors->dts_update_lock);
+ sensors->intr_type = intr_type;
+ sensors->tj_max = tj_max;
+ if (intr_type == INTEL_SOC_DTS_INTERRUPT_NONE)
+ notification = false;
+ else
+ notification = true;
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ sensors->soc_dts[i].sensors = sensors;
+ ret = add_dts_thermal_zone(i, &sensors->soc_dts[i],
+ notification, trip_count,
+ read_only_trip_count);
+ if (ret)
+ goto err_free;
+ }
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ ret = update_trip_temp(&sensors->soc_dts[i], 0, 0,
+ THERMAL_TRIP_PASSIVE);
+ if (ret)
+ goto err_remove_zone;
+
+ ret = update_trip_temp(&sensors->soc_dts[i], 1, 0,
+ THERMAL_TRIP_PASSIVE);
+ if (ret)
+ goto err_remove_zone;
+ }
+
+ return sensors;
+err_remove_zone:
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
+ remove_dts_thermal_zone(&sensors->soc_dts[i]);
+
+err_free:
+ kfree(sensors);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_init);
+
+void intel_soc_dts_iosf_exit(struct intel_soc_dts_sensors *sensors)
+{
+ int i;
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ update_trip_temp(&sensors->soc_dts[i], 0, 0, 0);
+ update_trip_temp(&sensors->soc_dts[i], 1, 0, 0);
+ remove_dts_thermal_zone(&sensors->soc_dts[i]);
+ }
+ kfree(sensors);
+}
+EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel_soc_dts_iosf.h b/drivers/thermal/intel_soc_dts_iosf.h
new file mode 100644
index 000000000..625e37bf9
--- /dev/null
+++ b/drivers/thermal/intel_soc_dts_iosf.h
@@ -0,0 +1,62 @@
+/*
+ * intel_soc_dts_iosf.h
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _INTEL_SOC_DTS_IOSF_CORE_H
+#define _INTEL_SOC_DTS_IOSF_CORE_H
+
+#include <linux/thermal.h>
+
+/* DTS0 and DTS 1 */
+#define SOC_MAX_DTS_SENSORS 2
+
+enum intel_soc_dts_interrupt_type {
+ INTEL_SOC_DTS_INTERRUPT_NONE,
+ INTEL_SOC_DTS_INTERRUPT_APIC,
+ INTEL_SOC_DTS_INTERRUPT_MSI,
+ INTEL_SOC_DTS_INTERRUPT_SCI,
+ INTEL_SOC_DTS_INTERRUPT_SMI,
+};
+
+struct intel_soc_dts_sensors;
+
+struct intel_soc_dts_sensor_entry {
+ int id;
+ u32 temp_mask;
+ u32 temp_shift;
+ u32 store_status;
+ u32 trip_mask;
+ u32 trip_count;
+ enum thermal_trip_type trip_types[2];
+ struct thermal_zone_device *tzone;
+ struct intel_soc_dts_sensors *sensors;
+};
+
+struct intel_soc_dts_sensors {
+ u32 tj_max;
+ spinlock_t intr_notify_lock;
+ struct mutex dts_update_lock;
+ enum intel_soc_dts_interrupt_type intr_type;
+ struct intel_soc_dts_sensor_entry soc_dts[SOC_MAX_DTS_SENSORS];
+};
+
+struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
+ enum intel_soc_dts_interrupt_type intr_type, int trip_count,
+ int read_only_trip_count);
+void intel_soc_dts_iosf_exit(struct intel_soc_dts_sensors *sensors);
+void intel_soc_dts_iosf_interrupt_handler(
+ struct intel_soc_dts_sensors *sensors);
+int intel_soc_dts_iosf_add_read_only_critical_trip(
+ struct intel_soc_dts_sensors *sensors, int critical_offset);
+#endif
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index 9013505e4..4ebb31a35 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -16,431 +16,54 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/thermal.h>
#include <asm/cpu_device_id.h>
-#include <asm/iosf_mbi.h>
-
-#define SOC_DTS_OFFSET_ENABLE 0xB0
-#define SOC_DTS_OFFSET_TEMP 0xB1
-
-#define SOC_DTS_OFFSET_PTPS 0xB2
-#define SOC_DTS_OFFSET_PTTS 0xB3
-#define SOC_DTS_OFFSET_PTTSS 0xB4
-#define SOC_DTS_OFFSET_PTMC 0x80
-#define SOC_DTS_TE_AUX0 0xB5
-#define SOC_DTS_TE_AUX1 0xB6
-
-#define SOC_DTS_AUX0_ENABLE_BIT BIT(0)
-#define SOC_DTS_AUX1_ENABLE_BIT BIT(1)
-#define SOC_DTS_CPU_MODULE0_ENABLE_BIT BIT(16)
-#define SOC_DTS_CPU_MODULE1_ENABLE_BIT BIT(17)
-#define SOC_DTS_TE_SCI_ENABLE BIT(9)
-#define SOC_DTS_TE_SMI_ENABLE BIT(10)
-#define SOC_DTS_TE_MSI_ENABLE BIT(11)
-#define SOC_DTS_TE_APICA_ENABLE BIT(14)
-#define SOC_DTS_PTMC_APIC_DEASSERT_BIT BIT(4)
-
-/* DTS encoding for TJ MAX temperature */
-#define SOC_DTS_TJMAX_ENCODING 0x7F
-
-/* IRQ 86 is a fixed APIC interrupt for BYT DTS Aux threshold notifications */
-#define BYT_SOC_DTS_APIC_IRQ 86
-
-/* Only 2 out of 4 is allowed for OSPM */
-#define SOC_MAX_DTS_TRIPS 2
-
-/* Mask for two trips in status bits */
-#define SOC_DTS_TRIP_MASK 0x03
-
-/* DTS0 and DTS 1 */
-#define SOC_MAX_DTS_SENSORS 2
+#include "intel_soc_dts_iosf.h"
#define CRITICAL_OFFSET_FROM_TJ_MAX 5000
-struct soc_sensor_entry {
- int id;
- u32 tj_max;
- u32 temp_mask;
- u32 temp_shift;
- u32 store_status;
- struct thermal_zone_device *tzone;
-};
-
-static struct soc_sensor_entry *soc_dts[SOC_MAX_DTS_SENSORS];
-
static int crit_offset = CRITICAL_OFFSET_FROM_TJ_MAX;
module_param(crit_offset, int, 0644);
MODULE_PARM_DESC(crit_offset,
"Critical Temperature offset from tj max in millidegree Celsius.");
-static DEFINE_MUTEX(aux_update_mutex);
-static spinlock_t intr_notify_lock;
-static int soc_dts_thres_irq;
-
-static int get_tj_max(u32 *tj_max)
-{
- u32 eax, edx;
- u32 val;
- int err;
-
- err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
- if (err)
- goto err_ret;
- else {
- val = (eax >> 16) & 0xff;
- if (val)
- *tj_max = val * 1000;
- else {
- err = -EINVAL;
- goto err_ret;
- }
- }
-
- return 0;
-err_ret:
- *tj_max = 0;
-
- return err;
-}
-
-static int sys_get_trip_temp(struct thermal_zone_device *tzd,
- int trip, unsigned long *temp)
-{
- int status;
- u32 out;
- struct soc_sensor_entry *aux_entry;
-
- aux_entry = tzd->devdata;
-
- if (!trip) {
- /* Just return the critical temp */
- *temp = aux_entry->tj_max - crit_offset;
- return 0;
- }
-
- mutex_lock(&aux_update_mutex);
- status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
- SOC_DTS_OFFSET_PTPS, &out);
- mutex_unlock(&aux_update_mutex);
- if (status)
- return status;
-
- out = (out >> (trip * 8)) & SOC_DTS_TJMAX_ENCODING;
-
- if (!out)
- *temp = 0;
- else
- *temp = aux_entry->tj_max - out * 1000;
-
- return 0;
-}
-
-static int update_trip_temp(struct soc_sensor_entry *aux_entry,
- int thres_index, unsigned long temp)
-{
- int status;
- u32 temp_out;
- u32 out;
- u32 store_ptps;
- u32 store_ptmc;
- u32 store_te_out;
- u32 te_out;
-
- u32 int_enable_bit = SOC_DTS_TE_APICA_ENABLE |
- SOC_DTS_TE_MSI_ENABLE;
-
- temp_out = (aux_entry->tj_max - temp) / 1000;
-
- status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
- SOC_DTS_OFFSET_PTPS, &store_ptps);
- if (status)
- return status;
-
- out = (store_ptps & ~(0xFF << (thres_index * 8)));
- out |= (temp_out & 0xFF) << (thres_index * 8);
- status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
- SOC_DTS_OFFSET_PTPS, out);
- if (status)
- return status;
- pr_debug("update_trip_temp PTPS = %x\n", out);
- status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
- SOC_DTS_OFFSET_PTMC, &out);
- if (status)
- goto err_restore_ptps;
-
- store_ptmc = out;
-
- status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
- SOC_DTS_TE_AUX0 + thres_index,
- &te_out);
- if (status)
- goto err_restore_ptmc;
-
- store_te_out = te_out;
-
- /* Enable for CPU module 0 and module 1 */
- out |= (SOC_DTS_CPU_MODULE0_ENABLE_BIT |
- SOC_DTS_CPU_MODULE1_ENABLE_BIT);
- if (temp) {
- if (thres_index)
- out |= SOC_DTS_AUX1_ENABLE_BIT;
- else
- out |= SOC_DTS_AUX0_ENABLE_BIT;
- te_out |= int_enable_bit;
- } else {
- if (thres_index)
- out &= ~SOC_DTS_AUX1_ENABLE_BIT;
- else
- out &= ~SOC_DTS_AUX0_ENABLE_BIT;
- te_out &= ~int_enable_bit;
- }
- status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
- SOC_DTS_OFFSET_PTMC, out);
- if (status)
- goto err_restore_te_out;
-
- status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
- SOC_DTS_TE_AUX0 + thres_index,
- te_out);
- if (status)
- goto err_restore_te_out;
-
- return 0;
-
-err_restore_te_out:
- iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
- SOC_DTS_OFFSET_PTMC, store_te_out);
-err_restore_ptmc:
- iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
- SOC_DTS_OFFSET_PTMC, store_ptmc);
-err_restore_ptps:
- iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
- SOC_DTS_OFFSET_PTPS, store_ptps);
- /* Nothing we can do if restore fails */
-
- return status;
-}
-
-static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
- unsigned long temp)
-{
- struct soc_sensor_entry *aux_entry = tzd->devdata;
- int status;
-
- if (temp > (aux_entry->tj_max - crit_offset))
- return -EINVAL;
-
- mutex_lock(&aux_update_mutex);
- status = update_trip_temp(tzd->devdata, trip, temp);
- mutex_unlock(&aux_update_mutex);
-
- return status;
-}
-
-static int sys_get_trip_type(struct thermal_zone_device *thermal,
- int trip, enum thermal_trip_type *type)
-{
- if (trip)
- *type = THERMAL_TRIP_PASSIVE;
- else
- *type = THERMAL_TRIP_CRITICAL;
-
- return 0;
-}
-
-static int sys_get_curr_temp(struct thermal_zone_device *tzd,
- unsigned long *temp)
-{
- int status;
- u32 out;
- struct soc_sensor_entry *aux_entry;
-
- aux_entry = tzd->devdata;
-
- status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
- SOC_DTS_OFFSET_TEMP, &out);
- if (status)
- return status;
-
- out = (out & aux_entry->temp_mask) >> aux_entry->temp_shift;
- out -= SOC_DTS_TJMAX_ENCODING;
- *temp = aux_entry->tj_max - out * 1000;
-
- return 0;
-}
-
-static struct thermal_zone_device_ops tzone_ops = {
- .get_temp = sys_get_curr_temp,
- .get_trip_temp = sys_get_trip_temp,
- .get_trip_type = sys_get_trip_type,
- .set_trip_temp = sys_set_trip_temp,
-};
-
-static void free_soc_dts(struct soc_sensor_entry *aux_entry)
-{
- if (aux_entry) {
- iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
- SOC_DTS_OFFSET_ENABLE, aux_entry->store_status);
- thermal_zone_device_unregister(aux_entry->tzone);
- kfree(aux_entry);
- }
-}
-
-static int soc_dts_enable(int id)
-{
- u32 out;
- int ret;
-
- ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
- SOC_DTS_OFFSET_ENABLE, &out);
- if (ret)
- return ret;
-
- if (!(out & BIT(id))) {
- out |= BIT(id);
- ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
- SOC_DTS_OFFSET_ENABLE, out);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max,
- bool notification_support)
-{
- struct soc_sensor_entry *aux_entry;
- char name[10];
- int trip_count = 0;
- int trip_mask = 0;
- int err;
-
- aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL);
- if (!aux_entry) {
- err = -ENOMEM;
- return ERR_PTR(-ENOMEM);
- }
-
- /* Store status to restor on exit */
- err = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
- SOC_DTS_OFFSET_ENABLE,
- &aux_entry->store_status);
- if (err)
- goto err_ret;
-
- aux_entry->id = id;
- aux_entry->tj_max = tj_max;
- aux_entry->temp_mask = 0x00FF << (id * 8);
- aux_entry->temp_shift = id * 8;
- if (notification_support) {
- trip_count = SOC_MAX_DTS_TRIPS;
- trip_mask = 0x02;
- }
- snprintf(name, sizeof(name), "soc_dts%d", id);
- aux_entry->tzone = thermal_zone_device_register(name,
- trip_count,
- trip_mask,
- aux_entry, &tzone_ops,
- NULL, 0, 0);
- if (IS_ERR(aux_entry->tzone)) {
- err = PTR_ERR(aux_entry->tzone);
- goto err_ret;
- }
-
- err = soc_dts_enable(id);
- if (err)
- goto err_aux_status;
-
- return aux_entry;
-
-err_aux_status:
- thermal_zone_device_unregister(aux_entry->tzone);
-err_ret:
- kfree(aux_entry);
- return ERR_PTR(err);
-}
-
-static void proc_thermal_interrupt(void)
-{
- u32 sticky_out;
- int status;
- u32 ptmc_out;
- unsigned long flags;
-
- spin_lock_irqsave(&intr_notify_lock, flags);
-
- /* Clear APIC interrupt */
- status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
- SOC_DTS_OFFSET_PTMC, &ptmc_out);
-
- ptmc_out |= SOC_DTS_PTMC_APIC_DEASSERT_BIT;
- status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
- SOC_DTS_OFFSET_PTMC, ptmc_out);
-
- /* Read status here */
- status = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
- SOC_DTS_OFFSET_PTTSS, &sticky_out);
- pr_debug("status %d PTTSS %x\n", status, sticky_out);
- if (sticky_out & SOC_DTS_TRIP_MASK) {
- int i;
- /* reset sticky bit */
- status = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
- SOC_DTS_OFFSET_PTTSS, sticky_out);
- spin_unlock_irqrestore(&intr_notify_lock, flags);
-
- for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
- pr_debug("TZD update for zone %d\n", i);
- thermal_zone_device_update(soc_dts[i]->tzone);
- }
- } else
- spin_unlock_irqrestore(&intr_notify_lock, flags);
+/* IRQ 86 is a fixed APIC interrupt for BYT DTS Aux threshold notifications */
+#define BYT_SOC_DTS_APIC_IRQ 86
-}
+static int soc_dts_thres_irq;
+static struct intel_soc_dts_sensors *soc_dts;
static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
{
- proc_thermal_interrupt();
pr_debug("proc_thermal_interrupt\n");
+ intel_soc_dts_iosf_interrupt_handler(soc_dts);
return IRQ_HANDLED;
}
static const struct x86_cpu_id soc_thermal_ids[] = {
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x4c, 0, 0},
{}
};
MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
static int __init intel_soc_thermal_init(void)
{
- u32 tj_max;
int err = 0;
- int i;
const struct x86_cpu_id *match_cpu;
match_cpu = x86_match_cpu(soc_thermal_ids);
if (!match_cpu)
return -ENODEV;
- if (get_tj_max(&tj_max))
- return -EINVAL;
-
- soc_dts_thres_irq = (int)match_cpu->driver_data;
-
- for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
- soc_dts[i] = alloc_soc_dts(i, tj_max,
- soc_dts_thres_irq ? true : false);
- if (IS_ERR(soc_dts[i])) {
- err = PTR_ERR(soc_dts[i]);
- goto err_free;
- }
+ /* Create a zone with 2 trips, one of them marked as read only */
+ soc_dts = intel_soc_dts_iosf_init(INTEL_SOC_DTS_INTERRUPT_APIC, 2, 1);
+ if (IS_ERR(soc_dts)) {
+ err = PTR_ERR(soc_dts);
+ return err;
}
- spin_lock_init(&intr_notify_lock);
+ soc_dts_thres_irq = (int)match_cpu->driver_data;
if (soc_dts_thres_irq) {
err = request_threaded_irq(soc_dts_thres_irq, NULL,
@@ -449,42 +72,31 @@ static int __init intel_soc_thermal_init(void)
"soc_dts", soc_dts);
if (err) {
pr_err("request_threaded_irq ret %d\n", err);
- goto err_free;
+ goto error_irq;
}
}
- for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
- err = update_trip_temp(soc_dts[i], 0, tj_max - crit_offset);
- if (err)
- goto err_trip_temp;
- }
+ err = intel_soc_dts_iosf_add_read_only_critical_trip(soc_dts,
+ crit_offset);
+ if (err)
+ goto error_trips;
return 0;
-err_trip_temp:
- i = SOC_MAX_DTS_SENSORS;
+error_trips:
if (soc_dts_thres_irq)
free_irq(soc_dts_thres_irq, soc_dts);
-err_free:
- while (--i >= 0)
- free_soc_dts(soc_dts[i]);
+error_irq:
+ intel_soc_dts_iosf_exit(soc_dts);
return err;
}
static void __exit intel_soc_thermal_exit(void)
{
- int i;
-
- for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
- update_trip_temp(soc_dts[i], 0, 0);
-
if (soc_dts_thres_irq)
free_irq(soc_dts_thres_irq, soc_dts);
-
- for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
- free_soc_dts(soc_dts[i]);
-
+ intel_soc_dts_iosf_exit(soc_dts);
}
module_init(intel_soc_thermal_init)
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 668fb1bde..b295b2b6c 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -58,6 +58,8 @@ struct __thermal_bind_params {
* @mode: current thermal zone device mode (enabled/disabled)
* @passive_delay: polling interval while passive cooling is activated
* @polling_delay: zone polling interval
+ * @slope: slope of the temperature adjustment curve
+ * @offset: offset of the temperature adjustment curve
* @ntrips: number of trip points
* @trips: an array of trip points (0..ntrips - 1)
* @num_tbps: number of thermal bind params
@@ -70,6 +72,8 @@ struct __thermal_zone {
enum thermal_device_mode mode;
int passive_delay;
int polling_delay;
+ int slope;
+ int offset;
/* trip data */
int ntrips;
@@ -227,7 +231,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
ret = thermal_zone_bind_cooling_device(thermal,
tbp->trip_id, cdev,
tbp->max,
- tbp->min);
+ tbp->min,
+ tbp->usage);
if (ret)
return ret;
}
@@ -581,7 +586,7 @@ static int thermal_of_populate_bind_params(struct device_node *np,
u32 prop;
/* Default weight. Usage is optional */
- __tbp->usage = 0;
+ __tbp->usage = THERMAL_WEIGHT_DEFAULT;
ret = of_property_read_u32(np, "contribution", &prop);
if (ret == 0)
__tbp->usage = prop;
@@ -715,7 +720,7 @@ static int thermal_of_populate_trip(struct device_node *np,
* @np parameter and fills the read data into a __thermal_zone data structure
* and return this pointer.
*
- * TODO: Missing properties to parse: thermal-sensor-names and coefficients
+ * TODO: Missing properties to parse: thermal-sensor-names
*
* Return: On success returns a valid struct __thermal_zone,
* otherwise, it returns a corresponding ERR_PTR(). Caller must
@@ -727,7 +732,7 @@ thermal_of_build_thermal_zone(struct device_node *np)
struct device_node *child = NULL, *gchild;
struct __thermal_zone *tz;
int ret, i;
- u32 prop;
+ u32 prop, coef[2];
if (!np) {
pr_err("no thermal zone np\n");
@@ -752,6 +757,20 @@ thermal_of_build_thermal_zone(struct device_node *np)
}
tz->polling_delay = prop;
+ /*
+ * REVISIT: for now, the thermal framework supports only
+ * one sensor per thermal zone. Thus, we are considering
+ * only the first two values as slope and offset.
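+ * e.g. coefficients = <1 0> selects a slope of 1 and an offset of 0.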
+ */
+ ret = of_property_read_u32_array(np, "coefficients", coef, 2);
+ if (ret == 0) {
+ tz->slope = coef[0];
+ tz->offset = coef[1];
+ } else {
+ tz->slope = 1;
+ tz->offset = 0;
+ }
+
/* trips */
child = of_get_child_by_name(np, "trips");
@@ -865,6 +884,8 @@ int __init of_parse_thermal_zones(void)
for_each_child_of_node(np, child) {
struct thermal_zone_device *zone;
struct thermal_zone_params *tzp;
+ int i, mask = 0;
+ u32 prop;
/* Check whether child is enabled or not */
if (!of_device_is_available(child))
@@ -891,8 +912,18 @@ int __init of_parse_thermal_zones(void)
/* No hwmon because there might be hwmon drivers registering */
tzp->no_hwmon = true;
+ if (!of_property_read_u32(child, "sustainable-power", &prop))
+ tzp->sustainable_power = prop;
+
+ for (i = 0; i < tz->ntrips; i++)
+ mask |= 1 << i;
+
+ /* these two are left for temperature drivers to use */
+ tzp->slope = tz->slope;
+ tzp->offset = tz->offset;
+
zone = thermal_zone_device_register(child->name, tz->ntrips,
- 0, tz,
+ mask, tz,
ops, tzp,
tz->passive_delay,
tz->polling_delay);
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
new file mode 100644
index 000000000..251676902
--- /dev/null
+++ b/drivers/thermal/power_allocator.c
@@ -0,0 +1,544 @@
+/*
+ * A power allocator to manage temperature
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "Power allocator: " fmt
+
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/thermal_power_allocator.h>
+
+#include "thermal_core.h"
+
+#define FRAC_BITS 10
+#define int_to_frac(x) ((x) << FRAC_BITS)
+#define frac_to_int(x) ((x) >> FRAC_BITS)
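+
+/*
+ * With FRAC_BITS set to 10, fixed-point values are scaled by 1024,
+ * e.g. int_to_frac(3) == 3072 and frac_to_int(3072) == 3.
+ */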
+
+/**
+ * mul_frac() - multiply two fixed-point numbers
+ * @x: first multiplicand
+ * @y: second multiplicand
+ *
+ * Return: the result of multiplying two fixed-point numbers. The
+ * result is also a fixed-point number.
+ */
+static inline s64 mul_frac(s64 x, s64 y)
+{
+ return (x * y) >> FRAC_BITS;
+}
+
+/**
+ * div_frac() - divide two fixed-point numbers
+ * @x: the dividend
+ * @y: the divisor
+ *
+ * Return: the result of dividing two fixed-point numbers. The
+ * result is also a fixed-point number.
+ */
+static inline s64 div_frac(s64 x, s64 y)
+{
+ return div_s64(x << FRAC_BITS, y);
+}
+
+/**
+ * struct power_allocator_params - parameters for the power allocator governor
+ * @err_integral: accumulated error in the PID controller.
+ * @prev_err: error in the previous iteration of the PID controller.
+ * Used to calculate the derivative term.
+ * @trip_switch_on: first passive trip point of the thermal zone. The
+ * governor switches on when this trip point is crossed.
+ * @trip_max_desired_temperature: last passive trip point of the thermal
+ * zone. The temperature we are
+ * controlling for.
+ */
+struct power_allocator_params {
+ s64 err_integral;
+ s32 prev_err;
+ int trip_switch_on;
+ int trip_max_desired_temperature;
+};
+
+/**
+ * pid_controller() - PID controller
+ * @tz: thermal zone we are operating in
+ * @current_temp: the current temperature in millicelsius
+ * @control_temp: the target temperature in millicelsius
+ * @max_allocatable_power: maximum allocatable power for this thermal zone
+ *
+ * This PID controller increases the available power budget so that the
+ * temperature of the thermal zone gets as close as possible to
+ * @control_temp and limits the power if it exceeds it. k_po is the
+ * proportional term when we are overshooting, k_pu is the
+ * proportional term when we are undershooting. integral_cutoff is a
+ * threshold below which we stop accumulating the error. The
+ * accumulated error is only valid if the requested power will make
+ * the system warmer. If the system is mostly idle, there's no point
+ * in accumulating positive error.
+ *
+ * Return: The power budget for the next period.
+ */
+static u32 pid_controller(struct thermal_zone_device *tz,
+ unsigned long current_temp,
+ unsigned long control_temp,
+ u32 max_allocatable_power)
+{
+ s64 p, i, d, power_range;
+ s32 err, max_power_frac;
+ struct power_allocator_params *params = tz->governor_data;
+
+ max_power_frac = int_to_frac(max_allocatable_power);
+
+ err = ((s32)control_temp - (s32)current_temp);
+ err = int_to_frac(err);
+
+ /* Calculate the proportional term */
+ p = mul_frac(err < 0 ? tz->tzp->k_po : tz->tzp->k_pu, err);
+
+ /*
+ * Calculate the integral term
+ *
+ * if the error is less than cut off allow integration (but
+ * the integral is limited to max power)
+ */
+ i = mul_frac(tz->tzp->k_i, params->err_integral);
+
+ if (err < int_to_frac(tz->tzp->integral_cutoff)) {
+ s64 i_next = i + mul_frac(tz->tzp->k_i, err);
+
+ if (abs64(i_next) < max_power_frac) {
+ i = i_next;
+ params->err_integral += err;
+ }
+ }
+
+ /*
+ * Calculate the derivative term
+ *
+ * We do err - prev_err, so with a positive k_d, a decreasing
+ * error (i.e. driving closer to the line) results in less
+ * power being applied, slowing down the controller.
+ */
+ d = mul_frac(tz->tzp->k_d, err - params->prev_err);
+ d = div_frac(d, tz->passive_delay);
+ params->prev_err = err;
+
+ power_range = p + i + d;
+
+ /* feed-forward the known sustainable dissipatable power */
+ power_range = tz->tzp->sustainable_power + frac_to_int(power_range);
+
+ power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power);
+
+ trace_thermal_power_allocator_pid(tz, frac_to_int(err),
+ frac_to_int(params->err_integral),
+ frac_to_int(p), frac_to_int(i),
+ frac_to_int(d), power_range);
+
+ return power_range;
+}
+
+/**
+ * divvy_up_power() - divvy the allocated power between the actors
+ * @req_power: each actor's requested power
+ * @max_power: each actor's maximum available power
+ * @num_actors: size of the @req_power, @max_power and @granted_power arrays
+ * @total_req_power: sum of @req_power
+ * @power_range: total allocated power
+ * @granted_power: output array: each actor's granted power
+ * @extra_actor_power: an appropriately sized array to be used in the
+ * function as temporary storage of the extra power given
+ * to the actors
+ *
+ * This function divides the total allocated power (@power_range)
+ * fairly between the actors. It first tries to give each actor a
+ * share of the @power_range according to how much power it requested
+ * compared to the rest of the actors. For example, if only one actor
+ * requests power, then it receives all the @power_range. If
+ * three actors each request 1mW, each receives a third of the
+ * @power_range.
+ *
+ * If any actor received more than their maximum power, then that
+ * surplus is re-divvied among the actors based on how far they are
+ * from their respective maximums.
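+ *
+ * For example, with requests of {300, 100}, maximums of {200, 500}
+ * and a @power_range of 400, the first pass grants {300, 100}; the
+ * 100 units above the first actor's maximum are then re-divvied,
+ * giving a final grant of {200, 200}.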
+ *
+ * Granted power for each actor is written to @granted_power, which
+ * should've been allocated by the calling function.
+ */
+static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
+ u32 total_req_power, u32 power_range,
+ u32 *granted_power, u32 *extra_actor_power)
+{
+ u32 extra_power, capped_extra_power;
+ int i;
+
+ /*
+ * Prevent division by 0 if none of the actors request power.
+ */
+ if (!total_req_power)
+ total_req_power = 1;
+
+ capped_extra_power = 0;
+ extra_power = 0;
+ for (i = 0; i < num_actors; i++) {
+ u64 req_range = req_power[i] * power_range;
+
+ granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
+ total_req_power);
+
+ if (granted_power[i] > max_power[i]) {
+ extra_power += granted_power[i] - max_power[i];
+ granted_power[i] = max_power[i];
+ }
+
+ extra_actor_power[i] = max_power[i] - granted_power[i];
+ capped_extra_power += extra_actor_power[i];
+ }
+
+ if (!extra_power)
+ return;
+
+ /*
+ * Re-divvy the reclaimed extra among actors based on
+ * how far they are from the max
+ */
+ extra_power = min(extra_power, capped_extra_power);
+ if (capped_extra_power > 0)
+ for (i = 0; i < num_actors; i++)
+ granted_power[i] += (extra_actor_power[i] *
+ extra_power) / capped_extra_power;
+}
+
+static int allocate_power(struct thermal_zone_device *tz,
+ unsigned long current_temp,
+ unsigned long control_temp)
+{
+ struct thermal_instance *instance;
+ struct power_allocator_params *params = tz->governor_data;
+ u32 *req_power, *max_power, *granted_power, *extra_actor_power;
+ u32 *weighted_req_power;
+ u32 total_req_power, max_allocatable_power, total_weighted_req_power;
+ u32 total_granted_power, power_range;
+ int i, num_actors, total_weight, ret = 0;
+ int trip_max_desired_temperature = params->trip_max_desired_temperature;
+
+ mutex_lock(&tz->lock);
+
+ num_actors = 0;
+ total_weight = 0;
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if ((instance->trip == trip_max_desired_temperature) &&
+ cdev_is_power_actor(instance->cdev)) {
+ num_actors++;
+ total_weight += instance->weight;
+ }
+ }
+
+ /*
+ * We need to allocate five arrays of the same size:
+ * req_power, max_power, granted_power, extra_actor_power and
+ * weighted_req_power. They are going to be needed until this
+ * function returns. Allocate them all in one go to simplify
+ * the allocation and deallocation logic.
+ */
+ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
+ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
+ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
+ BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
+ req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
+ if (!req_power) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ max_power = &req_power[num_actors];
+ granted_power = &req_power[2 * num_actors];
+ extra_actor_power = &req_power[3 * num_actors];
+ weighted_req_power = &req_power[4 * num_actors];
+
+ i = 0;
+ total_weighted_req_power = 0;
+ total_req_power = 0;
+ max_allocatable_power = 0;
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ int weight;
+ struct thermal_cooling_device *cdev = instance->cdev;
+
+ if (instance->trip != trip_max_desired_temperature)
+ continue;
+
+ if (!cdev_is_power_actor(cdev))
+ continue;
+
+ if (cdev->ops->get_requested_power(cdev, tz, &req_power[i]))
+ continue;
+
+ if (!total_weight)
+ weight = 1 << FRAC_BITS;
+ else
+ weight = instance->weight;
+
+ weighted_req_power[i] = frac_to_int(weight * req_power[i]);
+
+ if (power_actor_get_max_power(cdev, tz, &max_power[i]))
+ continue;
+
+ total_req_power += req_power[i];
+ max_allocatable_power += max_power[i];
+ total_weighted_req_power += weighted_req_power[i];
+
+ i++;
+ }
+
+ power_range = pid_controller(tz, current_temp, control_temp,
+ max_allocatable_power);
+
+ divvy_up_power(weighted_req_power, max_power, num_actors,
+ total_weighted_req_power, power_range, granted_power,
+ extra_actor_power);
+
+ total_granted_power = 0;
+ i = 0;
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != trip_max_desired_temperature)
+ continue;
+
+ if (!cdev_is_power_actor(instance->cdev))
+ continue;
+
+ power_actor_set_power(instance->cdev, instance,
+ granted_power[i]);
+ total_granted_power += granted_power[i];
+
+ i++;
+ }
+
+ trace_thermal_power_allocator(tz, req_power, total_req_power,
+ granted_power, total_granted_power,
+ num_actors, power_range,
+ max_allocatable_power, current_temp,
+ (s32)control_temp - (s32)current_temp);
+
+ kfree(req_power);
+unlock:
+ mutex_unlock(&tz->lock);
+
+ return ret;
+}
+
+static int get_governor_trips(struct thermal_zone_device *tz,
+ struct power_allocator_params *params)
+{
+ int i, ret, last_passive;
+ bool found_first_passive;
+
+ found_first_passive = false;
+ last_passive = -1;
+ ret = -EINVAL;
+
+ for (i = 0; i < tz->trips; i++) {
+ enum thermal_trip_type type;
+
+ ret = tz->ops->get_trip_type(tz, i, &type);
+ if (ret)
+ return ret;
+
+ if (!found_first_passive) {
+ if (type == THERMAL_TRIP_PASSIVE) {
+ params->trip_switch_on = i;
+ found_first_passive = true;
+ }
+ } else if (type == THERMAL_TRIP_PASSIVE) {
+ last_passive = i;
+ } else {
+ break;
+ }
+ }
+
+ if (last_passive != -1) {
+ params->trip_max_desired_temperature = last_passive;
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void reset_pid_controller(struct power_allocator_params *params)
+{
+ params->err_integral = 0;
+ params->prev_err = 0;
+}
+
+static void allow_maximum_power(struct thermal_zone_device *tz)
+{
+ struct thermal_instance *instance;
+ struct power_allocator_params *params = tz->governor_data;
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if ((instance->trip != params->trip_max_desired_temperature) ||
+ (!cdev_is_power_actor(instance->cdev)))
+ continue;
+
+ instance->target = 0;
+ instance->cdev->updated = false;
+ thermal_cdev_update(instance->cdev);
+ }
+}
+
+/**
+ * power_allocator_bind() - bind the power_allocator governor to a thermal zone
+ * @tz: thermal zone to bind it to
+ *
+ * Check that the thermal zone is valid for this governor, that is, it
+ * has two thermal trips. If so, initialize the PID controller
+ * parameters and bind it to the thermal zone.
+ *
+ * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM
+ * if we ran out of memory.
+ */
+static int power_allocator_bind(struct thermal_zone_device *tz)
+{
+ int ret;
+ struct power_allocator_params *params;
+ unsigned long switch_on_temp, control_temp;
+ u32 temperature_threshold;
+
+ if (!tz->tzp || !tz->tzp->sustainable_power) {
+ dev_err(&tz->device,
+ "power_allocator: missing sustainable_power\n");
+ return -EINVAL;
+ }
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ ret = get_governor_trips(tz, params);
+ if (ret) {
+ dev_err(&tz->device,
+ "thermal zone %s has wrong trip setup for power allocator\n",
+ tz->type);
+ goto free;
+ }
+
+ ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
+ &switch_on_temp);
+ if (ret)
+ goto free;
+
+ ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
+ &control_temp);
+ if (ret)
+ goto free;
+
+ temperature_threshold = control_temp - switch_on_temp;
+
+ tz->tzp->k_po = tz->tzp->k_po ?:
+ int_to_frac(tz->tzp->sustainable_power) / temperature_threshold;
+ tz->tzp->k_pu = tz->tzp->k_pu ?:
+ int_to_frac(2 * tz->tzp->sustainable_power) /
+ temperature_threshold;
+ tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000;
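+ /*
+ * e.g. a sustainable_power of 2500 mW and a 10000 millicelsius
+ * window between the two passive trips give k_po = 0.25 and
+ * k_pu = 0.5 in fixed point.
+ */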
+ /*
+ * The default for k_d and integral_cutoff is 0, so we can
+ * leave them as they are.
+ */
+
+ reset_pid_controller(params);
+
+ tz->governor_data = params;
+
+ return 0;
+
+free:
+ kfree(params);
+ return ret;
+}
+
+static void power_allocator_unbind(struct thermal_zone_device *tz)
+{
+ dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
+ kfree(tz->governor_data);
+ tz->governor_data = NULL;
+}
+
+static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
+{
+ int ret;
+ unsigned long switch_on_temp, control_temp, current_temp;
+ struct power_allocator_params *params = tz->governor_data;
+
+ /*
+ * We get called for every trip point but we only need to do
+ * our calculations once
+ */
+ if (trip != params->trip_max_desired_temperature)
+ return 0;
+
+ ret = thermal_zone_get_temp(tz, &current_temp);
+ if (ret) {
+ dev_warn(&tz->device, "Failed to get temperature: %d\n", ret);
+ return ret;
+ }
+
+ ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
+ &switch_on_temp);
+ if (ret) {
+ dev_warn(&tz->device,
+ "Failed to get switch on temperature: %d\n", ret);
+ return ret;
+ }
+
+ if (current_temp < switch_on_temp) {
+ tz->passive = 0;
+ reset_pid_controller(params);
+ allow_maximum_power(tz);
+ return 0;
+ }
+
+ tz->passive = 1;
+
+ ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
+ &control_temp);
+ if (ret) {
+ dev_warn(&tz->device,
+ "Failed to get the maximum desired temperature: %d\n",
+ ret);
+ return ret;
+ }
+
+ return allocate_power(tz, current_temp, control_temp);
+}
+
+static struct thermal_governor thermal_gov_power_allocator = {
+ .name = "power_allocator",
+ .bind_to_tz = power_allocator_bind,
+ .unbind_from_tz = power_allocator_unbind,
+ .throttle = power_allocator_throttle,
+};
+
+int thermal_gov_power_allocator_register(void)
+{
+ return thermal_register_governor(&thermal_gov_power_allocator);
+}
+
+void thermal_gov_power_allocator_unregister(void)
+{
+ thermal_unregister_governor(&thermal_gov_power_allocator);
+}
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
new file mode 100644
index 000000000..c8d27b8fb
--- /dev/null
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+#define QPNP_TM_REG_TYPE 0x04
+#define QPNP_TM_REG_SUBTYPE 0x05
+#define QPNP_TM_REG_STATUS 0x08
+#define QPNP_TM_REG_SHUTDOWN_CTRL1 0x40
+#define QPNP_TM_REG_ALARM_CTRL 0x46
+
+#define QPNP_TM_TYPE 0x09
+#define QPNP_TM_SUBTYPE 0x08
+
+#define STATUS_STAGE_MASK 0x03
+
+#define SHUTDOWN_CTRL1_THRESHOLD_MASK 0x03
+
+#define ALARM_CTRL_FORCE_ENABLE 0x80
+
+/*
+ * Trip point values based on threshold control
+ * 0 = {105 C, 125 C, 145 C}
+ * 1 = {110 C, 130 C, 150 C}
+ * 2 = {115 C, 135 C, 155 C}
+ * 3 = {120 C, 140 C, 160 C}
+ */
+#define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */
+#define TEMP_STAGE_HYSTERESIS 2000
+
+#define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */
+#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
+
+#define THRESH_MIN 0
+
+/* Temperature in Milli Celsius reported during stage 0 if no ADC is present */
+#define DEFAULT_TEMP 37000
+
+struct qpnp_tm_chip {
+ struct regmap *map;
+ struct thermal_zone_device *tz_dev;
+ long temp;
+ unsigned int thresh;
+ unsigned int stage;
+ unsigned int prev_stage;
+ unsigned int base;
+ struct iio_channel *adc;
+};
+
+static int qpnp_tm_read(struct qpnp_tm_chip *chip, u16 addr, u8 *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(chip->map, chip->base + addr, &val);
+ if (ret < 0)
+ return ret;
+
+ *data = val;
+ return 0;
+}
+
+static int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 data)
+{
+ return regmap_write(chip->map, chip->base + addr, data);
+}
+
+/*
+ * This function updates the internal temp value based on the
+ * current thermal stage and threshold as well as the previous stage
+ */
+static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
+{
+ unsigned int stage;
+ int ret;
+ u8 reg = 0;
+
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg);
+ if (ret < 0)
+ return ret;
+
+ stage = reg & STATUS_STAGE_MASK;
+
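+ /*
+ * e.g. with threshold control 0, entering stage 2 reports
+ * 125000 + 2000 = 127000 mC, while dropping back to stage 1
+ * reports 125000 - 2000 = 123000 mC.
+ */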
+ if (stage > chip->stage) {
+ /* increasing stage, use lower bound */
+ chip->temp = (stage - 1) * TEMP_STAGE_STEP +
+ chip->thresh * TEMP_THRESH_STEP +
+ TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+ } else if (stage < chip->stage) {
+ /* decreasing stage, use upper bound */
+ chip->temp = stage * TEMP_STAGE_STEP +
+ chip->thresh * TEMP_THRESH_STEP -
+ TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+ }
+
+ chip->stage = stage;
+
+ return 0;
+}
+
+static int qpnp_tm_get_temp(void *data, long *temp)
+{
+ struct qpnp_tm_chip *chip = data;
+ int ret, milli_celsius;
+
+ if (!temp)
+ return -EINVAL;
+
+ if (IS_ERR(chip->adc)) {
+ ret = qpnp_tm_update_temp_no_adc(chip);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = iio_read_channel_processed(chip->adc, &milli_celsius);
+ if (ret < 0)
+ return ret;
+
+ chip->temp = milli_celsius;
+ }
+
+ *temp = chip->temp < 0 ? 0 : chip->temp;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops qpnp_tm_sensor_ops = {
+ .get_temp = qpnp_tm_get_temp,
+};
+
+static irqreturn_t qpnp_tm_isr(int irq, void *data)
+{
+ struct qpnp_tm_chip *chip = data;
+
+ thermal_zone_device_update(chip->tz_dev);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * This function initializes the internal temp value based only on the
+ * current thermal stage and threshold. It also sets up threshold control
+ * and disables the shutdown override.
+ */
+static int qpnp_tm_init(struct qpnp_tm_chip *chip)
+{
+ int ret;
+ u8 reg;
+
+ chip->thresh = THRESH_MIN;
+ chip->temp = DEFAULT_TEMP;
+
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg);
+ if (ret < 0)
+ return ret;
+
+ chip->stage = reg & STATUS_STAGE_MASK;
+
+ if (chip->stage)
+ chip->temp = chip->thresh * TEMP_THRESH_STEP +
+ (chip->stage - 1) * TEMP_STAGE_STEP +
+ TEMP_THRESH_MIN;
+
+ /*
+ * Set threshold and disable software override of stage 2 and 3
+ * shutdowns.
+ */
+ reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+ ret = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the thermal alarm PMIC module in always-on mode. */
+ reg = ALARM_CTRL_FORCE_ENABLE;
+ ret = qpnp_tm_write(chip, QPNP_TM_REG_ALARM_CTRL, reg);
+
+ return ret;
+}
+
+static int qpnp_tm_probe(struct platform_device *pdev)
+{
+ struct qpnp_tm_chip *chip;
+ struct device_node *node;
+ u8 type, subtype;
+ u32 res[2];
+ int ret, irq;
+
+ node = pdev->dev.of_node;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, chip);
+
+ chip->map = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chip->map)
+ return -ENXIO;
+
+ ret = of_property_read_u32_array(node, "reg", res, 2);
+ if (ret < 0)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /* ADC based measurements are optional */
+ chip->adc = iio_channel_get(&pdev->dev, "thermal");
+ if (PTR_ERR(chip->adc) == -EPROBE_DEFER)
+ return PTR_ERR(chip->adc);
+
+ chip->base = res[0];
+
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, &type);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not read type\n");
+ goto fail;
+ }
+
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_SUBTYPE, &subtype);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not read subtype\n");
+ goto fail;
+ }
+
+ if (type != QPNP_TM_TYPE || subtype != QPNP_TM_SUBTYPE) {
+ dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
+ type, subtype);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = qpnp_tm_init(chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "init failed\n");
+ goto fail;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qpnp_tm_isr,
+ IRQF_ONESHOT, node->name, chip);
+ if (ret < 0)
+ goto fail;
+
+ chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
+ &qpnp_tm_sensor_ops);
+ if (IS_ERR(chip->tz_dev)) {
+ dev_err(&pdev->dev, "failed to register sensor\n");
+ ret = PTR_ERR(chip->tz_dev);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ if (!IS_ERR(chip->adc))
+ iio_channel_release(chip->adc);
+
+ return ret;
+}
+
+static int qpnp_tm_remove(struct platform_device *pdev)
+{
+ struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev);
+ if (!IS_ERR(chip->adc))
+ iio_channel_release(chip->adc);
+
+ return 0;
+}
+
+static const struct of_device_id qpnp_tm_match_table[] = {
+ { .compatible = "qcom,spmi-temp-alarm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qpnp_tm_match_table);
+
+static struct platform_driver qpnp_tm_driver = {
+ .driver = {
+ .name = "spmi-temp-alarm",
+ .of_match_table = qpnp_tm_match_table,
+ },
+ .probe = qpnp_tm_probe,
+ .remove = qpnp_tm_remove,
+};
+module_platform_driver(qpnp_tm_driver);
+
+MODULE_ALIAS("platform:spmi-temp-alarm");
+MODULE_DESCRIPTION("QPNP PMIC Temperature Alarm driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c8e35c1a4..e0da3865e 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
config EXYNOS_THERMAL
tristate "Exynos thermal management unit driver"
- depends on OF
+ depends on THERMAL_OF
help
If you say yes here you get support for the TMU (Thermal Management
Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 67098a8a7..c96ff10b8 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -97,6 +97,32 @@
#define EXYNOS4412_MUX_ADDR_VALUE 6
#define EXYNOS4412_MUX_ADDR_SHIFT 20
+/* Exynos5433 specific registers */
+#define EXYNOS5433_TMU_REG_CONTROL1 0x024
+#define EXYNOS5433_TMU_SAMPLING_INTERVAL 0x02c
+#define EXYNOS5433_TMU_COUNTER_VALUE0 0x030
+#define EXYNOS5433_TMU_COUNTER_VALUE1 0x034
+#define EXYNOS5433_TMU_REG_CURRENT_TEMP1 0x044
+#define EXYNOS5433_THD_TEMP_RISE3_0 0x050
+#define EXYNOS5433_THD_TEMP_RISE7_4 0x054
+#define EXYNOS5433_THD_TEMP_FALL3_0 0x060
+#define EXYNOS5433_THD_TEMP_FALL7_4 0x064
+#define EXYNOS5433_TMU_REG_INTEN 0x0c0
+#define EXYNOS5433_TMU_REG_INTPEND 0x0c8
+#define EXYNOS5433_TMU_EMUL_CON 0x110
+#define EXYNOS5433_TMU_PD_DET_EN 0x130
+
+#define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT 16
+#define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT 23
+#define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK \
+ (0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT)
+#define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK BIT(23)
+
+#define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING 0
+#define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING 1
+
+#define EXYNOS5433_PD_DET_EN 1
+
/*exynos5440 specific registers*/
#define EXYNOS5440_TMU_S0_7_TRIM 0x000
#define EXYNOS5440_TMU_S0_7_CTRL 0x020
@@ -484,6 +510,101 @@ out:
return ret;
}
+static int exynos5433_tmu_initialize(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct exynos_tmu_platform_data *pdata = data->pdata;
+ struct thermal_zone_device *tz = data->tzd;
+ unsigned int status, trim_info;
+ unsigned int rising_threshold = 0, falling_threshold = 0;
+ unsigned long temp, temp_hist;
+ int ret = 0, threshold_code, i, sensor_id, cal_type;
+
+ status = readb(data->base + EXYNOS_TMU_REG_STATUS);
+ if (!status) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
+ sanitize_temp_error(data, trim_info);
+
+ /* Read the temperature sensor id */
+ sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK)
+ >> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT;
+ dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id);
+
+ /* Read the calibration mode */
+ writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO);
+ cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK)
+ >> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT;
+
+ switch (cal_type) {
+ case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING:
+ pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
+ break;
+ case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING:
+ pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
+ break;
+ default:
+ pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
+ break;
+ }
+
+ dev_info(&pdev->dev, "Calibration type is %d-point calibration\n",
+ cal_type ? 2 : 1);
+
+ /* Write temperature code for rising and falling threshold */
+ for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
+ int rising_reg_offset, falling_reg_offset;
+ int j = 0;
+
+ switch (i) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0;
+ falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0;
+ j = i;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4;
+ falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4;
+ j = i - 4;
+ break;
+ default:
+ continue;
+ }
+
+ /* Write temperature code for rising threshold */
+ tz->ops->get_trip_temp(tz, i, &temp);
+ temp /= MCELSIUS;
+ threshold_code = temp_to_code(data, temp);
+
+ rising_threshold = readl(data->base + rising_reg_offset);
+ rising_threshold |= (threshold_code << j * 8);
+ writel(rising_threshold, data->base + rising_reg_offset);
+
+ /* Write temperature code for falling threshold */
+ tz->ops->get_trip_hyst(tz, i, &temp_hist);
+ temp_hist = temp - (temp_hist / MCELSIUS);
+ threshold_code = temp_to_code(data, temp_hist);
+
+ falling_threshold = readl(data->base + falling_reg_offset);
+ falling_threshold &= ~(0xff << j * 8);
+ falling_threshold |= (threshold_code << j * 8);
+ writel(falling_threshold, data->base + falling_reg_offset);
+ }
+
+ data->tmu_clear_irqs(data);
+out:
+ return ret;
+}
+
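
exynos5433_tmu_initialize() above packs one 8-bit threshold code per trip point into two 32-bit registers: trips 0-3 land in byte lanes 0-3 of THD_TEMP_RISE3_0/FALL3_0 and trips 4-7 in the matching lanes of THD_TEMP_RISE7_4/FALL7_4. A standalone sketch of the byte-lane packing (userspace C with invented threshold codes; unlike the driver's rising path, this sketch clears the lane before OR-ing in the new code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t code[8] = { 0x50, 0x5a, 0x64, 0x6e, 0x78, 0x82, 0x8c, 0x96 };
        uint32_t rise3_0 = 0, rise7_4 = 0;

        for (int i = 0; i < 8; i++) {
                int j = (i < 4) ? i : i - 4;            /* byte lane within the register */
                uint32_t *reg = (i < 4) ? &rise3_0 : &rise7_4;

                *reg &= ~(0xffu << (j * 8));            /* clear the lane */
                *reg |= (uint32_t)code[i] << (j * 8);   /* insert the 8-bit code */
        }

        printf("THD_TEMP_RISE3_0 = 0x%08x\n", (unsigned int)rise3_0);
        printf("THD_TEMP_RISE7_4 = 0x%08x\n", (unsigned int)rise7_4);
        return 0;
}
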
static int exynos5440_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
@@ -643,6 +764,48 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
+static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct thermal_zone_device *tz = data->tzd;
+ unsigned int con, interrupt_en, pd_det_en;
+
+ con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
+
+ if (on) {
+ con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ interrupt_en =
+ (of_thermal_is_trip_valid(tz, 7)
+ << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
+ (of_thermal_is_trip_valid(tz, 6)
+ << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
+ (of_thermal_is_trip_valid(tz, 5)
+ << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
+ (of_thermal_is_trip_valid(tz, 4)
+ << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
+ (of_thermal_is_trip_valid(tz, 3)
+ << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
+ (of_thermal_is_trip_valid(tz, 2)
+ << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
+ (of_thermal_is_trip_valid(tz, 1)
+ << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
+ (of_thermal_is_trip_valid(tz, 0)
+ << EXYNOS7_TMU_INTEN_RISE0_SHIFT);
+
+ interrupt_en |=
+ interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
+ } else {
+ con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ interrupt_en = 0; /* Disable all interrupts */
+ }
+
+ pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0;
+
+ writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN);
+ writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN);
+ writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+}
+
static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
@@ -770,6 +933,8 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
if (data->soc == SOC_ARCH_EXYNOS5260)
emul_con = EXYNOS5260_EMUL_CON;
+ else if (data->soc == SOC_ARCH_EXYNOS5433)
+ emul_con = EXYNOS5433_TMU_EMUL_CON;
else if (data->soc == SOC_ARCH_EXYNOS7)
emul_con = EXYNOS7_TMU_REG_EMUL_CON;
else
@@ -882,6 +1047,9 @@ static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
} else if (data->soc == SOC_ARCH_EXYNOS7) {
tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
+ } else if (data->soc == SOC_ARCH_EXYNOS5433) {
+ tmu_intstat = EXYNOS5433_TMU_REG_INTPEND;
+ tmu_intclear = EXYNOS5433_TMU_REG_INTPEND;
} else {
tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
@@ -926,6 +1094,7 @@ static const struct of_device_id exynos_tmu_match[] = {
{ .compatible = "samsung,exynos5260-tmu", },
{ .compatible = "samsung,exynos5420-tmu", },
{ .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
+ { .compatible = "samsung,exynos5433-tmu", },
{ .compatible = "samsung,exynos5440-tmu", },
{ .compatible = "samsung,exynos7-tmu", },
{ /* sentinel */ },
@@ -949,6 +1118,8 @@ static int exynos_of_get_soc_type(struct device_node *np)
else if (of_device_is_compatible(np,
"samsung,exynos5420-tmu-ext-triminfo"))
return SOC_ARCH_EXYNOS5420_TRIMINFO;
+ else if (of_device_is_compatible(np, "samsung,exynos5433-tmu"))
+ return SOC_ARCH_EXYNOS5433;
else if (of_device_is_compatible(np, "samsung,exynos5440-tmu"))
return SOC_ARCH_EXYNOS5440;
else if (of_device_is_compatible(np, "samsung,exynos7-tmu"))
@@ -1069,6 +1240,13 @@ static int exynos_map_dt_data(struct platform_device *pdev)
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
break;
+ case SOC_ARCH_EXYNOS5433:
+ data->tmu_initialize = exynos5433_tmu_initialize;
+ data->tmu_control = exynos5433_tmu_control;
+ data->tmu_read = exynos4412_tmu_read;
+ data->tmu_set_emulation = exynos4412_tmu_set_emulation;
+ data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
+ break;
case SOC_ARCH_EXYNOS5440:
data->tmu_initialize = exynos5440_tmu_initialize;
data->tmu_control = exynos5440_tmu_control;
@@ -1118,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
static int exynos_tmu_probe(struct platform_device *pdev)
{
- struct exynos_tmu_platform_data *pdata;
struct exynos_tmu_data *data;
int ret;
@@ -1140,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
if (ret)
goto err_sensor;
- pdata = data->pdata;
-
INIT_WORK(&data->irq_work, exynos_tmu_work);
data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1172,7 +1347,9 @@ static int exynos_tmu_probe(struct platform_device *pdev)
goto err_clk_sec;
}
- if (data->soc == SOC_ARCH_EXYNOS7) {
+ switch (data->soc) {
+ case SOC_ARCH_EXYNOS5433:
+ case SOC_ARCH_EXYNOS7:
data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
if (IS_ERR(data->sclk)) {
dev_err(&pdev->dev, "Failed to get sclk\n");
@@ -1184,7 +1361,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
goto err_clk;
}
}
- }
+ break;
+ default:
+ break;
+ }
ret = exynos_tmu_initialize(pdev);
if (ret) {
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index 4d71ec6c9..440c7140b 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -33,6 +33,7 @@ enum soc_type {
SOC_ARCH_EXYNOS5260,
SOC_ARCH_EXYNOS5420,
SOC_ARCH_EXYNOS5420_TRIMINFO,
+ SOC_ARCH_EXYNOS5433,
SOC_ARCH_EXYNOS5440,
SOC_ARCH_EXYNOS7,
};
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 4108db7e1..4ca211be4 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -75,6 +75,58 @@ static struct thermal_governor *__find_governor(const char *name)
return NULL;
}
+/**
+ * bind_previous_governor() - bind the previous governor of the thermal zone
+ * @tz: a valid pointer to a struct thermal_zone_device
+ * @failed_gov_name: the name of the governor that failed to register
+ *
+ * Register the previous governor of the thermal zone after a new
+ * governor has failed to be bound.
+ */
+static void bind_previous_governor(struct thermal_zone_device *tz,
+ const char *failed_gov_name)
+{
+ if (tz->governor && tz->governor->bind_to_tz) {
+ if (tz->governor->bind_to_tz(tz)) {
+ dev_err(&tz->device,
+ "governor %s failed to bind and the previous one (%s) failed to bind again, thermal zone %s has no governor\n",
+ failed_gov_name, tz->governor->name, tz->type);
+ tz->governor = NULL;
+ }
+ }
+}
+
+/**
+ * thermal_set_governor() - Switch to another governor
+ * @tz: a valid pointer to a struct thermal_zone_device
+ * @new_gov: pointer to the new governor
+ *
+ * Change the governor of thermal zone @tz.
+ *
+ * Return: 0 on success, an error if the new governor's bind_to_tz() failed.
+ */
+static int thermal_set_governor(struct thermal_zone_device *tz,
+ struct thermal_governor *new_gov)
+{
+ int ret = 0;
+
+ if (tz->governor && tz->governor->unbind_from_tz)
+ tz->governor->unbind_from_tz(tz);
+
+ if (new_gov && new_gov->bind_to_tz) {
+ ret = new_gov->bind_to_tz(tz);
+ if (ret) {
+ bind_previous_governor(tz, new_gov->name);
+
+ return ret;
+ }
+ }
+
+ tz->governor = new_gov;
+
+ return ret;
+}
+
int thermal_register_governor(struct thermal_governor *governor)
{
int err;
@@ -107,8 +159,15 @@ int thermal_register_governor(struct thermal_governor *governor)
name = pos->tzp->governor_name;
- if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH))
- pos->governor = governor;
+ if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH)) {
+ int ret;
+
+ ret = thermal_set_governor(pos, governor);
+ if (ret)
+ dev_err(&pos->device,
+ "Failed to set governor %s for thermal zone %s: %d\n",
+ governor->name, pos->type, ret);
+ }
}
mutex_unlock(&thermal_list_lock);
@@ -134,7 +193,7 @@ void thermal_unregister_governor(struct thermal_governor *governor)
list_for_each_entry(pos, &thermal_tz_list, node) {
if (!strncasecmp(pos->governor->name, governor->name,
THERMAL_NAME_LENGTH))
- pos->governor = NULL;
+ thermal_set_governor(pos, NULL);
}
mutex_unlock(&thermal_list_lock);
@@ -218,7 +277,8 @@ static void print_bind_err_msg(struct thermal_zone_device *tz,
static void __bind(struct thermal_zone_device *tz, int mask,
struct thermal_cooling_device *cdev,
- unsigned long *limits)
+ unsigned long *limits,
+ unsigned int weight)
{
int i, ret;
@@ -233,7 +293,8 @@ static void __bind(struct thermal_zone_device *tz, int mask,
upper = limits[i * 2 + 1];
}
ret = thermal_zone_bind_cooling_device(tz, i, cdev,
- upper, lower);
+ upper, lower,
+ weight);
if (ret)
print_bind_err_msg(tz, cdev, ret);
}
@@ -280,7 +341,8 @@ static void bind_cdev(struct thermal_cooling_device *cdev)
continue;
tzp->tbp[i].cdev = cdev;
__bind(pos, tzp->tbp[i].trip_mask, cdev,
- tzp->tbp[i].binding_limits);
+ tzp->tbp[i].binding_limits,
+ tzp->tbp[i].weight);
}
}
@@ -319,7 +381,8 @@ static void bind_tz(struct thermal_zone_device *tz)
continue;
tzp->tbp[i].cdev = pos;
__bind(tz, tzp->tbp[i].trip_mask, pos,
- tzp->tbp[i].binding_limits);
+ tzp->tbp[i].binding_limits,
+ tzp->tbp[i].weight);
}
}
exit:
@@ -713,7 +776,8 @@ passive_store(struct device *dev, struct device_attribute *attr,
thermal_zone_bind_cooling_device(tz,
THERMAL_TRIPS_NONE, cdev,
THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT);
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
}
mutex_unlock(&thermal_list_lock);
if (!tz->passive_delay)
@@ -765,8 +829,9 @@ policy_store(struct device *dev, struct device_attribute *attr,
if (!gov)
goto exit;
- tz->governor = gov;
- ret = count;
+ ret = thermal_set_governor(tz, gov);
+ if (!ret)
+ ret = count;
exit:
mutex_unlock(&tz->lock);
@@ -810,6 +875,158 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
#endif/*CONFIG_THERMAL_EMULATION*/
+static ssize_t
+sustainable_power_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ if (tz->tzp)
+ return sprintf(buf, "%u\n", tz->tzp->sustainable_power);
+ else
+ return -EIO;
+}
+
+static ssize_t
+sustainable_power_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ u32 sustainable_power;
+
+ if (!tz->tzp)
+ return -EIO;
+
+ if (kstrtou32(buf, 10, &sustainable_power))
+ return -EINVAL;
+
+ tz->tzp->sustainable_power = sustainable_power;
+
+ return count;
+}
+static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
+ sustainable_power_store);
+
+#define create_s32_tzp_attr(name) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *devattr, \
+ char *buf) \
+ { \
+ struct thermal_zone_device *tz = to_thermal_zone(dev); \
+ \
+ if (tz->tzp) \
+ return sprintf(buf, "%u\n", tz->tzp->name); \
+ else \
+ return -EIO; \
+ } \
+ \
+ static ssize_t \
+ name##_store(struct device *dev, struct device_attribute *devattr, \
+ const char *buf, size_t count) \
+ { \
+ struct thermal_zone_device *tz = to_thermal_zone(dev); \
+ s32 value; \
+ \
+ if (!tz->tzp) \
+ return -EIO; \
+ \
+ if (kstrtos32(buf, 10, &value)) \
+ return -EINVAL; \
+ \
+ tz->tzp->name = value; \
+ \
+ return count; \
+ } \
+ static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, name##_show, name##_store)
+
+create_s32_tzp_attr(k_po);
+create_s32_tzp_attr(k_pu);
+create_s32_tzp_attr(k_i);
+create_s32_tzp_attr(k_d);
+create_s32_tzp_attr(integral_cutoff);
+create_s32_tzp_attr(slope);
+create_s32_tzp_attr(offset);
+#undef create_s32_tzp_attr
+
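
create_s32_tzp_attr() above uses ## token pasting to stamp out an identical show/store pair for each thermal-zone parameter, so the seven attributes that follow share a single definition. A standalone illustration of the same pattern (userspace C with a hypothetical params struct, not the sysfs machinery):

#include <stdio.h>
#include <stdlib.h>

struct params { int k_po; int k_pu; };

#define DEFINE_PARAM_ACCESSORS(name)                                    \
        static int name##_show(const struct params *p, char *buf)      \
        {                                                               \
                return sprintf(buf, "%d\n", p->name);                   \
        }                                                               \
        static void name##_store(struct params *p, const char *buf)    \
        {                                                               \
                p->name = atoi(buf);                                    \
        }

DEFINE_PARAM_ACCESSORS(k_po)
DEFINE_PARAM_ACCESSORS(k_pu)

int main(void)
{
        struct params p = { 0, 0 };
        char buf[16];

        k_po_store(&p, "120");
        k_pu_store(&p, "60");
        k_po_show(&p, buf);
        printf("k_po = %s", buf);
        k_pu_show(&p, buf);
        printf("k_pu = %s", buf);
        return 0;
}
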
+static struct device_attribute *dev_tzp_attrs[] = {
+ &dev_attr_sustainable_power,
+ &dev_attr_k_po,
+ &dev_attr_k_pu,
+ &dev_attr_k_i,
+ &dev_attr_k_d,
+ &dev_attr_integral_cutoff,
+ &dev_attr_slope,
+ &dev_attr_offset,
+};
+
+static int create_tzp_attrs(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev_tzp_attrs); i++) {
+ int ret;
+ struct device_attribute *dev_attr = dev_tzp_attrs[i];
+
+ ret = device_create_file(dev, dev_attr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * power_actor_get_max_power() - get the maximum power that a cdev can consume
+ * @cdev: pointer to &thermal_cooling_device
+ * @tz: a valid thermal zone device pointer
+ * @max_power: pointer in which to store the maximum power
+ *
+ * Calculate the maximum power consumption in milliwatts that the
+ * cooling device can currently consume and store it in @max_power.
+ *
+ * Return: 0 on success, -EINVAL if @cdev doesn't support the
+ * power_actor API or -E* on other error.
+ */
+int power_actor_get_max_power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz, u32 *max_power)
+{
+ if (!cdev_is_power_actor(cdev))
+ return -EINVAL;
+
+ return cdev->ops->state2power(cdev, tz, 0, max_power);
+}
+
+/**
+ * power_actor_set_power() - limit the maximum power that a cooling device can consume
+ * @cdev: pointer to &thermal_cooling_device
+ * @instance: thermal instance to update
+ * @power: the power in milliwatts
+ *
+ * Set the cooling device to consume at most @power milliwatts.
+ *
+ * Return: 0 on success, -EINVAL if the cooling device does not
+ * implement the power actor API or -E* for other failures.
+ */
+int power_actor_set_power(struct thermal_cooling_device *cdev,
+ struct thermal_instance *instance, u32 power)
+{
+ unsigned long state;
+ int ret;
+
+ if (!cdev_is_power_actor(cdev))
+ return -EINVAL;
+
+ ret = cdev->ops->power2state(cdev, instance->tz, power, &state);
+ if (ret)
+ return ret;
+
+ instance->target = state;
+ cdev->updated = false;
+ thermal_cdev_update(cdev);
+
+ return 0;
+}
+
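
power_actor_get_max_power() and power_actor_set_power() above lean on two cooling-device callbacks: state2power() reports how much power a given cooling state may consume (state 0 gives the maximum), and power2state() maps a power budget back to a cooling state, which is then applied through thermal_cdev_update(). A standalone mock of that flow (userspace C, invented power table and simplified signatures, not the kernel API):

#include <stdio.h>

/* Invented power model: deeper cooling states consume less power (mW). */
static const unsigned int state_power_mw[] = { 2000, 1500, 1000, 500 };
#define NR_STATES (sizeof(state_power_mw) / sizeof(state_power_mw[0]))

/* Like ->state2power(): power the device may consume while clamped at @state. */
static unsigned int state2power(unsigned long state)
{
        return state_power_mw[state];
}

/* Like ->power2state(): shallowest state whose consumption fits @power_mw. */
static unsigned long power2state(unsigned int power_mw)
{
        for (unsigned long state = 0; state < NR_STATES; state++)
                if (state_power_mw[state] <= power_mw)
                        return state;
        return NR_STATES - 1;   /* nothing fits: fully throttled */
}

int main(void)
{
        unsigned int max_power = state2power(0);        /* as power_actor_get_max_power() does */
        unsigned long target = power2state(1200);       /* 1200 mW budget -> state 2 here */

        printf("max power %u mW, 1200 mW budget -> state %lu\n",
               max_power, target);
        return 0;
}
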
static DEVICE_ATTR(type, 0444, type_show, NULL);
static DEVICE_ATTR(temp, 0444, temp_show, NULL);
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
@@ -917,6 +1134,34 @@ static const struct attribute_group *cooling_device_attr_groups[] = {
NULL,
};
+static ssize_t
+thermal_cooling_device_weight_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_instance *instance;
+
+ instance = container_of(attr, struct thermal_instance, weight_attr);
+
+ return sprintf(buf, "%d\n", instance->weight);
+}
+
+static ssize_t
+thermal_cooling_device_weight_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_instance *instance;
+ int ret, weight;
+
+ ret = kstrtoint(buf, 0, &weight);
+ if (ret)
+ return ret;
+
+ instance = container_of(attr, struct thermal_instance, weight_attr);
+ instance->weight = weight;
+
+ return count;
+}
/* Device management */
/**
@@ -931,6 +1176,9 @@ static const struct attribute_group *cooling_device_attr_groups[] = {
* @lower: the minimum cooling state that can be used for this trip point.
* THERMAL_NO_LIMIT means no lower limit,
* and the cooling device can be in cooling state 0.
+ * @weight: The weight of the cooling device to be bound to the
+ * thermal zone. Use THERMAL_WEIGHT_DEFAULT for the
+ * default value
*
* This interface function binds a thermal cooling device to a certain trip
* point of a thermal zone device.
@@ -941,7 +1189,8 @@ static const struct attribute_group *cooling_device_attr_groups[] = {
int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
int trip,
struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower)
+ unsigned long upper, unsigned long lower,
+ unsigned int weight)
{
struct thermal_instance *dev;
struct thermal_instance *pos;
@@ -986,6 +1235,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
dev->upper = upper;
dev->lower = lower;
dev->target = THERMAL_NO_TARGET;
+ dev->weight = weight;
result = get_idr(&tz->idr, &tz->lock, &dev->id);
if (result)
@@ -1006,6 +1256,16 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (result)
goto remove_symbol_link;
+ sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
+ sysfs_attr_init(&dev->weight_attr.attr);
+ dev->weight_attr.attr.name = dev->weight_attr_name;
+ dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;
+ dev->weight_attr.show = thermal_cooling_device_weight_show;
+ dev->weight_attr.store = thermal_cooling_device_weight_store;
+ result = device_create_file(&tz->device, &dev->weight_attr);
+ if (result)
+ goto remove_trip_file;
+
mutex_lock(&tz->lock);
mutex_lock(&cdev->lock);
list_for_each_entry(pos, &tz->thermal_instances, tz_node)
@@ -1023,6 +1283,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (!result)
return 0;
+ device_remove_file(&tz->device, &dev->weight_attr);
+remove_trip_file:
device_remove_file(&tz->device, &dev->attr);
remove_symbol_link:
sysfs_remove_link(&tz->device.kobj, dev->name);
@@ -1071,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
return -ENODEV;
unbind:
+ device_remove_file(&tz->device, &pos->weight_attr);
device_remove_file(&tz->device, &pos->attr);
sysfs_remove_link(&tz->device.kobj, pos->name);
release_idr(&tz->idr, &tz->lock, pos->id);
@@ -1377,7 +1640,8 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
tz->trip_temp_attrs[indx].name;
tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
- if (mask & (1 << indx)) {
+ if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS) &&
+ mask & (1 << indx)) {
tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
tz->trip_temp_attrs[indx].attr.store =
trip_point_temp_store;
@@ -1454,7 +1718,7 @@ static void remove_trip_attrs(struct thermal_zone_device *tz)
struct thermal_zone_device *thermal_zone_device_register(const char *type,
int trips, int mask, void *devdata,
struct thermal_zone_device_ops *ops,
- const struct thermal_zone_params *tzp,
+ struct thermal_zone_params *tzp,
int passive_delay, int polling_delay)
{
struct thermal_zone_device *tz;
@@ -1462,6 +1726,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
int result;
int count;
int passive = 0;
+ struct thermal_governor *governor;
if (type && strlen(type) >= THERMAL_NAME_LENGTH)
return ERR_PTR(-EINVAL);
@@ -1548,13 +1813,24 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
if (result)
goto unregister;
+ /* Add thermal zone params */
+ result = create_tzp_attrs(&tz->device);
+ if (result)
+ goto unregister;
+
/* Update 'this' zone's governor information */
mutex_lock(&thermal_governor_lock);
if (tz->tzp)
- tz->governor = __find_governor(tz->tzp->governor_name);
+ governor = __find_governor(tz->tzp->governor_name);
else
- tz->governor = def_governor;
+ governor = def_governor;
+
+ result = thermal_set_governor(tz, governor);
+ if (result) {
+ mutex_unlock(&thermal_governor_lock);
+ goto unregister;
+ }
mutex_unlock(&thermal_governor_lock);
@@ -1643,7 +1919,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
device_remove_file(&tz->device, &dev_attr_mode);
device_remove_file(&tz->device, &dev_attr_policy);
remove_trip_attrs(tz);
- tz->governor = NULL;
+ thermal_set_governor(tz, NULL);
thermal_remove_hwmon_sysfs(tz);
release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
@@ -1799,7 +2075,11 @@ static int __init thermal_register_governors(void)
if (result)
return result;
- return thermal_gov_user_space_register();
+ result = thermal_gov_user_space_register();
+ if (result)
+ return result;
+
+ return thermal_gov_power_allocator_register();
}
static void thermal_unregister_governors(void)
@@ -1808,6 +2088,7 @@ static void thermal_unregister_governors(void)
thermal_gov_fair_share_unregister();
thermal_gov_bang_bang_unregister();
thermal_gov_user_space_unregister();
+ thermal_gov_power_allocator_unregister();
}
static int __init thermal_init(void)
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 8e391812e..d7ac1fccd 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -46,8 +46,11 @@ struct thermal_instance {
unsigned long target; /* expected cooling state */
char attr_name[THERMAL_NAME_LENGTH];
struct device_attribute attr;
+ char weight_attr_name[THERMAL_NAME_LENGTH];
+ struct device_attribute weight_attr;
struct list_head tz_node; /* node in tz->thermal_instances */
struct list_head cdev_node; /* node in cdev->thermal_instances */
+ unsigned int weight; /* The weight of the cooling device */
};
int thermal_register_governor(struct thermal_governor *);
@@ -85,6 +88,14 @@ static inline int thermal_gov_user_space_register(void) { return 0; }
static inline void thermal_gov_user_space_unregister(void) {}
#endif /* CONFIG_THERMAL_GOV_USER_SPACE */
+#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
+int thermal_gov_power_allocator_register(void);
+void thermal_gov_power_allocator_unregister(void);
+#else
+static inline int thermal_gov_power_allocator_register(void) { return 0; }
+static inline void thermal_gov_power_allocator_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */
+
/* device tree support */
#ifdef CONFIG_THERMAL_OF
int of_parse_thermal_zones(void);
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index bc14dc874..10c47c048 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -43,6 +43,8 @@
#include "ti-bandgap.h"
+static int ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id);
+
/*** Helper functions to access registers and their bitfields ***/
/**
@@ -103,19 +105,15 @@ do { \
*/
static int ti_bandgap_power(struct ti_bandgap *bgp, bool on)
{
- int i, ret = 0;
+ int i;
- if (!TI_BANDGAP_HAS(bgp, POWER_SWITCH)) {
- ret = -ENOTSUPP;
- goto exit;
- }
+ if (!TI_BANDGAP_HAS(bgp, POWER_SWITCH))
+ return -ENOTSUPP;
for (i = 0; i < bgp->conf->sensor_count; i++)
/* active on 0 */
RMW_BITS(bgp, i, temp_sensor_ctrl, bgap_tempsoff_mask, !on);
-
-exit:
- return ret;
+ return 0;
}
/**
@@ -298,18 +296,13 @@ static
int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val, int *t)
{
const struct ti_bandgap_data *conf = bgp->conf;
- int ret = 0;
/* look up for temperature in the table and return the temperature */
- if (adc_val < conf->adc_start_val || adc_val > conf->adc_end_val) {
- ret = -ERANGE;
- goto exit;
- }
+ if (adc_val < conf->adc_start_val || adc_val > conf->adc_end_val)
+ return -ERANGE;
*t = bgp->conf->conv_table[adc_val - conf->adc_start_val];
-
-exit:
- return ret;
+ return 0;
}
/**
@@ -330,16 +323,14 @@ int ti_bandgap_mcelsius_to_adc(struct ti_bandgap *bgp, long temp, int *adc)
{
const struct ti_bandgap_data *conf = bgp->conf;
const int *conv_table = bgp->conf->conv_table;
- int high, low, mid, ret = 0;
+ int high, low, mid;
low = 0;
high = conf->adc_end_val - conf->adc_start_val;
mid = (high + low) / 2;
- if (temp < conv_table[low] || temp > conv_table[high]) {
- ret = -ERANGE;
- goto exit;
- }
+ if (temp < conv_table[low] || temp > conv_table[high])
+ return -ERANGE;
while (low < high) {
if (temp < conv_table[mid])
@@ -350,9 +341,7 @@ int ti_bandgap_mcelsius_to_adc(struct ti_bandgap *bgp, long temp, int *adc)
}
*adc = conf->adc_start_val + low;
-
-exit:
- return ret;
+ return 0;
}
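
ti_bandgap_mcelsius_to_adc() above performs the reverse lookup of the conversion table: the table maps ADC codes to millicelsius in increasing order, the requested temperature is range-checked against both ends, and a binary search narrows down the matching code. A standalone sketch of the same idea (userspace C, invented table; the driver's exact rounding in the search may differ):

#include <stdio.h>

/* Invented, monotonically increasing ADC-code -> millicelsius table. */
static const int conv_table[] = {
        -40000, -20000, 0, 20000, 40000, 60000, 80000, 100000, 125000
};
#define TABLE_LEN ((int)(sizeof(conv_table) / sizeof(conv_table[0])))

/* Index of the largest table entry at or below @temp, or -1 when @temp is
 * outside the table range (the driver returns -ERANGE in that case). */
static int mcelsius_to_index(int temp)
{
        int low = 0, high = TABLE_LEN - 1;

        if (temp < conv_table[low] || temp > conv_table[high])
                return -1;

        while (low < high) {
                int mid = (low + high + 1) / 2;

                if (conv_table[mid] <= temp)
                        low = mid;
                else
                        high = mid - 1;
        }
        return low;
}

int main(void)
{
        printf("index for 35000 mC: %d\n", mcelsius_to_index(35000));   /* 3 */
        printf("index for 200000 mC: %d\n", mcelsius_to_index(200000)); /* -1 */
        return 0;
}
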
/**
@@ -378,13 +367,11 @@ int ti_bandgap_add_hyst(struct ti_bandgap *bgp, int adc_val, int hyst_val,
*/
ret = ti_bandgap_adc_to_mcelsius(bgp, adc_val, &temp);
if (ret < 0)
- goto exit;
+ return ret;
temp += hyst_val;
ret = ti_bandgap_mcelsius_to_adc(bgp, temp, sum);
-
-exit:
return ret;
}
@@ -542,22 +529,18 @@ exit:
*/
static inline int ti_bandgap_validate(struct ti_bandgap *bgp, int id)
{
- int ret = 0;
-
if (!bgp || IS_ERR(bgp)) {
pr_err("%s: invalid bandgap pointer\n", __func__);
- ret = -EINVAL;
- goto exit;
+ return -EINVAL;
}
if ((id < 0) || (id >= bgp->conf->sensor_count)) {
dev_err(bgp->dev, "%s: sensor id out of range (%d)\n",
__func__, id);
- ret = -ERANGE;
+ return -ERANGE;
}
-exit:
- return ret;
+ return 0;
}
/**
@@ -585,12 +568,10 @@ static int _ti_bandgap_write_threshold(struct ti_bandgap *bgp, int id, int val,
ret = ti_bandgap_validate(bgp, id);
if (ret)
- goto exit;
+ return ret;
- if (!TI_BANDGAP_HAS(bgp, TALERT)) {
- ret = -ENOTSUPP;
- goto exit;
- }
+ if (!TI_BANDGAP_HAS(bgp, TALERT))
+ return -ENOTSUPP;
ts_data = bgp->conf->sensors[id].ts_data;
tsr = bgp->conf->sensors[id].registers;
@@ -603,17 +584,15 @@ static int _ti_bandgap_write_threshold(struct ti_bandgap *bgp, int id, int val,
}
if (ret)
- goto exit;
+ return ret;
ret = ti_bandgap_mcelsius_to_adc(bgp, val, &adc_val);
if (ret < 0)
- goto exit;
+ return ret;
spin_lock(&bgp->lock);
ret = ti_bandgap_update_alert_threshold(bgp, id, adc_val, hot);
spin_unlock(&bgp->lock);
-
-exit:
return ret;
}
@@ -656,7 +635,7 @@ static int _ti_bandgap_read_threshold(struct ti_bandgap *bgp, int id,
temp = ti_bandgap_readl(bgp, tsr->bgap_threshold);
temp = (temp & mask) >> __ffs(mask);
- ret |= ti_bandgap_adc_to_mcelsius(bgp, temp, &temp);
+ ret = ti_bandgap_adc_to_mcelsius(bgp, temp, &temp);
if (ret) {
dev_err(bgp->dev, "failed to read thot\n");
ret = -EIO;
@@ -926,11 +905,17 @@ int ti_bandgap_read_temperature(struct ti_bandgap *bgp, int id,
if (ret)
return ret;
+ if (!TI_BANDGAP_HAS(bgp, MODE_CONFIG)) {
+ ret = ti_bandgap_force_single_read(bgp, id);
+ if (ret)
+ return ret;
+ }
+
spin_lock(&bgp->lock);
temp = ti_bandgap_read_temp(bgp, id);
spin_unlock(&bgp->lock);
- ret |= ti_bandgap_adc_to_mcelsius(bgp, temp, &temp);
+ ret = ti_bandgap_adc_to_mcelsius(bgp, temp, &temp);
if (ret)
return -EIO;
@@ -991,7 +976,8 @@ void *ti_bandgap_get_sensor_data(struct ti_bandgap *bgp, int id)
static int
ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id)
{
- u32 temp = 0, counter = 1000;
+ u32 counter = 1000;
+ struct temp_sensor_registers *tsr;
/* Select single conversion mode */
if (TI_BANDGAP_HAS(bgp, MODE_CONFIG))
@@ -999,16 +985,27 @@ ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id)
/* Start of Conversion = 1 */
RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 1);
- /* Wait until DTEMP is updated */
- temp = ti_bandgap_read_temp(bgp, id);
- while ((temp == 0) && --counter)
- temp = ti_bandgap_read_temp(bgp, id);
- /* REVISIT: Check correct condition for end of conversion */
+ /* Wait for EOCZ going up */
+ tsr = bgp->conf->sensors[id].registers;
+
+ while (--counter) {
+ if (ti_bandgap_readl(bgp, tsr->temp_sensor_ctrl) &
+ tsr->bgap_eocz_mask)
+ break;
+ }
/* Start of Conversion = 0 */
RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 0);
+ /* Wait for EOCZ going down */
+ counter = 1000;
+ while (--counter) {
+ if (!(ti_bandgap_readl(bgp, tsr->temp_sensor_ctrl) &
+ tsr->bgap_eocz_mask))
+ break;
+ }
+
return 0;
}
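
ti_bandgap_force_single_read() above replaces the old DTEMP polling with a bounded wait on the EOCZ bit: the control register is polled until the bit reaches the expected level or the loop counter runs out, once for the rising edge after setting SOC and once for the falling edge after clearing it. A standalone sketch of that bounded-polling pattern (userspace C with a mocked register read):

#include <stdbool.h>
#include <stdio.h>

#define EOCZ_MASK 0x1u

/* Mocked control-register read: pretend EOCZ goes high after a few reads. */
static unsigned int read_ctrl(void)
{
        static int reads;

        return (++reads >= 3) ? EOCZ_MASK : 0;
}

/* Poll until EOCZ matches @level or the bounded counter expires. */
static bool wait_for_eocz(bool level, unsigned int counter)
{
        while (counter--) {
                bool set = (read_ctrl() & EOCZ_MASK) != 0;

                if (set == level)
                        return true;
        }
        return false;   /* timed out, like the counter reaching zero above */
}

int main(void)
{
        printf("EOCZ went high: %s\n", wait_for_eocz(true, 1000) ? "yes" : "no");
        return 0;
}
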
@@ -1294,11 +1291,10 @@ int ti_bandgap_probe(struct platform_device *pdev)
goto free_irqs;
}
- bgp->div_clk = clk_get(NULL, bgp->conf->div_ck_name);
+ bgp->div_clk = clk_get(NULL, bgp->conf->div_ck_name);
ret = IS_ERR(bgp->div_clk);
if (ret) {
- dev_err(&pdev->dev,
- "failed to request div_ts_ck clock ref\n");
+ dev_err(&pdev->dev, "failed to request div_ts_ck clock ref\n");
ret = PTR_ERR(bgp->div_clk);
goto free_irqs;
}
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index a38c17564..c7c5b3779 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -75,7 +75,7 @@ static inline int ti_thermal_hotspot_temperature(int t, int s, int c)
}
/* thermal zone ops */
-/* Get temperature callback function for thermal zone*/
+/* Get temperature callback function for thermal zone */
static inline int __ti_thermal_get_temp(void *devdata, long *temp)
{
struct thermal_zone_device *pcb_tz = NULL;
@@ -146,7 +146,8 @@ static int ti_thermal_bind(struct thermal_zone_device *thermal,
return thermal_zone_bind_cooling_device(thermal, 0, cdev,
/* bind with min and max states defined by cpu_cooling */
THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT);
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
}
/* Unbind callback functions for thermal zone */
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 9ea3d9d49..50d1d2cb0 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -68,7 +68,7 @@ struct phy_dev_entry {
struct thermal_zone_device *tzone;
};
-static const struct thermal_zone_params pkg_temp_tz_params = {
+static struct thermal_zone_params pkg_temp_tz_params = {
.no_hwmon = true,
};