author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-10-20 00:10:27 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-10-20 00:10:27 -0300
commit     d0b2f91bede3bd5e3d24dd6803e56eee959c1797 (patch)
tree       7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee /drivers/idle
parent     e914f8eb445e8f74b00303c19c2ffceaedd16a05 (diff)
Linux-libre 4.8.2-gnupck-4.8.2-gnu
Diffstat (limited to 'drivers/idle')
-rw-r--r--  drivers/idle/intel_idle.c | 178
1 file changed, 94 insertions(+), 84 deletions(-)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index c96649292..67ec58f9e 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -46,8 +46,6 @@
  * to avoid complications with the lapic timer workaround.
  * Have not seen issues with suspend, but may need same workaround here.
  *
- * There is currently no kernel-based automatic probing/loading mechanism
- * if the driver is built as a module.
  */
 
 /* un-comment DEBUG to enable pr_debug() statements */
@@ -60,8 +58,9 @@
 #include <linux/sched.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/mwait.h>
 #include <asm/msr.h>
 
@@ -827,6 +826,35 @@ static struct cpuidle_state bxt_cstates[] = {
         .enter = NULL }
 };
 
+static struct cpuidle_state dnv_cstates[] = {
+        {
+                .name = "C1-DNV",
+                .desc = "MWAIT 0x00",
+                .flags = MWAIT2flg(0x00),
+                .exit_latency = 2,
+                .target_residency = 2,
+                .enter = &intel_idle,
+                .enter_freeze = intel_idle_freeze, },
+        {
+                .name = "C1E-DNV",
+                .desc = "MWAIT 0x01",
+                .flags = MWAIT2flg(0x01),
+                .exit_latency = 10,
+                .target_residency = 20,
+                .enter = &intel_idle,
+                .enter_freeze = intel_idle_freeze, },
+        {
+                .name = "C6-DNV",
+                .desc = "MWAIT 0x20",
+                .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+                .exit_latency = 50,
+                .target_residency = 500,
+                .enter = &intel_idle,
+                .enter_freeze = intel_idle_freeze, },
+        {
+                .enter = NULL }
+};
+
 /**
  * intel_idle
  * @dev: cpuidle_device
@@ -1016,45 +1044,50 @@ static const struct idle_cpu idle_cpu_bxt = {
         .disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_dnv = {
+        .state_table = dnv_cstates,
+        .disable_promotion_to_c1e = true,
+};
+
 #define ICPU(model, cpu) \
         { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
 static const struct x86_cpu_id intel_idle_ids[] __initconst = {
-        ICPU(0x1a, idle_cpu_nehalem),
-        ICPU(0x1e, idle_cpu_nehalem),
-        ICPU(0x1f, idle_cpu_nehalem),
-        ICPU(0x25, idle_cpu_nehalem),
-        ICPU(0x2c, idle_cpu_nehalem),
-        ICPU(0x2e, idle_cpu_nehalem),
-        ICPU(0x1c, idle_cpu_atom),
-        ICPU(0x26, idle_cpu_lincroft),
-        ICPU(0x2f, idle_cpu_nehalem),
-        ICPU(0x2a, idle_cpu_snb),
-        ICPU(0x2d, idle_cpu_snb),
-        ICPU(0x36, idle_cpu_atom),
-        ICPU(0x37, idle_cpu_byt),
-        ICPU(0x4c, idle_cpu_cht),
-        ICPU(0x3a, idle_cpu_ivb),
-        ICPU(0x3e, idle_cpu_ivt),
-        ICPU(0x3c, idle_cpu_hsw),
-        ICPU(0x3f, idle_cpu_hsw),
-        ICPU(0x45, idle_cpu_hsw),
-        ICPU(0x46, idle_cpu_hsw),
-        ICPU(0x4d, idle_cpu_avn),
-        ICPU(0x3d, idle_cpu_bdw),
-        ICPU(0x47, idle_cpu_bdw),
-        ICPU(0x4f, idle_cpu_bdw),
-        ICPU(0x56, idle_cpu_bdw),
-        ICPU(0x4e, idle_cpu_skl),
-        ICPU(0x5e, idle_cpu_skl),
-        ICPU(0x8e, idle_cpu_skl),
-        ICPU(0x9e, idle_cpu_skl),
-        ICPU(0x55, idle_cpu_skx),
-        ICPU(0x57, idle_cpu_knl),
-        ICPU(0x5c, idle_cpu_bxt),
+        ICPU(INTEL_FAM6_NEHALEM_EP,       idle_cpu_nehalem),
+        ICPU(INTEL_FAM6_NEHALEM,          idle_cpu_nehalem),
+        ICPU(INTEL_FAM6_NEHALEM_G,        idle_cpu_nehalem),
+        ICPU(INTEL_FAM6_WESTMERE,         idle_cpu_nehalem),
+        ICPU(INTEL_FAM6_WESTMERE_EP,      idle_cpu_nehalem),
+        ICPU(INTEL_FAM6_NEHALEM_EX,       idle_cpu_nehalem),
+        ICPU(INTEL_FAM6_ATOM_PINEVIEW,    idle_cpu_atom),
+        ICPU(INTEL_FAM6_ATOM_LINCROFT,    idle_cpu_lincroft),
+        ICPU(INTEL_FAM6_WESTMERE_EX,      idle_cpu_nehalem),
+        ICPU(INTEL_FAM6_SANDYBRIDGE,      idle_cpu_snb),
+        ICPU(INTEL_FAM6_SANDYBRIDGE_X,    idle_cpu_snb),
+        ICPU(INTEL_FAM6_ATOM_CEDARVIEW,   idle_cpu_atom),
+        ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
+        ICPU(INTEL_FAM6_ATOM_AIRMONT,     idle_cpu_cht),
+        ICPU(INTEL_FAM6_IVYBRIDGE,        idle_cpu_ivb),
+        ICPU(INTEL_FAM6_IVYBRIDGE_X,      idle_cpu_ivt),
+        ICPU(INTEL_FAM6_HASWELL_CORE,     idle_cpu_hsw),
+        ICPU(INTEL_FAM6_HASWELL_X,        idle_cpu_hsw),
+        ICPU(INTEL_FAM6_HASWELL_ULT,      idle_cpu_hsw),
+        ICPU(INTEL_FAM6_HASWELL_GT3E,     idle_cpu_hsw),
+        ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn),
+        ICPU(INTEL_FAM6_BROADWELL_CORE,   idle_cpu_bdw),
+        ICPU(INTEL_FAM6_BROADWELL_GT3E,   idle_cpu_bdw),
+        ICPU(INTEL_FAM6_BROADWELL_X,      idle_cpu_bdw),
+        ICPU(INTEL_FAM6_BROADWELL_XEON_D, idle_cpu_bdw),
+        ICPU(INTEL_FAM6_SKYLAKE_MOBILE,   idle_cpu_skl),
+        ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,  idle_cpu_skl),
+        ICPU(INTEL_FAM6_KABYLAKE_MOBILE,  idle_cpu_skl),
+        ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, idle_cpu_skl),
+        ICPU(INTEL_FAM6_SKYLAKE_X,        idle_cpu_skx),
+        ICPU(INTEL_FAM6_XEON_PHI_KNL,     idle_cpu_knl),
+        ICPU(INTEL_FAM6_ATOM_GOLDMONT,    idle_cpu_bxt),
+        ICPU(INTEL_FAM6_ATOM_DENVERTON,   idle_cpu_dnv),
         {}
 };
-MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
 
 /*
  * intel_idle_probe()
@@ -1154,7 +1187,10 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
 {
         unsigned long long ns;
 
-        ns = irtl_ns_units[(irtl >> 10) & 0x3];
+        if (!irtl)
+                return 0;
+
+        ns = irtl_ns_units[(irtl >> 10) & 0x7];
 
         return div64_u64((irtl & 0x3FF) * ns, 1000);
 }
@@ -1167,43 +1203,39 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
 static void bxt_idle_state_table_update(void)
 {
         unsigned long long msr;
+        unsigned int usec;
 
         rdmsrl(MSR_PKGC6_IRTL, msr);
-        if (msr) {
-                unsigned int usec = irtl_2_usec(msr);
-
+        usec = irtl_2_usec(msr);
+        if (usec) {
                 bxt_cstates[2].exit_latency = usec;
                 bxt_cstates[2].target_residency = usec;
         }
 
         rdmsrl(MSR_PKGC7_IRTL, msr);
-        if (msr) {
-                unsigned int usec = irtl_2_usec(msr);
-
+        usec = irtl_2_usec(msr);
+        if (usec) {
                 bxt_cstates[3].exit_latency = usec;
                 bxt_cstates[3].target_residency = usec;
         }
 
         rdmsrl(MSR_PKGC8_IRTL, msr);
-        if (msr) {
-                unsigned int usec = irtl_2_usec(msr);
-
+        usec = irtl_2_usec(msr);
+        if (usec) {
                 bxt_cstates[4].exit_latency = usec;
                 bxt_cstates[4].target_residency = usec;
         }
 
         rdmsrl(MSR_PKGC9_IRTL, msr);
-        if (msr) {
-                unsigned int usec = irtl_2_usec(msr);
-
+        usec = irtl_2_usec(msr);
+        if (usec) {
                 bxt_cstates[5].exit_latency = usec;
                 bxt_cstates[5].target_residency = usec;
         }
 
         rdmsrl(MSR_PKGC10_IRTL, msr);
-        if (msr) {
-                unsigned int usec = irtl_2_usec(msr);
-
+        usec = irtl_2_usec(msr);
+        if (usec) {
                 bxt_cstates[6].exit_latency = usec;
                 bxt_cstates[6].target_residency = usec;
         }
@@ -1261,13 +1293,13 @@ static void intel_idle_state_table_update(void)
 {
         switch (boot_cpu_data.x86_model) {
 
-        case 0x3e: /* IVT */
+        case INTEL_FAM6_IVYBRIDGE_X:
                 ivt_idle_state_table_update();
                 break;
-        case 0x5c: /* BXT */
+        case INTEL_FAM6_ATOM_GOLDMONT:
                 bxt_idle_state_table_update();
                 break;
-        case 0x5e: /* SKL-H */
+        case INTEL_FAM6_SKYLAKE_DESKTOP:
                 sklh_idle_state_table_update();
                 break;
         }
@@ -1415,34 +1447,12 @@ static int __init intel_idle_init(void)
 
         return 0;
 }
+device_initcall(intel_idle_init);
 
-static void __exit intel_idle_exit(void)
-{
-        struct cpuidle_device *dev;
-        int i;
-
-        cpu_notifier_register_begin();
-
-        if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
-                on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
-        __unregister_cpu_notifier(&cpu_hotplug_notifier);
-
-        for_each_possible_cpu(i) {
-                dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
-                cpuidle_unregister_device(dev);
-        }
-
-        cpu_notifier_register_done();
-
-        cpuidle_unregister_driver(&intel_idle_driver);
-        free_percpu(intel_idle_cpuidle_devices);
-}
-
-module_init(intel_idle_init);
-module_exit(intel_idle_exit);
-
+/*
+ * We are not really modular, but we used to support that. Meaning we also
+ * support "intel_idle.max_cstate=..." at boot and also a read-only export of
+ * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
+ * is the easiest way (currently) to continue doing that.
+ */
 module_param(max_cstate, int, 0444);
-
-MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
-MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
-MODULE_LICENSE("GPL");
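Note on the irtl_2_usec() hunk above: the PKGCn_IRTL MSRs encode an interrupt response time limit as a 10-bit multiplier (bits 9:0) scaled by a 3-bit time-unit selector (bits 12:10), which is why the mask is widened from 0x3 to 0x7 and a zero register is now treated as "unprogrammed". The stand-alone C sketch below reproduces that decode outside the kernel; the unit table is assumed to mirror the kernel's irtl_ns_units[] definition, and the helper name irtl_to_usec() and the sample MSR value are invented purely for illustration.

#include <stdio.h>

/*
 * Nanoseconds per IRTL time unit, indexed by bits 12:10 of the MSR value.
 * Assumed to match the irtl_ns_units[] table in intel_idle.c.
 */
static const unsigned long long irtl_ns_units[] = {
        1, 32, 1024, 32768, 1048576, 33554432, 0, 0
};

/* Stand-alone copy of the patched irtl_2_usec() logic (illustrative only). */
static unsigned long long irtl_to_usec(unsigned long long irtl)
{
        unsigned long long ns;

        if (!irtl)              /* unprogrammed limit: caller keeps table defaults */
                return 0;

        ns = irtl_ns_units[(irtl >> 10) & 0x7]; /* 3-bit unit field; old 0x3 mask truncated it */

        return ((irtl & 0x3FF) * ns) / 1000;    /* multiplier * unit, nanoseconds -> microseconds */
}

int main(void)
{
        /* Made-up example value: unit index 2 (1024 ns per tick), multiplier 80. */
        unsigned long long msr = (2ULL << 10) | 80;

        printf("%llu usec\n", irtl_to_usec(msr));   /* prints "81 usec" (80 * 1024 / 1000) */
        return 0;
}

With the old 0x3 mask, unit indexes 4-7 would silently alias onto 0-3 and could yield latencies wrong by orders of magnitude; the new !irtl check is what lets bxt_idle_state_table_update() keep the built-in bxt_cstates defaults whenever a PKGCn_IRTL register reads back as zero.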