diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index c3038cdc6865..25bf88b571ae 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -72,7 +72,7 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE
 
 config CPU_FREQ_DEFAULT_GOV_ONDEMAND
 	bool "ondemand"
-	depends on !(X86_INTEL_PSTATE && SMP)
+	depends on SMP
 	select CPU_FREQ_GOV_ONDEMAND
 	select CPU_FREQ_GOV_PERFORMANCE
 	help
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index eb4320b619c9..71ec5040b29f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -18,10 +18,10 @@
 #include "cpufreq_ondemand.h"
 
 /* On-demand governor macros */
-#define DEF_FREQUENCY_UP_THRESHOLD		(80)
-#define DEF_SAMPLING_DOWN_FACTOR		(1)
+#define DEF_FREQUENCY_UP_THRESHOLD		(65)
+#define DEF_SAMPLING_DOWN_FACTOR		(10)
 #define MAX_SAMPLING_DOWN_FACTOR		(100000)
-#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
+#define MICRO_FREQUENCY_UP_THRESHOLD		(85)
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
 #define MIN_FREQUENCY_UP_THRESHOLD		(1)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 62db6b0176b9..221331739fd1 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -851,7 +851,7 @@ struct readahead_control {
 		._index = i,						\
 	}
 
-#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
+#define VM_READAHEAD_PAGES	(SZ_8M / PAGE_SIZE)
 
 void page_cache_ra_unbounded(struct readahead_control *,
 		unsigned long nr_to_read, unsigned long lookahead_count);
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 38ef6d06888e..eb1ccd2481a9 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -40,6 +40,20 @@ choice
 	 on SMP and NUMA systems and exactly dividing by both PAL and
 	 NTSC frame rates for video and multimedia work.
 
+	config HZ_600
+		bool "600 HZ"
+	help
+	 600 Hz is a balanced timer frequency. Provides fast interactivity
+	 on desktops with good smoothness without increasing CPU power
+	 consumption or sacrificing the battery life on laptops.
+
+	config HZ_750
+		bool "750 HZ"
+	help
+	 750 Hz is a balanced timer frequency. Provides fast interactivity
+	 on desktops with good smoothness without increasing CPU power
+	 consumption or sacrificing the battery life on laptops.
+
 	config HZ_1000
 		bool "1000 HZ"
 	help
@@ -53,6 +67,8 @@ config HZ
 	default 100 if HZ_100
 	default 250 if HZ_250
 	default 300 if HZ_300
+	default 600 if HZ_600
+	default 750 if HZ_750
 	default 1000 if HZ_1000
 
 config SCHED_HRTICK
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f21714ea3db8..e3a184a75874 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -368,7 +368,7 @@ static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { }
  * part of the period that we allow rt tasks to run in us.
  * default: 0.95s
  */
-int sysctl_sched_rt_runtime = 950000;
+int sysctl_sched_rt_runtime = 980000;
 
 
 /*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f6a05d9b5443..aa4a2c980025 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -35,8 +35,8 @@
  *
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_latency = 6000000ULL;
-static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
+unsigned int sysctl_sched_latency = 4000000ULL;
+static unsigned int normalized_sysctl_sched_latency = 4000000ULL;
 
 /*
  * The initial- and re-scaling of tunables is configurable
@@ -56,8 +56,8 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
  *
  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 750000ULL;
-static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
+unsigned int sysctl_sched_min_granularity = 500000ULL;
+static unsigned int normalized_sysctl_sched_min_granularity = 500000ULL;
 
 /*
  * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
@@ -79,8 +79,8 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
  *
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
-static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 800000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity = 800000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
@@ -133,7 +133,7 @@ int __weak arch_asym_cpu_priority(int cpu)
  *
  * (default: 5 msec, units: microseconds)
  */
-unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
+unsigned int sysctl_sched_cfs_bandwidth_slice = 4000UL;
 #endif
 
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4812a17b288c..932f38ee246a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -70,7 +70,7 @@ static long ratelimit_pages = 32;
 /*
  * Start background writeback (via writeback threads) at this percentage
  */
-int dirty_background_ratio = 10;
+int dirty_background_ratio = 5;
 
 /*
  * dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -98,7 +98,7 @@ unsigned long vm_dirty_bytes;
 
 /*
  * The interval between `kupdate'-style writebacks
  */
-unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
+unsigned int dirty_writeback_interval = 10 * 100; /* centiseconds */
 
 EXPORT_SYMBOL_GPL(dirty_writeback_interval);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 74296c2d1fed..d08275fe7ebc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -174,7 +174,7 @@ struct scan_control {
 /*
  * From 0 .. 200. Higher means more swappy.
  */
-int vm_swappiness = 60;
+int vm_swappiness = 30;
 
 static void set_task_reclaim_state(struct task_struct *task,
 				   struct reclaim_state *rs)
--
2.34.0.rc2.16.g5a73c6bdc7