From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001
From: Tk-Glitch
Date: Wed, 4 Jul 2018 04:30:08 +0200
Subject: [PATCH 01/17] glitched

---
 scripts/mkcompile_h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index baf3ab8d9d49..854e32e6aec7 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -41,8 +41,8 @@ else
 fi

 UTS_VERSION="#$VERSION"
-CONFIG_FLAGS=""
-if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
+CONFIG_FLAGS="TKG"
+if [ -n "$SMP" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS SMP"; fi
 if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
 if [ -n "$PREEMPT_RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_RT"; fi
-- 
2.28.0

From c304f43d14e98d4bf1215fc10bc5012f554bdd8a Mon Sep 17 00:00:00 2001
From: Alexandre Frade
Date: Mon, 29 Jan 2018 16:59:22 +0000
Subject: [PATCH 02/17] dcache: cache_pressure = 50 decreases the rate at which VFS caches are reclaimed

Signed-off-by: Alexandre Frade
---
 fs/dcache.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/dcache.c b/fs/dcache.c
index 361ea7ab30ea..0c5cf69b241a 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -71,7 +71,7 @@
  * If no ancestor relationship:
  * arbitrary, since it's serialized on rename_lock
  */
-int sysctl_vfs_cache_pressure __read_mostly = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 50;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
-- 
2.28.0

From 28f32f59d9d55ac7ec3a20b79bdd02d2a0a5f7e1 Mon Sep 17 00:00:00 2001
From: Alexandre Frade
Date: Mon, 29 Jan 2018 18:29:13 +0000
Subject: [PATCH 03/17] sched/core: nr_migrate = 128 increases number of tasks to iterate in a single balance run.

Signed-off-by: Alexandre Frade
---
 kernel/sched/core.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f788cd61df21..2bfbb4213707 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -59,7 +59,7 @@ const_debug unsigned int sysctl_sched_features =
  * Number of tasks to iterate in a single balance run.
  * Limited because this is done with IRQs disabled.
  */
-const_debug unsigned int sysctl_sched_nr_migrate = 32;
+const_debug unsigned int sysctl_sched_nr_migrate = 128;

 /*
  * period over which we measure -rt task CPU usage in us.
@@ -71,9 +71,9 @@ __read_mostly int scheduler_running;

 /*
  * part of the period that we allow rt tasks to run in us.
- * default: 0.95s
+ * XanMod default: 0.98s
  */
-int sysctl_sched_rt_runtime = 950000;
+int sysctl_sched_rt_runtime = 980000;

 /*
  * __task_rq_lock - lock the rq @p resides on.
-- 
2.28.0
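The defaults changed by patches 02 and 03 map to runtime tunables, so they can be trialed on a stock kernel before rebuilding. A minimal verification sketch, assuming a standard procfs layout (on kernels of this vintage, sched_nr_migrate is only exposed under /proc/sys/kernel when CONFIG_SCHED_DEBUG is enabled):

  # Patch 02: VFS cache pressure (compiled-in default becomes 50)
  sysctl vm.vfs_cache_pressure            # -> vm.vfs_cache_pressure = 50
  sysctl -w vm.vfs_cache_pressure=50      # equivalent runtime change on a stock kernel

  # Patch 03: RT bandwidth and balance batch size
  sysctl kernel.sched_rt_runtime_us       # -> kernel.sched_rt_runtime_us = 980000
  cat /proc/sys/kernel/sched_nr_migrate   # -> 128 (requires CONFIG_SCHED_DEBUG)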
From acc49f33a10f61dc66c423888cbb883ba46710e4 Mon Sep 17 00:00:00 2001
From: Alexandre Frade
Date: Mon, 29 Jan 2018 17:41:29 +0000
Subject: [PATCH 04/17] scripts: disable the localversion "+" tag of a git repo

Signed-off-by: Alexandre Frade
---
 scripts/setlocalversion | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 20f2efd57b11..0552d8b9f582 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -54,7 +54,7 @@ scm_version()
 	# If only the short version is requested, don't bother
 	# running further git commands
 	if $short; then
-		echo "+"
+		# echo "+"
 		return
 	fi
 	# If we are past a tagged commit (like
-- 
2.28.0

From 61fcb33fb0de8bc0f060e0a1ada38ed149217f4d Mon Sep 17 00:00:00 2001
From: Oleksandr Natalenko
Date: Wed, 11 Dec 2019 11:46:19 +0100
Subject: [PATCH 05/17] init/Kconfig: enable -O3 for all arches

Building a kernel with -O3 may help in hunting bugs like [1], and thus
using this switch should not be restricted to one specific arch.
With that, let's expose it for everyone.

[1] https://lore.kernel.org/lkml/673b885183fb64f1cbb3ed2387524077@natalenko.name/

Signed-off-by: Oleksandr Natalenko
---
 init/Kconfig | 1 -
 1 file changed, 1 deletion(-)

diff --git a/init/Kconfig b/init/Kconfig
index 0498af567f70..3ae8678e1145 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1278,7 +1278,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE

 config CC_OPTIMIZE_FOR_PERFORMANCE_O3
 	bool "Optimize more for performance (-O3)"
-	depends on ARC
 	help
 	  Choosing this option will pass "-O3" to your compiler to optimize
 	  the kernel yet more for performance.
-- 
2.28.0

From 360c6833e07cc9fdef5746f6bc45bdbc7212288d Mon Sep 17 00:00:00 2001
From: "Jan Alexander Steffens (heftig)"
Date: Fri, 26 Oct 2018 11:22:33 +0100
Subject: [PATCH 06/17] infiniband: Fix __read_overflow2 error with -O3 inlining

---
 drivers/infiniband/core/addr.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 3a98439bba83..6efc4f907f58 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -820,6 +820,7 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
 	union {
 		struct sockaddr_in  _sockaddr_in;
 		struct sockaddr_in6 _sockaddr_in6;
+		struct sockaddr_ib  _sockaddr_ib;
 	} sgid_addr, dgid_addr;
 	int ret;
-- 
2.28.0
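Both build-system changes above can be exercised directly in the source tree. A hedged sketch (scripts/config ships with the kernel; exact setlocalversion output depends on the git state, but with patch 04 the short probe no longer emits a bare "+" for a dirty tree):

  # Patch 04: localversion probe on a tree with uncommitted changes
  ./scripts/setlocalversion .       # e.g. "-00017-gf7f4914", never just "+"

  # Patch 05: -O3 is now selectable on every arch, not only ARC
  ./scripts/config --enable CC_OPTIMIZE_FOR_PERFORMANCE_O3
  grep 'CC_OPTIMIZE.*O3' .config    # CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y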
From f85ed068b4d0e6c31edce8574a95757a60e58b87 Mon Sep 17 00:00:00 2001
From: Etienne Juvigny
Date: Mon, 3 Sep 2018 17:36:25 +0200
Subject: [PATCH 07/17] Zenify & stuff

---
 init/Kconfig        | 32 ++++++++++++++++++++++++++++++++
 kernel/sched/fair.c | 25 +++++++++++++++++++++++++
 mm/page-writeback.c |  8 ++++++++
 3 files changed, 65 insertions(+)

diff --git a/init/Kconfig b/init/Kconfig
index 3ae8678e1145..da708eed0f1e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -92,6 +92,38 @@ config THREAD_INFO_IN_TASK

 menu "General setup"

+config ZENIFY
+	bool "A selection of patches from Zen/Liquorix kernel and additional tweaks for a better gaming experience"
+	default y
+	help
+	  Tunes the kernel for responsiveness at the cost of throughput and power usage.
+
+	  --- Virtual Memory Subsystem ---------------------------
+
+	    Mem dirty before bg writeback..:  10 %  ->  20 %
+	    Mem dirty before sync writeback:  20 %  ->  50 %
+
+	  --- Block Layer ----------------------------------------
+
+	    Queue depth...............:      128    -> 512
+	    Default MQ scheduler......: mq-deadline -> bfq
+
+	  --- CFS CPU Scheduler ----------------------------------
+
+	    Scheduling latency.............:   6    ->   3    ms
+	    Minimal granularity............:   0.75 ->   0.3  ms
+	    Wakeup granularity.............:   1    ->   0.5  ms
+	    CPU migration cost.............:   0.5  ->   0.25 ms
+	    Bandwidth slice size...........:   5    ->   3    ms
+	    Ondemand fine upscaling limit..:  95 %  ->  85 %
+
+	  --- MuQSS CPU Scheduler --------------------------------
+
+	    Scheduling interval............:   6    ->   3    ms
+	    ISO task max realtime use......:  70 %  ->  25 %
+	    Ondemand coarse upscaling limit:  80 %  ->  45 %
+	    Ondemand fine upscaling limit..:  95 %  ->  45 %
+
 config BROKEN
 	bool

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6b3b59cc51d6..2a0072192c3d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,8 +37,13 @@
  *
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_latency = 3000000ULL;
+static unsigned int normalized_sysctl_sched_latency = 3000000ULL;
+#else
 unsigned int sysctl_sched_latency = 6000000ULL;
 static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
+#endif

 /*
  * The initial- and re-scaling of tunables is configurable
@@ -58,13 +63,22 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
  *
  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_min_granularity = 300000ULL;
+static unsigned int normalized_sysctl_sched_min_granularity = 300000ULL;
+#else
 unsigned int sysctl_sched_min_granularity = 750000ULL;
 static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
+#endif

 /*
  * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
  */
+#ifdef CONFIG_ZENIFY
+static unsigned int sched_nr_latency = 10;
+#else
 static unsigned int sched_nr_latency = 8;
+#endif

 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -81,10 +95,17 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
  *
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_wakeup_granularity = 500000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity = 500000UL;
+
+const_debug unsigned int sysctl_sched_migration_cost = 50000UL;
+#else
 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+#endif

 int sched_thermal_decay_shift;
 static int __init setup_sched_thermal_decay_shift(char *str)
@@ -128,8 +149,12 @@ int __weak arch_asym_cpu_priority(int cpu)
  *
  * (default: 5 msec, units: microseconds)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL;
+#else
 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 #endif
+#endif

 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 28b3e7a67565..01a1aef2b9b1 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -71,7 +71,11 @@ static long ratelimit_pages = 32;
 /*
  * Start background writeback (via writeback threads) at this percentage
  */
+#ifdef CONFIG_ZENIFY
+int dirty_background_ratio = 20;
+#else
 int dirty_background_ratio = 10;
+#endif

 /*
  * dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -88,7 +92,11 @@ int vm_highmem_is_dirtyable;
 /*
  * The generator of dirty data starts writeback at this percentage
  */
+#ifdef CONFIG_ZENIFY
+int vm_dirty_ratio = 50;
+#else
 int vm_dirty_ratio = 20;
+#endif

 /*
  * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
-- 
2.28.0
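A quick way to confirm the ZENIFY defaults took effect on a booted kernel, sketched under the assumption that CONFIG_SCHED_DEBUG is set (the sched_* sysctls are scaled by 1 + ilog(ncpus) at boot, so expect multiples of the base values on SMP machines):

  grep ZENIFY .config                         # CONFIG_ZENIFY=y
  sysctl kernel.sched_latency_ns              # base 3000000 before CPU-count scaling
  sysctl kernel.sched_min_granularity_ns      # base 300000
  sysctl kernel.sched_wakeup_granularity_ns   # base 500000
  sysctl vm.dirty_background_ratio vm.dirty_ratio   # 20 and 50

Note one discrepancy worth knowing when verifying: the Kconfig help table advertises a migration cost of 0.25 ms, while the code above sets sysctl_sched_migration_cost to 50000UL (0.05 ms); the code is what actually lands.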
From e92e67143385cf285851e12aa8b7f083dd38dd24 Mon Sep 17 00:00:00 2001
From: Steven Barrett
Date: Sun, 16 Jan 2011 18:57:32 -0600
Subject: [PATCH 08/17] ZEN: Allow TCP YeAH as default congestion control

4.4: In my tests YeAH dramatically slowed down transfers over a WLAN,
reducing throughput from ~65Mbps (CUBIC) to ~7MBps (YeAH) over 10
seconds (netperf TCP_STREAM), including long stalls. Be careful when
choosing this. ~heftig

---
 net/ipv4/Kconfig | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index e64e59b536d3..bfb55ef7ebbe 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -691,6 +691,9 @@ choice
 	config DEFAULT_VEGAS
 		bool "Vegas" if TCP_CONG_VEGAS=y

+	config DEFAULT_YEAH
+		bool "YeAH" if TCP_CONG_YEAH=y
+
 	config DEFAULT_VENO
 		bool "Veno" if TCP_CONG_VENO=y

@@ -724,6 +727,7 @@ config DEFAULT_TCP_CONG
 	default "htcp" if DEFAULT_HTCP
 	default "hybla" if DEFAULT_HYBLA
 	default "vegas" if DEFAULT_VEGAS
+	default "yeah" if DEFAULT_YEAH
 	default "westwood" if DEFAULT_WESTWOOD
 	default "veno" if DEFAULT_VENO
 	default "reno" if DEFAULT_RENO
-- 
2.28.0

From 76dbe7477bfde1b5e8bf29a71b5af7ab2be9b98e Mon Sep 17 00:00:00 2001
From: Steven Barrett
Date: Wed, 28 Nov 2018 19:01:27 -0600
Subject: [PATCH 09/17] zen: Use [defer+madvise] as default khugepaged defrag strategy

For some reason, the default strategy to respond to THP fault fallbacks
is still just madvise, meaning stall if the program wants transparent
hugepages, but don't trigger a background reclaim / compaction if THP
begins to fail allocations. This creates a snowball effect where we
still use the THP code paths, but we almost always fail once a system
has been active and busy for a while.

The option "defer" was created for interactive systems where THP can
still improve performance. If we have to fall back to a regular page due
to an allocation failure or anything else, we will trigger a background
reclaim and compaction so future THP attempts succeed and previous
attempts eventually have their smaller pages combined without stalling
running applications.

We still want madvise to stall applications that explicitly want THP,
so defer+madvise _does_ make a ton of sense. Make it the default for
interactive systems, especially if the kernel maintainer left
transparent hugepages on "always".

Reasoning and details in the original patch:
https://lwn.net/Articles/711248/

---
 mm/huge_memory.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 74300e337c3c..9277f22c10a7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -53,7 +53,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
 #endif
+#ifdef CONFIG_ZENIFY
+	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG)|
+#else
 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
+#endif
 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

-- 
2.28.0
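The khugepaged defrag default from patch 09 is visible (and reversible) through sysfs; a small sketch, assuming a standard sysfs mount:

  # The active policy is shown in brackets
  cat /sys/kernel/mm/transparent_hugepage/defrag
  # always defer [defer+madvise] madvise never

  # Equivalent runtime switch on an unpatched kernel
  echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag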
The option "defer" was created for interactive systems where THP can still improve performance. If we have to fallback to a regular page due to an allocation failure or anything else, we will trigger a background reclaim and compaction so future THP attempts succeed and previous attempts eventually have their smaller pages combined without stalling running applications. We still want madvise to stall applications that explicitely want THP, so defer+madvise _does_ make a ton of sense. Make it the default for interactive systems, especially if the kernel maintainer left transparent hugepages on "always". Reasoning and details in the original patch: https://lwn.net/Articles/711248/ --- mm/huge_memory.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 74300e337c3c..9277f22c10a7 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -53,7 +53,11 @@ unsigned long transparent_hugepage_flags __read_mostly = #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE (1< Date: Wed, 24 Oct 2018 16:58:52 -0300 Subject: [PATCH 10/17] net/sched: allow configuring cake qdisc as default Signed-off-by: Alexandre Frade --- net/sched/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 84badf00647e..6a922bca9f39 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -471,6 +471,9 @@ choice config DEFAULT_SFQ bool "Stochastic Fair Queue" if NET_SCH_SFQ + config DEFAULT_CAKE + bool "Common Applications Kept Enhanced" if NET_SCH_CAKE + config DEFAULT_PFIFO_FAST bool "Priority FIFO Fast" endchoice @@ -481,6 +484,7 @@ config DEFAULT_NET_SCH default "fq" if DEFAULT_FQ default "fq_codel" if DEFAULT_FQ_CODEL default "sfq" if DEFAULT_SFQ + default "cake" if DEFAULT_CAKE default "pfifo_fast" endif -- 2.28.0 From 816ee502759e954304693813bd03d94986b28dba Mon Sep 17 00:00:00 2001 From: Tk-Glitch Date: Mon, 18 Feb 2019 17:40:57 +0100 Subject: [PATCH 11/17] mm: Set watermark_scale_factor to 200 (from 10) Multiple users have reported it's helping reducing/eliminating stuttering with DXVK. --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 898ff44f2c7b..e72074034793 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -330,7 +330,7 @@ int watermark_boost_factor __read_mostly; #else int watermark_boost_factor __read_mostly = 15000; #endif -int watermark_scale_factor = 10; +int watermark_scale_factor = 200; static unsigned long nr_kernel_pages __initdata; static unsigned long nr_all_pages __initdata; -- 2.28.0 From 90240bcd90a568878738e66c0d45bed3e38e347b Mon Sep 17 00:00:00 2001 From: Tk-Glitch Date: Fri, 19 Apr 2019 12:33:38 +0200 Subject: [PATCH 12/17] Set vm.max_map_count to 262144 by default The value is still pretty low, and AMD64-ABI and ELF extended numbering supports that, so we should be fine on modern x86 systems. This fixes crashes in some applications using more than 65535 vmas (also affects some windows games running in wine, such as Star Citizen). --- include/linux/mm.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index bc05c3588aa3..b0cefe94920d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -190,8 +190,7 @@ static inline void __mm_zero_struct_page(struct page *page) * not a hard limit any more. Although some userspace tools can be surprised by * that. 
From 3a34034dba5efe91bcec491efe8c66e8087f509b Mon Sep 17 00:00:00 2001
From: Tk-Glitch
Date: Mon, 27 Jul 2020 00:19:18 +0200
Subject: [PATCH 13/17] mm: bump DEFAULT_MAX_MAP_COUNT

Some games, such as Detroit: Become Human, tend to be very crash prone
with lower values.

---
 include/linux/mm.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index b0cefe94920d..890165099b07 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -190,7 +190,7 @@ static inline void __mm_zero_struct_page(struct page *page)
 * not a hard limit any more. Although some userspace tools can be surprised by
 * that.
 */
-#define DEFAULT_MAX_MAP_COUNT (262144)
+#define DEFAULT_MAX_MAP_COUNT (524288)

 extern int sysctl_max_map_count;
-- 
2.28.0

From 977812938da7c7226415778c340832141d9278b7 Mon Sep 17 00:00:00 2001
From: Alexandre Frade
Date: Mon, 25 Nov 2019 15:13:06 -0300
Subject: [PATCH 14/17] elevator: set default scheduler to bfq for blk-mq

Signed-off-by: Alexandre Frade
---
 block/elevator.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/block/elevator.c b/block/elevator.c
index 4eab3d70e880..79669aa39d79 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -623,15 +623,15 @@ static inline bool elv_support_iosched(struct request_queue *q)
 }

 /*
- * For single queue devices, default to using mq-deadline. If we have multiple
- * queues or mq-deadline is not available, default to "none".
+ * For single queue devices, default to using bfq. If we have multiple
+ * queues or bfq is not available, default to "none".
  */
 static struct elevator_type *elevator_get_default(struct request_queue *q)
 {
 	if (q->nr_hw_queues != 1)
 		return NULL;

-	return elevator_get(q, "mq-deadline", false);
+	return elevator_get(q, "bfq", false);
 }

 /*
-- 
2.28.0
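The bfq default from patch 14 only applies to single-queue devices and can be checked or overridden per device. A sketch, with sda as a placeholder disk (bfq must be built in or loaded):

  cat /sys/block/sda/queue/scheduler          # e.g. "mq-deadline kyber [bfq] none"
  echo bfq > /sys/block/sda/queue/scheduler   # runtime equivalent on a stock kernel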
From e2111bc5989131c675659d40e0cc4f214df2f990 Mon Sep 17 00:00:00 2001
From: Alexandre Frade
Date: Fri, 10 May 2019 16:45:59 -0300
Subject: [PATCH 15/17] block: set rq_affinity = 2 for full multithreading I/O requests

Signed-off-by: Alexandre Frade
---
 include/linux/blkdev.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 28efe374a2e1..d4e5d35d2ece 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -624,7 +624,8 @@ struct request_queue {
 #define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */

 #define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_SAME_COMP))
+				 (1 << QUEUE_FLAG_SAME_COMP) |		\
+				 (1 << QUEUE_FLAG_SAME_FORCE))

 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
-- 
2.28.0

From 3c229f434aca65c4ca61772bc03c3e0370817b92 Mon Sep 17 00:00:00 2001
From: Alexandre Frade
Date: Mon, 3 Aug 2020 17:05:04 +0000
Subject: [PATCH 16/17] mm: set 2 megabytes for address_space-level file read-ahead pages size

Signed-off-by: Alexandre Frade
---
 include/linux/pagemap.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index cf2468da68e9..007dea784451 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -655,7 +655,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 void delete_from_page_cache_batch(struct address_space *mapping,
 				  struct pagevec *pvec);

-#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
+#define VM_READAHEAD_PAGES	(SZ_2M / PAGE_SIZE)

 void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
 		struct file *, pgoff_t index, unsigned long req_count);
-- 
2.28.0

From 716f41cf6631f3a85834dcb67b4ce99185b6387f Mon Sep 17 00:00:00 2001
From: Steven Barrett
Date: Wed, 15 Jan 2020 20:43:56 -0600
Subject: [PATCH 17/17] ZEN: intel-pstate: Implement "enable" parameter

If intel-pstate is compiled into the kernel, it will preempt the loading
of acpi-cpufreq, so you can take advantage of hardware p-states without
any friction.

However, intel-pstate is not completely superior to cpufreq's ondemand
for one reason: there's no concept of an up_threshold property.

In ondemand, up_threshold essentially reduces the maximum utilization to
compare against, allowing you to hit max frequencies and turbo boost
from a much lower core utilization.

With intel-pstate, you have the concept of minimum and maximum
performance, but no tunable that lets you define, say, that maximum
frequency means 50% core utilization. For just this oversight, there are
reasons you may still want ondemand.

Let's support setting "enable" in kernel boot parameters. This lets
kernel maintainers include "intel_pstate=disable" statically in the
static boot parameters, but lets users of the kernel override this
selection.

---
 Documentation/admin-guide/kernel-parameters.txt | 3 +++
 drivers/cpufreq/intel_pstate.c                  | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fb95fad81c79..3e92fee81e33 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1857,6 +1857,9 @@
 			disable
 			  Do not enable intel_pstate as the default
 			  scaling driver for the supported processors
+			enable
+			  Enable intel_pstate in-case "disable" was passed
+			  previously in the kernel boot parameters
 			passive
 			  Use intel_pstate as a scaling driver, but configure it
 			  to work with generic cpufreq governors (instead of
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 36a469150ff9..aee891c9b78a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2845,6 +2845,8 @@ static int __init intel_pstate_setup(char *str)
 		pr_info("HWP disabled\n");
 		no_hwp = 1;
 	}
+	if (!strcmp(str, "enable"))
+		no_load = 0;
 	if (!strcmp(str, "force"))
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
-- 
2.28.0
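Patches 15 through 17 can likewise be sanity-checked from userspace. A sketch, assuming sda as a placeholder device and an Intel CPU for the last two lines:

  cat /sys/block/sda/queue/rq_affinity     # -> 2: complete requests on the submitting CPU
  cat /sys/block/sda/queue/read_ahead_kb   # -> 2048 with VM_READAHEAD_PAGES = SZ_2M
  cat /proc/cmdline                        # look for intel_pstate=enable/disable
  cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_driver   # intel_pstate vs. acpi-cpufreq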
From: Sathish Narasimman
Subject: [PATCH] Bluetooth: Fix: LL Privacy BLE device fails to connect
Date: Thu, 22 Oct 2020 13:53:04 +0530

When adding a device to the white list, the device is also added to the
resolving list; it has to be added only when the HCI_ENABLE_LL_PRIVACY
flag is set. The HCI_ENABLE_LL_PRIVACY flag has to be tested before
adding/deleting devices to/from the resolving list. The use_ll_privacy
macro is used only to check whether the controller supports LL Privacy.

https://bugzilla.kernel.org/show_bug.cgi?id=209745

Signed-off-by: Sathish Narasimman
---
 net/bluetooth/hci_request.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 6f12bab4d2fa..610ed0817bd7 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -698,7 +698,8 @@ static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
 		   cp.bdaddr_type);
 	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

-	if (use_ll_privacy(req->hdev)) {
+	if (use_ll_privacy(req->hdev) &&
+	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
 		struct smp_irk *irk;

 		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
@@ -732,7 +733,8 @@ static int add_to_white_list(struct hci_request *req,
 		return -1;

 	/* White list can not be used with RPAs */
-	if (!allow_rpa && !use_ll_privacy(hdev) &&
+	if (!allow_rpa &&
+	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
 		return -1;
 	}
@@ -750,7 +752,8 @@ static int add_to_white_list(struct hci_request *req,
 		   cp.bdaddr_type);
 	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

-	if (use_ll_privacy(hdev)) {
+	if (use_ll_privacy(hdev) &&
+	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
 		struct smp_irk *irk;

 		irk = hci_find_irk_by_addr(hdev, &params->addr,
@@ -812,7 +815,8 @@ static u8 update_white_list(struct hci_request *req)
 	}

 	/* White list can not be used with RPAs */
-	if (!allow_rpa && !use_ll_privacy(hdev) &&
+	if (!allow_rpa &&
+	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
 	    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
 		return 0x00;
 	}
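To observe the behaviour this patch corrects, one rough reproduction sketch using stock BlueZ tools (the address is a placeholder for an affected LE device, and results depend on the controller supporting LL Privacy):

  btmon &                                  # trace HCI traffic; watch the white/resolving list commands
  bluetoothctl connect AA:BB:CC:DD:EE:FF   # fails to connect on kernels without this fix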