From 0b1da8861e93ab9dcf1d1cf7bb04fdfc6d3eabf4 Mon Sep 17 00:00:00 2001 From: Peter Jung Date: Thu, 5 May 2022 20:22:29 +0200 Subject: [PATCH 13/17] lrng Signed-off-by: Peter Jung --- MAINTAINERS | 7 + crypto/drbg.c | 16 +- crypto/jitterentropy-kcapi.c | 3 +- crypto/jitterentropy.c | 2 +- drivers/char/Kconfig | 2 + drivers/char/Makefile | 9 +- drivers/char/lrng/Kconfig | 613 +++++++++++++ drivers/char/lrng/Makefile | 21 + drivers/char/lrng/lrng_aux.c | 136 +++ drivers/char/lrng/lrng_chacha20.c | 321 +++++++ drivers/char/lrng/lrng_chacha20.h | 25 + drivers/char/lrng/lrng_drbg.c | 198 +++++ drivers/char/lrng/lrng_drng.c | 451 ++++++++++ drivers/char/lrng/lrng_es_archrandom.c | 230 +++++ drivers/char/lrng/lrng_es_aux.c | 295 +++++++ drivers/char/lrng/lrng_es_irq.c | 824 ++++++++++++++++++ drivers/char/lrng/lrng_es_irq.h | 71 ++ drivers/char/lrng/lrng_es_jent.c | 97 +++ drivers/char/lrng/lrng_es_mgr.c | 373 ++++++++ drivers/char/lrng/lrng_health.c | 410 +++++++++ drivers/char/lrng/lrng_interfaces.c | 654 ++++++++++++++ drivers/char/lrng/lrng_internal.h | 487 +++++++++++ drivers/char/lrng/lrng_kcapi.c | 225 +++++ drivers/char/lrng/lrng_kcapi_hash.c | 103 +++ drivers/char/lrng/lrng_kcapi_hash.h | 20 + drivers/char/lrng/lrng_numa.c | 123 +++ drivers/char/lrng/lrng_proc.c | 210 +++++ drivers/char/lrng/lrng_selftest.c | 387 ++++++++ drivers/char/lrng/lrng_switch.c | 226 +++++ drivers/char/lrng/lrng_testing.c | 689 +++++++++++++++ include/crypto/drbg.h | 7 + .../crypto/internal}/jitterentropy.h | 0 include/linux/lrng.h | 81 ++ 33 files changed, 7306 insertions(+), 10 deletions(-) create mode 100644 drivers/char/lrng/Kconfig create mode 100644 drivers/char/lrng/Makefile create mode 100644 drivers/char/lrng/lrng_aux.c create mode 100644 drivers/char/lrng/lrng_chacha20.c create mode 100644 drivers/char/lrng/lrng_chacha20.h create mode 100644 drivers/char/lrng/lrng_drbg.c create mode 100644 drivers/char/lrng/lrng_drng.c create mode 100644 drivers/char/lrng/lrng_es_archrandom.c create mode 100644 drivers/char/lrng/lrng_es_aux.c create mode 100644 drivers/char/lrng/lrng_es_irq.c create mode 100644 drivers/char/lrng/lrng_es_irq.h create mode 100644 drivers/char/lrng/lrng_es_jent.c create mode 100644 drivers/char/lrng/lrng_es_mgr.c create mode 100644 drivers/char/lrng/lrng_health.c create mode 100644 drivers/char/lrng/lrng_interfaces.c create mode 100644 drivers/char/lrng/lrng_internal.h create mode 100644 drivers/char/lrng/lrng_kcapi.c create mode 100644 drivers/char/lrng/lrng_kcapi_hash.c create mode 100644 drivers/char/lrng/lrng_kcapi_hash.h create mode 100644 drivers/char/lrng/lrng_numa.c create mode 100644 drivers/char/lrng/lrng_proc.c create mode 100644 drivers/char/lrng/lrng_selftest.c create mode 100644 drivers/char/lrng/lrng_switch.c create mode 100644 drivers/char/lrng/lrng_testing.c rename {crypto => include/crypto/internal}/jitterentropy.h (100%) create mode 100644 include/linux/lrng.h diff --git a/MAINTAINERS b/MAINTAINERS index 38f3b65fea8d..febbbd60e91f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11119,6 +11119,13 @@ F: Documentation/litmus-tests/ F: Documentation/memory-barriers.txt F: tools/memory-model/ +LINUX RANDOM NUMBER GENERATOR (LRNG) DRIVER +M: Stephan Mueller +S: Maintained +W: https://www.chronox.de/lrng.html +F: drivers/char/lrng/ +F: include/linux/lrng.h + LIS3LV02D ACCELEROMETER DRIVER M: Eric Piel S: Maintained diff --git a/crypto/drbg.c b/crypto/drbg.c index 177983b6ae38..ea79a31014a4 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -115,7 +115,7 @@ * the SHA256 / AES 256 
over other ciphers. Thus, the favored * DRBGs are the latest entries in this array. */ -static const struct drbg_core drbg_cores[] = { +const struct drbg_core drbg_cores[] = { #ifdef CONFIG_CRYPTO_DRBG_CTR { .flags = DRBG_CTR | DRBG_STRENGTH128, @@ -192,6 +192,7 @@ static const struct drbg_core drbg_cores[] = { }, #endif /* CONFIG_CRYPTO_DRBG_HMAC */ }; +EXPORT_SYMBOL(drbg_cores); static int drbg_uninstantiate(struct drbg_state *drbg); @@ -207,7 +208,7 @@ static int drbg_uninstantiate(struct drbg_state *drbg); * Return: normalized strength in *bytes* value or 32 as default * to counter programming errors */ -static inline unsigned short drbg_sec_strength(drbg_flag_t flags) +unsigned short drbg_sec_strength(drbg_flag_t flags) { switch (flags & DRBG_STRENGTH_MASK) { case DRBG_STRENGTH128: @@ -220,6 +221,7 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags) return 32; } } +EXPORT_SYMBOL(drbg_sec_strength); /* * FIPS 140-2 continuous self test for the noise source @@ -1252,7 +1254,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, } /* Free all substructures in a DRBG state without the DRBG state structure */ -static inline void drbg_dealloc_state(struct drbg_state *drbg) +void drbg_dealloc_state(struct drbg_state *drbg) { if (!drbg) return; @@ -1273,12 +1275,13 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) drbg->fips_primed = false; } } +EXPORT_SYMBOL(drbg_dealloc_state); /* * Allocate all sub-structures for a DRBG state. * The DRBG state structure must already be allocated. */ -static inline int drbg_alloc_state(struct drbg_state *drbg) +int drbg_alloc_state(struct drbg_state *drbg) { int ret = -ENOMEM; unsigned int sb_size = 0; @@ -1359,6 +1362,7 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) drbg_dealloc_state(drbg); return ret; } +EXPORT_SYMBOL(drbg_alloc_state); /************************************************************************* * DRBG interface functions @@ -1895,8 +1899,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, * * return: flags */ -static inline void drbg_convert_tfm_core(const char *cra_driver_name, - int *coreref, bool *pr) +void drbg_convert_tfm_core(const char *cra_driver_name, int *coreref, bool *pr) { int i = 0; size_t start = 0; @@ -1923,6 +1926,7 @@ static inline void drbg_convert_tfm_core(const char *cra_driver_name, } } } +EXPORT_SYMBOL(drbg_convert_tfm_core); static int drbg_kcapi_init(struct crypto_tfm *tfm) { diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 2d115bec15ae..e7dac734d237 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -42,8 +42,7 @@ #include #include #include - -#include "jitterentropy.h" +#include /*************************************************************************** * Helper function diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 93bff3213823..81b80a4d3d3a 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -133,7 +133,7 @@ struct rand_data { #define JENT_ENTROPY_SAFETY_FACTOR 64 #include -#include "jitterentropy.h" +#include /*************************************************************************** * Adaptive Proportion Test diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 55f48375e3fe..93d52a419470 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -452,4 +452,6 @@ config RANDOM_TRUST_BOOTLOADER only mixes the entropy pool. This can also be configured at boot with "random.trust_bootloader=on/off". 
+source "drivers/char/lrng/Kconfig" + endmenu diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 264eb398fdd4..7371f7464a49 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -3,7 +3,14 @@ # Makefile for the kernel character device drivers. # -obj-y += mem.o random.o +obj-y += mem.o + +ifeq ($(CONFIG_LRNG),y) + obj-y += lrng/ +else + obj-y += random.o +endif + obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o obj-y += misc.o obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o diff --git a/drivers/char/lrng/Kconfig b/drivers/char/lrng/Kconfig new file mode 100644 index 000000000000..658f89f6b35f --- /dev/null +++ b/drivers/char/lrng/Kconfig @@ -0,0 +1,613 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Linux Random Number Generator configuration +# + +choice + prompt "Random Number Generator Implementation" + default RANDOM_DEFAULT_IMPL + help + Select the random number generator implementation that is + accessible via /dev/random, /dev/urandom, getrandom(2), and + the in-kernel function get_random_bytes. + +config RANDOM_DEFAULT_IMPL + bool "Default Implementation" + help + The default random number generator as provided with + drivers/char/random.c is selected with this option. + +config LRNG + bool "LRNG Implementation with SP800-90A/B/C compliance" + select CRYPTO_LIB_SHA256 if CRYPTO + help + The Linux Random Number Generator (LRNG) generates entropy + from different entropy sources. Each entropy source can + be enabled and configured independently. The interrupt + entropy source can be configured to be SP800-90B compliant. + The entire LRNG can be configured to be SP800-90C compliant. + Runtime-switchable cryptographic support is available. + The LRNG delivers significant entropy during boot. + +endchoice + +menu "Linux Random Number Generator Configuration" + depends on LRNG + +if LRNG + +menu "Specific DRNG seeding strategies" + +config LRNG_OVERSAMPLE_ENTROPY_SOURCES + bool "Oversample entropy sources" + default n + help + When enabling this option, the entropy sources are + over-sampled with the following approach: First, the + the entropy sources are requested to provide 64 bits more + entropy than the size of the entropy buffer. For example, + if the entropy buffer is 256 bits, 320 bits of entropy + is requested to fill that buffer. + + Second, the seed operation of the deterministic RNG + requests 128 bits more data from each entropy source than + the security strength of the DRNG during initialization. + A prerequisite for this operation is that the digest size + of the used hash must be at least equally large to generate + that buffer. If the prerequisite is not met, this + oversampling is not applied. + + This strategy is intended to offset the asymptotic entropy + increase to reach full entropy in a buffer. + + The strategy is consistent with the requirements in + NIST SP800-90C and is only enforced with fips=1. + + If unsure, say N. + +config LRNG_OVERSAMPLE_ES_BITS + int + default 0 if !LRNG_OVERSAMPLE_ENTROPY_SOURCES + default 64 if LRNG_OVERSAMPLE_ENTROPY_SOURCES + +config LRNG_SEED_BUFFER_INIT_ADD_BITS + int + default 0 if !LRNG_OVERSAMPLE_ENTROPY_SOURCES + default 128 if LRNG_OVERSAMPLE_ENTROPY_SOURCES + +endmenu # "Specific DRNG seeding strategies" + +menu "Entropy Source Configuration" + +comment "Interrupt Entropy Source" + +config LRNG_IRQ + bool "Enable Interrupt Entropy Source as LRNG Seed Source" + default y + help + The LRNG models an entropy source based on the timing of the + occurrence of interrupts. 
Enable this option to enable this + IRQ entropy source. + + The IRQ entropy source is triggered every time an interrupt + arrives and thus causes the interrupt handler to execute + slightly longer. Disabling the IRQ entropy source implies + that the performance penalty on the interrupt handler added + by the LRNG is eliminated. Yet, this entropy source is + considered to be the internal entropy source of the LRNG. + Thus, only disable it if you ensured that other entropy + sources are available that supply the LRNG with entropy. + + If you disable the IRQ entropy source, you MUST ensure + one or more entropy sources collectively have the + capability to deliver sufficient entropy with one invocation + at a rate compliant to the security strength of the DRNG + (usually 256 bits of entropy). In addition, if those + entropy sources do not deliver sufficient entropy during + first request, the reseed must be triggered from user + space or kernel space when sufficient entropy is considered + to be present. + + If unsure, say Y. + +choice + prompt "Continuous entropy compression boot time setting" + default LRNG_CONTINUOUS_COMPRESSION_ENABLED + depends on LRNG_IRQ + help + Select the default behavior of the interrupt entropy source + continuous compression operation. + + The Linux RNG collects entropy data during each interrupt. + For performance reasons, a amount of entropy data defined by + the LRNG entropy collection pool size is concatenated into + an array. When that array is filled up, a hash is calculated + to compress the entropy. That hash is calculated in + interrupt context. + + In case such hash calculation in interrupt context is deemed + too time-consuming, the continuous compression operation + can be disabled. If disabled, the collection of entropy will + not trigger a hash compression operation in interrupt context. + The compression happens only when the DRNG is reseeded which is + in process context. This implies that old entropy data + collected after the last DRNG-reseed is overwritten with newer + entropy data once the collection pool is full instead of + retaining its entropy with the compression operation. + + config LRNG_CONTINUOUS_COMPRESSION_ENABLED + bool "Enable continuous compression (default)" + + config LRNG_CONTINUOUS_COMPRESSION_DISABLED + bool "Disable continuous compression" +endchoice + +config LRNG_ENABLE_CONTINUOUS_COMPRESSION + bool + default y if LRNG_CONTINUOUS_COMPRESSION_ENABLED + default n if LRNG_CONTINUOUS_COMPRESSION_DISABLED + +config LRNG_SWITCHABLE_CONTINUOUS_COMPRESSION + bool "Runtime-switchable continuous entropy compression" + depends on LRNG_IRQ + help + Per default, the interrupt entropy source continuous + compression operation behavior is hard-wired into the kernel. + Enable this option to allow it to be configurable at boot time. + + To modify the default behavior of the continuous + compression operation, use the kernel command line option + of lrng_sw_noise.lrng_pcpu_continuous_compression. + + If unsure, say N. + +choice + prompt "LRNG Entropy Collection Pool Size" + default LRNG_COLLECTION_SIZE_1024 + depends on LRNG_IRQ + help + Select the size of the LRNG entropy collection pool + storing data for the interrupt entropy source without + performing a compression operation. The larger the + collection size is, the faster the average interrupt + handling will be. The collection size represents the + number of bytes of the per-CPU memory used to batch + up entropy event data. + + The default value is good for regular operations. 
Choose + larger sizes for servers that have no memory limitations. + If runtime memory is precious, choose a smaller size. + + The collection size is unrelated to the entropy rate + or the amount of entropy the LRNG can process. + + config LRNG_COLLECTION_SIZE_32 + depends on LRNG_CONTINUOUS_COMPRESSION_ENABLED + depends on !LRNG_SWITCHABLE_CONTINUOUS_COMPRESSION + depends on !LRNG_OVERSAMPLE_ENTROPY_SOURCES + bool "32 interrupt events" + + config LRNG_COLLECTION_SIZE_256 + depends on !LRNG_OVERSAMPLE_ENTROPY_SOURCES + bool "256 interrupt events" + + config LRNG_COLLECTION_SIZE_512 + bool "512 interrupt events" + + config LRNG_COLLECTION_SIZE_1024 + bool "1024 interrupt events (default)" + + config LRNG_COLLECTION_SIZE_2048 + bool "2048 interrupt events" + + config LRNG_COLLECTION_SIZE_4096 + bool "4096 interrupt events" + + config LRNG_COLLECTION_SIZE_8192 + bool "8192 interrupt events" + +endchoice + +config LRNG_COLLECTION_SIZE + int + default 32 if LRNG_COLLECTION_SIZE_32 + default 256 if LRNG_COLLECTION_SIZE_256 + default 512 if LRNG_COLLECTION_SIZE_512 + default 1024 if LRNG_COLLECTION_SIZE_1024 + default 2048 if LRNG_COLLECTION_SIZE_2048 + default 4096 if LRNG_COLLECTION_SIZE_4096 + default 8192 if LRNG_COLLECTION_SIZE_8192 + +config LRNG_HEALTH_TESTS + bool "Enable interrupt entropy source online health tests" + depends on LRNG_IRQ + help + The online health tests applied to the interrupt entropy + source validate the noise source at runtime for fatal + errors. These tests include SP800-90B compliant tests + which are invoked if the system is booted with fips=1. + In case of fatal errors during active SP800-90B tests, + the issue is logged and the noise data is discarded. + These tests are required for full compliance of the + interrupt entropy source with SP800-90B. + + If unsure, say Y. + +config LRNG_RCT_BROKEN + bool "SP800-90B RCT with dangerous low cutoff value" + depends on LRNG_HEALTH_TESTS + depends on BROKEN + default n + help + This option enables a dangerously low SP800-90B repetitive + count test (RCT) cutoff value which makes it very likely + that the RCT is triggered to raise a self test failure. + + This option is ONLY intended for developers wanting to + test the effectiveness of the SP800-90B RCT health test. + + If unsure, say N. + +config LRNG_APT_BROKEN + bool "SP800-90B APT with dangerous low cutoff value" + depends on LRNG_HEALTH_TESTS + depends on BROKEN + default n + help + This option enables a dangerously low SP800-90B adaptive + proportion test (APT) cutoff value which makes it very + likely that the APT is triggered to raise a self test + failure. + + This option is ONLY intended for developers wanting to + test the effectiveness of the SP800-90B APT health test. + + If unsure, say N. + +# Default taken from SP800-90B sec 4.4.1 - significance level 2^-30 +config LRNG_RCT_CUTOFF + int + default 31 if !LRNG_RCT_BROKEN + default 1 if LRNG_RCT_BROKEN + +# Default taken from SP800-90B sec 4.4.2 - significance level 2^-30 +config LRNG_APT_CUTOFF + int + default 325 if !LRNG_APT_BROKEN + default 32 if LRNG_APT_BROKEN + +config LRNG_IRQ_ENTROPY_RATE + int "Interrupt Entropy Source Entropy Rate" + depends on LRNG_IRQ + range 256 4294967295 + default 256 + help + The LRNG will collect the configured number of interrupts to + obtain 256 bits of entropy. This value can be set to any between + 256 and 4294967295. The LRNG guarantees that this value is not + lower than 256. This lower limit implies that one interrupt event + is credited with one bit of entropy. 
This value is subject to the + increase by the oversampling factor, if no high-resolution timer + is found. + + In order to effectively disable the interrupt entropy source, + the option has to be set to 4294967295. In this case, the + interrupt entropy source will still deliver data but without + being credited with entropy. + +comment "Jitter RNG Entropy Source" + +config LRNG_JENT + bool "Enable Jitter RNG as LRNG Seed Source" + depends on CRYPTO + select CRYPTO_JITTERENTROPY + help + The Linux RNG may use the Jitter RNG as noise source. Enabling + this option enables the use of the Jitter RNG. Its default + entropy level is 16 bits of entropy per 256 data bits delivered + by the Jitter RNG. This entropy level can be changed at boot + time or at runtime with the lrng_base.jitterrng configuration + variable. + +config LRNG_JENT_ENTROPY_RATE + int "Jitter RNG Entropy Source Entropy Rate" + depends on LRNG_JENT + range 0 256 + default 16 + help + The option defines the amount of entropy the LRNG applies to 256 + bits of data obtained from the Jitter RNG entropy source. The + LRNG enforces the limit that this value must be in the range + between 0 and 256. + + When configuring this value to 0, the Jitter RNG entropy source + will provide 256 bits of data without being credited to contain + entropy. + +comment "CPU Entropy Source" + +config LRNG_CPU + bool "Enable CPU Entropy Source as LRNG Seed Source" + default y + help + Current CPUs commonly contain entropy sources which can be + used to seed the LRNG. For example, the Intel RDSEED + instruction, or the POWER DARN instruction will be sourced + to seed the LRNG if this option is enabled. + + Note, if this option is enabled and the underlying CPU + does not offer such entropy source, the LRNG will automatically + detect this and ignore the hardware. + +config LRNG_CPU_FULL_ENT_MULTIPLIER + int + default 1 if !LRNG_TEST_CPU_ES_COMPRESSION + default 123 if LRNG_TEST_CPU_ES_COMPRESSION + +config LRNG_CPU_ENTROPY_RATE + int "CPU Entropy Source Entropy Rate" + depends on LRNG_CPU + range 0 256 + default 8 + help + The option defines the amount of entropy the LRNG applies to 256 + bits of data obtained from the CPU entropy source. The LRNG + enforces the limit that this value must be in the range between + 0 and 256. + + When configuring this value to 0, the CPU entropy source will + provide 256 bits of data without being credited to contain + entropy. + + Note, this option is overwritten when the option + CONFIG_RANDOM_TRUST_CPU is set. + +endmenu # "Entropy Source Configuration" + +menuconfig LRNG_DRNG_SWITCH + bool "Support DRNG runtime switching" + help + The Linux RNG per default uses a ChaCha20 DRNG that is + accessible via the external interfaces. With this configuration + option other DRNGs can be selected and loaded at runtime. + +if LRNG_DRNG_SWITCH + +config LRNG_KCAPI_HASH + bool + select CRYPTO_HASH + +config LRNG_DRBG + tristate "SP800-90A support for the LRNG" + depends on CRYPTO + select CRYPTO_DRBG_MENU + select CRYPTO_SHA512 + select LRNG_KCAPI_HASH + help + Enable the SP800-90A DRBG support for the LRNG. Once the + module is loaded, output from /dev/random, /dev/urandom, + getrandom(2), or get_random_bytes_full is provided by a DRBG. + +config LRNG_KCAPI + tristate "Kernel Crypto API support for the LRNG" + depends on CRYPTO + depends on !LRNG_DRBG + select CRYPTO_RNG + select LRNG_KCAPI_HASH + help + Enable the support for generic pseudo-random number + generators offered by the kernel crypto API with the + LRNG. 
Once the module is loaded, output from /dev/random, + /dev/urandom, getrandom(2), or get_random_bytes is + provided by the selected kernel crypto API RNG. +endif # LRNG_DRNG_SWITCH + +menuconfig LRNG_TESTING_MENU + bool "LRNG testing interfaces" + depends on DEBUG_FS + help + Enable one or more of the following test interfaces. + + If unsure, say N. + +if LRNG_TESTING_MENU + +config LRNG_RAW_HIRES_ENTROPY + bool "Enable entropy test interface to hires timer noise source" + default y + help + The test interface allows a privileged process to capture + the raw unconditioned high resolution time stamp noise that + is collected by the LRNG for statistical analysis. Extracted + noise data is not used to seed the LRNG. + + The raw noise data can be obtained using the lrng_raw_hires + debugfs file. Using the option lrng_testing.boot_raw_hires_test=1 + the raw noise of the first 1000 entropy events since boot + can be sampled. + +config LRNG_RAW_JIFFIES_ENTROPY + bool "Enable entropy test interface to Jiffies noise source" + help + The test interface allows a privileged process to capture + the raw unconditioned Jiffies that is collected by + the LRNG for statistical analysis. This data is used for + seeding the LRNG if a high-resolution time stamp is not + available. If a high-resolution time stamp is detected, + the Jiffies value is not collected by the LRNG and no + data is provided via the test interface. Extracted noise + data is not used to seed the random number generator. + + The raw noise data can be obtained using the lrng_raw_jiffies + debugfs file. Using the option lrng_testing.boot_raw_jiffies_test=1 + the raw noise of the first 1000 entropy events since boot + can be sampled. + +config LRNG_RAW_IRQ_ENTROPY + bool "Enable entropy test interface to IRQ number noise source" + help + The test interface allows a privileged process to capture + the raw unconditioned interrupt number that is collected by + the LRNG for statistical analysis. This data is used for + seeding the random32 PRNG external to the LRNG if a + high-resolution time stamp is available or it will be used to + seed the LRNG otherwise. Extracted noise data is not used to + seed the random number generator. + + The raw noise data can be obtained using the lrng_raw_irq + debugfs file. Using the option lrng_testing.boot_raw_irq_test=1 + the raw noise of the first 1000 entropy events since boot + can be sampled. + +config LRNG_RAW_IRQFLAGS_ENTROPY + bool "Enable entropy test interface to IRQ flags noise source" + help + The test interface allows a privileged process to capture + the raw unconditioned interrupt flags that is collected by + the LRNG for statistical analysis. This data is used for + seeding the random32 PRNG external to the LRNG if a + high-resolution time stamp is available or it will be used to + seed the LRNG otherwise. Extracted noise data is not used to + seed the random number generator. + + The raw noise data can be obtained using the lrng_raw_irqflags + debugfs file. Using the option lrng_testing.boot_raw_irqflags_test=1 + the raw noise of the first 1000 entropy events since boot + can be sampled. + +config LRNG_RAW_RETIP_ENTROPY + bool "Enable entropy test interface to RETIP value noise source" + help + The test interface allows a privileged process to capture + the raw unconditioned return instruction pointer value + that is collected by the LRNG for statistical analysis. 
+ This data is used for seeding the random32 PRNG external + to the LRNG if a high-resolution time stamp is available or + it will be used to seed the LRNG otherwise. Extracted noise + data is not used to seed the random number generator. + + The raw noise data can be obtained using the lrng_raw_retip + debugfs file. Using the option lrng_testing.boot_raw_retip_test=1 + the raw noise of the first 1000 entropy events since boot + can be sampled. + +config LRNG_RAW_REGS_ENTROPY + bool "Enable entropy test interface to IRQ register value noise source" + help + The test interface allows a privileged process to capture + the raw unconditioned interrupt register value that is + collected by the LRNG for statistical analysis. Extracted noise + data is not used to seed the random number generator. + + The raw noise data can be obtained using the lrng_raw_regs + debugfs file. Using the option lrng_testing.boot_raw_regs_test=1 + the raw noise of the first 1000 entropy events since boot + can be sampled. + +config LRNG_RAW_ARRAY + bool "Enable test interface to LRNG raw entropy storage array" + help + The test interface allows a privileged process to capture + the raw noise data that is collected by the LRNG + in the per-CPU array for statistical analysis. The purpose + of this interface is to verify that the array handling code + truly only concatenates data and provides the same entropy + rate as the raw unconditioned noise source when assessing + the collected data byte-wise. + + The data can be obtained using the lrng_raw_array debugfs + file. Using the option lrng_testing.boot_raw_array=1 + the raw noise of the first 1000 entropy events since boot + can be sampled. + +config LRNG_IRQ_PERF + bool "Enable LRNG interrupt performance monitor" + help + With this option, the performance monitor of the LRNG + interrupt handling code is enabled. The file provides + the execution time of the interrupt handler in + cycles. + + The interrupt performance data can be obtained using + the lrng_irq_perf debugfs file. Using the option + lrng_testing.boot_irq_perf=1 the performance data of + the first 1000 entropy events since boot can be sampled. + +config LRNG_ACVT_HASH + bool "Enable LRNG ACVT Hash interface" + help + With this option, the LRNG built-in hash function used for + auxiliary pool management and prior to switching the + cryptographic backends is made available for ACVT. The + interface allows writing of the data to be hashed + into the interface. The read operation triggers the hash + operation to generate message digest. + + The ACVT interface is available with the lrng_acvt_hash + debugfs file. + +config LRNG_RUNTIME_ES_CONFIG + bool "Enable runtime configuration of entropy sources" + help + When enabling this option, the LRNG provides the mechanism + allowing to alter the entropy rate of each entropy source + during boot time and runtime. + + The following interfaces are available: + lrng_archrandom.archrandom for the CPU entropy source, + lrng_jent.jitterrng for the Jitter RNG entropy source, and + lrng_sw_noise.irq_entropy for the interrupt entropy source. + +config LRNG_RUNTIME_MAX_WO_RESEED_CONFIG + bool "Enable runtime configuration of max reseed threshold" + help + When enabling this option, the LRNG provides an interface + allowing the setting of the maximum number of DRNG generate + operations without a reseed that has full entropy. The + interface is lrng_drng.max_wo_reseed. 
+ +config LRNG_TEST_CPU_ES_COMPRESSION + bool "Force CPU ES compression operation" + help + When enabling this option, the CPU ES compression operation + is forced by setting an arbitrary value > 1 for the data + multiplier even when the CPU ES would deliver full entropy. + This allows testing of the compression operation. It + therefore forces to pull more data from the CPU ES + than what may be required. + +config LRNG_TESTING + bool + default y if (LRNG_RAW_HIRES_ENTROPY || LRNG_RAW_JIFFIES_ENTROPY ||LRNG_RAW_IRQ_ENTROPY || LRNG_RAW_IRQFLAGS_ENTROPY || LRNG_RAW_RETIP_ENTROPY || LRNG_RAW_REGS_ENTROPY || LRNG_RAW_ARRAY || LRNG_IRQ_PERF || LRNG_ACVT_HASH) + +endif #LRNG_TESTING_MENU + +config LRNG_SELFTEST + bool "Enable power-on and on-demand self-tests" + help + The power-on self-tests are executed during boot time + covering the ChaCha20 DRNG, the hash operation used for + processing the entropy pools and the auxiliary pool, and + the time stamp management of the LRNG. + + The on-demand self-tests are triggered by writing any + value into the SysFS file selftest_status. At the same + time, when reading this file, the test status is + returned. A zero indicates that all tests were executed + successfully. + + If unsure, say Y. + +if LRNG_SELFTEST + +config LRNG_SELFTEST_PANIC + bool "Panic the kernel upon self-test failure" + help + If the option is enabled, the kernel is terminated if an + LRNG power-on self-test failure is detected. + +endif # LRNG_SELFTEST + +endif # LRNG + +endmenu # LRNG diff --git a/drivers/char/lrng/Makefile b/drivers/char/lrng/Makefile new file mode 100644 index 000000000000..e4f7f9702eb4 --- /dev/null +++ b/drivers/char/lrng/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux Random Number Generator. +# + +obj-y += lrng_es_mgr.o lrng_aux.o \ + lrng_drng.o lrng_chacha20.o \ + lrng_interfaces.o lrng_es_aux.o + +obj-$(CONFIG_LRNG_IRQ) += lrng_es_irq.o +obj-$(CONFIG_SYSCTL) += lrng_proc.o +obj-$(CONFIG_NUMA) += lrng_numa.o +obj-$(CONFIG_LRNG_CPU) += lrng_es_archrandom.o +obj-$(CONFIG_LRNG_DRNG_SWITCH) += lrng_switch.o +obj-$(CONFIG_LRNG_KCAPI_HASH) += lrng_kcapi_hash.o +obj-$(CONFIG_LRNG_DRBG) += lrng_drbg.o +obj-$(CONFIG_LRNG_KCAPI) += lrng_kcapi.o +obj-$(CONFIG_LRNG_JENT) += lrng_es_jent.o +obj-$(CONFIG_LRNG_HEALTH_TESTS) += lrng_health.o +obj-$(CONFIG_LRNG_TESTING) += lrng_testing.o +obj-$(CONFIG_LRNG_SELFTEST) += lrng_selftest.o diff --git a/drivers/char/lrng/lrng_aux.c b/drivers/char/lrng/lrng_aux.c new file mode 100644 index 000000000000..e3b994f6e4c1 --- /dev/null +++ b/drivers/char/lrng/lrng_aux.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG auxiliary interfaces + * + * Copyright (C) 2019 - 2021 Stephan Mueller + * Copyright (C) 2017 Jason A. Donenfeld . All + * Rights Reserved. + * Copyright (C) 2016 Jason Cooper + */ + +#include +#include + +#include "lrng_internal.h" + +struct batched_entropy { + union { + u64 entropy_u64[LRNG_DRNG_BLOCKSIZE / sizeof(u64)]; + u32 entropy_u32[LRNG_DRNG_BLOCKSIZE / sizeof(u32)]; + }; + unsigned int position; + spinlock_t batch_lock; +}; + +/* + * Get a random word for internal kernel use only. The quality of the random + * number is as good as /dev/urandom, but there is no backtrack protection, + * with the goal of being quite fast and not depleting entropy. 
+ */ +static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { + .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), +}; + +u64 get_random_u64(void) +{ + u64 ret; + unsigned long flags; + struct batched_entropy *batch; + + lrng_debug_report_seedlevel("get_random_u64"); + + batch = raw_cpu_ptr(&batched_entropy_u64); + spin_lock_irqsave(&batch->batch_lock, flags); + if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { + lrng_drng_get_atomic((u8 *)batch->entropy_u64, + LRNG_DRNG_BLOCKSIZE); + batch->position = 0; + } + ret = batch->entropy_u64[batch->position++]; + spin_unlock_irqrestore(&batch->batch_lock, flags); + return ret; +} +EXPORT_SYMBOL(get_random_u64); + +static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { + .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock), +}; + +u32 get_random_u32(void) +{ + u32 ret; + unsigned long flags; + struct batched_entropy *batch; + + lrng_debug_report_seedlevel("get_random_u32"); + + batch = raw_cpu_ptr(&batched_entropy_u32); + spin_lock_irqsave(&batch->batch_lock, flags); + if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { + lrng_drng_get_atomic((u8 *)batch->entropy_u32, + LRNG_DRNG_BLOCKSIZE); + batch->position = 0; + } + ret = batch->entropy_u32[batch->position++]; + spin_unlock_irqrestore(&batch->batch_lock, flags); + return ret; +} +EXPORT_SYMBOL(get_random_u32); + +/* + * It's important to invalidate all potential batched entropy that might + * be stored before the crng is initialized, which we can do lazily by + * simply resetting the counter to zero so that it's re-extracted on the + * next usage. + */ +void invalidate_batched_entropy(void) +{ + int cpu; + unsigned long flags; + + for_each_possible_cpu(cpu) { + struct batched_entropy *batched_entropy; + + batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu); + spin_lock_irqsave(&batched_entropy->batch_lock, flags); + batched_entropy->position = 0; + spin_unlock(&batched_entropy->batch_lock); + + batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu); + spin_lock(&batched_entropy->batch_lock); + batched_entropy->position = 0; + spin_unlock_irqrestore(&batched_entropy->batch_lock, flags); + } +} + +/* + * randomize_page - Generate a random, page aligned address + * @start: The smallest acceptable address the caller will take. + * @range: The size of the area, starting at @start, within which the + * random address must fall. + * + * If @start + @range would overflow, @range is capped. + * + * NOTE: Historical use of randomize_range, which this replaces, presumed that + * @start was already page aligned. We now align it regardless. + * + * Return: A page aligned address within [start, start + range). On error, + * @start is returned. + */ +unsigned long randomize_page(unsigned long start, unsigned long range) +{ + if (!PAGE_ALIGNED(start)) { + range -= PAGE_ALIGN(start) - start; + start = PAGE_ALIGN(start); + } + + if (start > ULONG_MAX - range) + range = ULONG_MAX - start; + + range >>= PAGE_SHIFT; + + if (range == 0) + return start; + + return start + (get_random_long() % range << PAGE_SHIFT); +} diff --git a/drivers/char/lrng/lrng_chacha20.c b/drivers/char/lrng/lrng_chacha20.c new file mode 100644 index 000000000000..b5387bb33095 --- /dev/null +++ b/drivers/char/lrng/lrng_chacha20.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * Backend for the LRNG providing the cryptographic primitives using + * ChaCha20 cipher implementations. 
+ * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include + +#include "lrng_chacha20.h" +#include "lrng_internal.h" + +/******************************* ChaCha20 DRNG *******************************/ + +#define CHACHA_BLOCK_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32)) + +struct chacha20_state { + struct chacha20_block block; +}; + +/* + * Have a static memory blocks for the ChaCha20 DRNG instance to avoid calling + * kmalloc too early in the boot cycle. For subsequent allocation requests, + * such as per-NUMA-node DRNG instances, kmalloc will be used. + */ +struct chacha20_state chacha20; + +/* + * Update of the ChaCha20 state by either using an unused buffer part or by + * generating one ChaCha20 block which is half of the state of the ChaCha20. + * The block is XORed into the key part of the state. This shall ensure + * backtracking resistance as well as a proper mix of the ChaCha20 state once + * the key is injected. + */ +static void lrng_chacha20_update(struct chacha20_state *chacha20_state, + __le32 *buf, u32 used_words) +{ + struct chacha20_block *chacha20 = &chacha20_state->block; + u32 i; + __le32 tmp[CHACHA_BLOCK_WORDS]; + + BUILD_BUG_ON(sizeof(struct chacha20_block) != CHACHA_BLOCK_SIZE); + BUILD_BUG_ON(CHACHA_BLOCK_SIZE != 2 * CHACHA_KEY_SIZE); + + if (used_words > CHACHA_KEY_SIZE_WORDS) { + chacha20_block(&chacha20->constants[0], (u8 *)tmp); + for (i = 0; i < CHACHA_KEY_SIZE_WORDS; i++) + chacha20->key.u[i] ^= le32_to_cpu(tmp[i]); + memzero_explicit(tmp, sizeof(tmp)); + } else { + for (i = 0; i < CHACHA_KEY_SIZE_WORDS; i++) + chacha20->key.u[i] ^= le32_to_cpu(buf[i + used_words]); + } + + /* Deterministic increment of nonce as required in RFC 7539 chapter 4 */ + chacha20->nonce[0]++; + if (chacha20->nonce[0] == 0) { + chacha20->nonce[1]++; + if (chacha20->nonce[1] == 0) + chacha20->nonce[2]++; + } + + /* Leave counter untouched as it is start value is undefined in RFC */ +} + +/* + * Seed the ChaCha20 DRNG by injecting the input data into the key part of + * the ChaCha20 state. If the input data is longer than the ChaCha20 key size, + * perform a ChaCha20 operation after processing of key size input data. + * This operation shall spread out the entropy into the ChaCha20 state before + * new entropy is injected into the key part. + */ +static int lrng_cc20_drng_seed_helper(void *drng, const u8 *inbuf, u32 inbuflen) +{ + struct chacha20_state *chacha20_state = (struct chacha20_state *)drng; + struct chacha20_block *chacha20 = &chacha20_state->block; + + while (inbuflen) { + u32 i, todo = min_t(u32, inbuflen, CHACHA_KEY_SIZE); + + for (i = 0; i < todo; i++) + chacha20->key.b[i] ^= inbuf[i]; + + /* Break potential dependencies between the inbuf key blocks */ + lrng_chacha20_update(chacha20_state, NULL, + CHACHA_BLOCK_WORDS); + inbuf += todo; + inbuflen -= todo; + } + + return 0; +} + +/* + * Chacha20 DRNG generation of random numbers: the stream output of ChaCha20 + * is the random number. After the completion of the generation of the + * stream, the entire ChaCha20 state is updated. + * + * Note, as the ChaCha20 implements a 32 bit counter, we must ensure + * that this function is only invoked for at most 2^32 - 1 ChaCha20 blocks + * before a reseed or an update happens. This is ensured by the variable + * outbuflen which is a 32 bit integer defining the number of bytes to be + * generated by the ChaCha20 DRNG. 
At the end of this function, an update + * operation is invoked which implies that the 32 bit counter will never be + * overflown in this implementation. + */ +static int lrng_cc20_drng_generate_helper(void *drng, u8 *outbuf, u32 outbuflen) +{ + struct chacha20_state *chacha20_state = (struct chacha20_state *)drng; + struct chacha20_block *chacha20 = &chacha20_state->block; + __le32 aligned_buf[CHACHA_BLOCK_WORDS]; + u32 ret = outbuflen, used = CHACHA_BLOCK_WORDS; + int zeroize_buf = 0; + + while (outbuflen >= CHACHA_BLOCK_SIZE) { + chacha20_block(&chacha20->constants[0], outbuf); + outbuf += CHACHA_BLOCK_SIZE; + outbuflen -= CHACHA_BLOCK_SIZE; + } + + if (outbuflen) { + chacha20_block(&chacha20->constants[0], (u8 *)aligned_buf); + memcpy(outbuf, aligned_buf, outbuflen); + used = ((outbuflen + sizeof(aligned_buf[0]) - 1) / + sizeof(aligned_buf[0])); + zeroize_buf = 1; + } + + lrng_chacha20_update(chacha20_state, aligned_buf, used); + + if (zeroize_buf) + memzero_explicit(aligned_buf, sizeof(aligned_buf)); + + return ret; +} + +void lrng_cc20_init_state(struct chacha20_state *state) +{ + lrng_cc20_init_rfc7539(&state->block); +} + +/* + * Allocation of the DRNG state + */ +static void *lrng_cc20_drng_alloc(u32 sec_strength) +{ + struct chacha20_state *state = NULL; + + if (sec_strength > CHACHA_KEY_SIZE) { + pr_err("Security strength of ChaCha20 DRNG (%u bits) lower than requested by LRNG (%u bits)\n", + CHACHA_KEY_SIZE * 8, sec_strength * 8); + return ERR_PTR(-EINVAL); + } + if (sec_strength < CHACHA_KEY_SIZE) + pr_warn("Security strength of ChaCha20 DRNG (%u bits) higher than requested by LRNG (%u bits)\n", + CHACHA_KEY_SIZE * 8, sec_strength * 8); + + state = kmalloc(sizeof(struct chacha20_state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + pr_debug("memory for ChaCha20 core allocated\n"); + + lrng_cc20_init_state(state); + + return state; +} + +static void lrng_cc20_drng_dealloc(void *drng) +{ + struct chacha20_state *chacha20_state = (struct chacha20_state *)drng; + + if (drng == &chacha20) { + memzero_explicit(chacha20_state, sizeof(*chacha20_state)); + pr_debug("static ChaCha20 core zeroized\n"); + return; + } + + pr_debug("ChaCha20 core zeroized and freed\n"); + kfree_sensitive(chacha20_state); +} + +/******************************* Hash Operation *******************************/ + +#ifdef CONFIG_CRYPTO_LIB_SHA256 + +#include + +static u32 lrng_cc20_hash_digestsize(void *hash) +{ + return SHA256_DIGEST_SIZE; +} + +static int lrng_cc20_hash_init(struct shash_desc *shash, void *hash) +{ + /* + * We do not need a TFM - we only need sufficient space for + * struct sha256_state on the stack. + */ + sha256_init(shash_desc_ctx(shash)); + return 0; +} + +static int lrng_cc20_hash_update(struct shash_desc *shash, + const u8 *inbuf, u32 inbuflen) +{ + sha256_update(shash_desc_ctx(shash), inbuf, inbuflen); + return 0; +} + +static int lrng_cc20_hash_final(struct shash_desc *shash, u8 *digest) +{ + sha256_final(shash_desc_ctx(shash), digest); + return 0; +} + +static const char *lrng_cc20_hash_name(void) +{ + return "SHA-256"; +} + +static void lrng_cc20_hash_desc_zero(struct shash_desc *shash) +{ + memzero_explicit(shash_desc_ctx(shash), sizeof(struct sha256_state)); +} + +#else /* CONFIG_CRYPTO_LIB_SHA256 */ + +#include +#include + +/* + * If the SHA-256 support is not compiled, we fall back to SHA-1 that is always + * compiled and present in the kernel. 
+ */ +static u32 lrng_cc20_hash_digestsize(void *hash) +{ + return SHA1_DIGEST_SIZE; +} + +static void lrng_sha1_block_fn(struct sha1_state *sctx, const u8 *src, + int blocks) +{ + u32 temp[SHA1_WORKSPACE_WORDS]; + + while (blocks--) { + sha1_transform(sctx->state, src, temp); + src += SHA1_BLOCK_SIZE; + } + memzero_explicit(temp, sizeof(temp)); +} + +static int lrng_cc20_hash_init(struct shash_desc *shash, void *hash) +{ + /* + * We do not need a TFM - we only need sufficient space for + * struct sha1_state on the stack. + */ + sha1_base_init(shash); + return 0; +} + +static int lrng_cc20_hash_update(struct shash_desc *shash, + const u8 *inbuf, u32 inbuflen) +{ + return sha1_base_do_update(shash, inbuf, inbuflen, lrng_sha1_block_fn); +} + +static int lrng_cc20_hash_final(struct shash_desc *shash, u8 *digest) +{ + return sha1_base_do_finalize(shash, lrng_sha1_block_fn) ?: + sha1_base_finish(shash, digest); +} + +static const char *lrng_cc20_hash_name(void) +{ + return "SHA-1"; +} + +static void lrng_cc20_hash_desc_zero(struct shash_desc *shash) +{ + memzero_explicit(shash_desc_ctx(shash), sizeof(struct sha1_state)); +} + +#endif /* CONFIG_CRYPTO_LIB_SHA256 */ + +static void *lrng_cc20_hash_alloc(void) +{ + pr_info("Hash %s allocated\n", lrng_cc20_hash_name()); + return NULL; +} + +static void lrng_cc20_hash_dealloc(void *hash) +{ +} + +static const char *lrng_cc20_drng_name(void) +{ + return "ChaCha20 DRNG"; +} + +const struct lrng_crypto_cb lrng_cc20_crypto_cb = { + .lrng_drng_name = lrng_cc20_drng_name, + .lrng_hash_name = lrng_cc20_hash_name, + .lrng_drng_alloc = lrng_cc20_drng_alloc, + .lrng_drng_dealloc = lrng_cc20_drng_dealloc, + .lrng_drng_seed_helper = lrng_cc20_drng_seed_helper, + .lrng_drng_generate_helper = lrng_cc20_drng_generate_helper, + .lrng_hash_alloc = lrng_cc20_hash_alloc, + .lrng_hash_dealloc = lrng_cc20_hash_dealloc, + .lrng_hash_digestsize = lrng_cc20_hash_digestsize, + .lrng_hash_init = lrng_cc20_hash_init, + .lrng_hash_update = lrng_cc20_hash_update, + .lrng_hash_final = lrng_cc20_hash_final, + .lrng_hash_desc_zero = lrng_cc20_hash_desc_zero, +}; diff --git a/drivers/char/lrng/lrng_chacha20.h b/drivers/char/lrng/lrng_chacha20.h new file mode 100644 index 000000000000..bd0c0bee38f3 --- /dev/null +++ b/drivers/char/lrng/lrng_chacha20.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * LRNG ChaCha20 definitions + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#include + +/* State according to RFC 7539 section 2.3 */ +struct chacha20_block { + u32 constants[4]; + union { +#define CHACHA_KEY_SIZE_WORDS (CHACHA_KEY_SIZE / sizeof(u32)) + u32 u[CHACHA_KEY_SIZE_WORDS]; + u8 b[CHACHA_KEY_SIZE]; + } key; + u32 counter; + u32 nonce[3]; +}; + +static inline void lrng_cc20_init_rfc7539(struct chacha20_block *chacha20) +{ + chacha_init_consts(chacha20->constants); +} diff --git a/drivers/char/lrng/lrng_drbg.c b/drivers/char/lrng/lrng_drbg.c new file mode 100644 index 000000000000..6ca6b05eccf4 --- /dev/null +++ b/drivers/char/lrng/lrng_drbg.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * Backend for the LRNG providing the cryptographic primitives using the + * kernel crypto API and its DRBG. + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include + +#include "lrng_kcapi_hash.h" + +/* + * Define a DRBG plus a hash / MAC used to extract data from the entropy pool. 
+ * For LRNG_HASH_NAME you can use a hash or a MAC (HMAC or CMAC) of your choice + * (Note, you should use the suggested selections below -- using SHA-1 or MD5 + * is not wise). The idea is that the used cipher primitive can be selected to + * be the same as used for the DRBG. I.e. the LRNG only uses one cipher + * primitive using the same cipher implementation with the options offered in + * the following. This means, if the CTR DRBG is selected and AES-NI is present, + * both the CTR DRBG and the selected cmac(aes) use AES-NI. + * + * The security strengths of the DRBGs are all 256 bits according to + * SP800-57 section 5.6.1. + * + * This definition is allowed to be changed. + */ +#ifdef CONFIG_CRYPTO_DRBG_CTR +static unsigned int lrng_drbg_type = 0; +#elif defined CONFIG_CRYPTO_DRBG_HMAC +static unsigned int lrng_drbg_type = 1; +#elif defined CONFIG_CRYPTO_DRBG_HASH +static unsigned int lrng_drbg_type = 2; +#else +#error "Unknown DRBG in use" +#endif + +/* The parameter must be r/o in sysfs as otherwise races appear. */ +module_param(lrng_drbg_type, uint, 0444); +MODULE_PARM_DESC(lrng_drbg_type, "DRBG type used for LRNG (0->CTR_DRBG, 1->HMAC_DRBG, 2->Hash_DRBG)"); + +struct lrng_drbg { + const char *hash_name; + const char *drbg_core; +}; + +static const struct lrng_drbg lrng_drbg_types[] = { + { /* CTR_DRBG with AES-256 using derivation function */ + .hash_name = "sha512", + .drbg_core = "drbg_nopr_ctr_aes256", + }, { /* HMAC_DRBG with SHA-512 */ + .hash_name = "sha512", + .drbg_core = "drbg_nopr_hmac_sha512", + }, { /* Hash_DRBG with SHA-512 using derivation function */ + .hash_name = "sha512", + .drbg_core = "drbg_nopr_sha512" + } +}; + +static int lrng_drbg_drng_seed_helper(void *drng, const u8 *inbuf, u32 inbuflen) +{ + struct drbg_state *drbg = (struct drbg_state *)drng; + LIST_HEAD(seedlist); + struct drbg_string data; + int ret; + + drbg_string_fill(&data, inbuf, inbuflen); + list_add_tail(&data.list, &seedlist); + ret = drbg->d_ops->update(drbg, &seedlist, drbg->seeded); + + if (ret >= 0) + drbg->seeded = true; + + return ret; +} + +static int lrng_drbg_drng_generate_helper(void *drng, u8 *outbuf, u32 outbuflen) +{ + struct drbg_state *drbg = (struct drbg_state *)drng; + + return drbg->d_ops->generate(drbg, outbuf, outbuflen, NULL); +} + +static void *lrng_drbg_drng_alloc(u32 sec_strength) +{ + struct drbg_state *drbg; + int coreref = -1; + bool pr = false; + int ret; + + drbg_convert_tfm_core(lrng_drbg_types[lrng_drbg_type].drbg_core, + &coreref, &pr); + if (coreref < 0) + return ERR_PTR(-EFAULT); + + drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL); + if (!drbg) + return ERR_PTR(-ENOMEM); + + drbg->core = &drbg_cores[coreref]; + drbg->seeded = false; + ret = drbg_alloc_state(drbg); + if (ret) + goto err; + + if (sec_strength > drbg_sec_strength(drbg->core->flags)) { + pr_err("Security strength of DRBG (%u bits) lower than requested by LRNG (%u bits)\n", + drbg_sec_strength(drbg->core->flags) * 8, + sec_strength * 8); + goto dealloc; + } + + if (sec_strength < drbg_sec_strength(drbg->core->flags)) + pr_warn("Security strength of DRBG (%u bits) higher than requested by LRNG (%u bits)\n", + drbg_sec_strength(drbg->core->flags) * 8, + sec_strength * 8); + + pr_info("DRBG with %s core allocated\n", drbg->core->backend_cra_name); + + return drbg; + +dealloc: + if (drbg->d_ops) + drbg->d_ops->crypto_fini(drbg); + drbg_dealloc_state(drbg); +err: + kfree(drbg); + return ERR_PTR(-EINVAL); +} + +static void lrng_drbg_drng_dealloc(void *drng) +{ + struct drbg_state *drbg = (struct 
drbg_state *)drng; + + if (drbg && drbg->d_ops) + drbg->d_ops->crypto_fini(drbg); + drbg_dealloc_state(drbg); + kfree_sensitive(drbg); + pr_info("DRBG deallocated\n"); +} + +static void *lrng_drbg_hash_alloc(void) +{ + return lrng_kcapi_hash_alloc(lrng_drbg_types[lrng_drbg_type].hash_name); +} + +static const char *lrng_drbg_name(void) +{ + return lrng_drbg_types[lrng_drbg_type].drbg_core; +} + +static const char *lrng_hash_name(void) +{ + return lrng_drbg_types[lrng_drbg_type].hash_name; +} + +static const struct lrng_crypto_cb lrng_drbg_crypto_cb = { + .lrng_drng_name = lrng_drbg_name, + .lrng_hash_name = lrng_hash_name, + .lrng_drng_alloc = lrng_drbg_drng_alloc, + .lrng_drng_dealloc = lrng_drbg_drng_dealloc, + .lrng_drng_seed_helper = lrng_drbg_drng_seed_helper, + .lrng_drng_generate_helper = lrng_drbg_drng_generate_helper, + .lrng_hash_alloc = lrng_drbg_hash_alloc, + .lrng_hash_dealloc = lrng_kcapi_hash_dealloc, + .lrng_hash_digestsize = lrng_kcapi_hash_digestsize, + .lrng_hash_init = lrng_kcapi_hash_init, + .lrng_hash_update = lrng_kcapi_hash_update, + .lrng_hash_final = lrng_kcapi_hash_final, + .lrng_hash_desc_zero = lrng_kcapi_hash_zero, +}; + +static int __init lrng_drbg_init(void) +{ + if (lrng_drbg_type >= ARRAY_SIZE(lrng_drbg_types)) { + pr_err("lrng_drbg_type parameter too large (given %u - max: %lu)", + lrng_drbg_type, + (unsigned long)ARRAY_SIZE(lrng_drbg_types) - 1); + return -EAGAIN; + } + return lrng_set_drng_cb(&lrng_drbg_crypto_cb); +} + +static void __exit lrng_drbg_exit(void) +{ + lrng_set_drng_cb(NULL); +} + +late_initcall(lrng_drbg_init); +module_exit(lrng_drbg_exit); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Stephan Mueller "); +MODULE_DESCRIPTION("Linux Random Number Generator - SP800-90A DRBG backend"); diff --git a/drivers/char/lrng/lrng_drng.c b/drivers/char/lrng/lrng_drng.c new file mode 100644 index 000000000000..b1d89a6f548d --- /dev/null +++ b/drivers/char/lrng/lrng_drng.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG DRNG processing + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +#include "lrng_internal.h" + +/* + * Maximum number of seconds between DRNG reseed intervals of the DRNG. Note, + * this is enforced with the next request of random numbers from the + * DRNG. Setting this value to zero implies a reseeding attempt before every + * generated random number. + */ +int lrng_drng_reseed_max_time = 600; + +static atomic_t lrng_avail = ATOMIC_INIT(0); + +DEFINE_MUTEX(lrng_crypto_cb_update); + +/* DRNG for /dev/urandom, getrandom(2), get_random_bytes */ +static struct lrng_drng lrng_drng_init = { + .drng = &chacha20, + .crypto_cb = &lrng_cc20_crypto_cb, + .lock = __MUTEX_INITIALIZER(lrng_drng_init.lock), + .spin_lock = __SPIN_LOCK_UNLOCKED(lrng_drng_init.spin_lock), + .hash_lock = __RW_LOCK_UNLOCKED(lrng_drng_init.hash_lock) +}; + +/* + * DRNG for get_random_bytes when called in atomic context. This + * DRNG will always use the ChaCha20 DRNG. It will never benefit from a + * DRNG switch like the "regular" DRNG. If there was no DRNG switch, the atomic + * DRNG is identical to the "regular" DRNG. + * + * The reason for having this is due to the fact that DRNGs other than + * the ChaCha20 DRNG may sleep. 
+ */ +static struct lrng_drng lrng_drng_atomic = { + .drng = &chacha20, + .crypto_cb = &lrng_cc20_crypto_cb, + .spin_lock = __SPIN_LOCK_UNLOCKED(lrng_drng_atomic.spin_lock), + .hash_lock = __RW_LOCK_UNLOCKED(lrng_drng_atomic.hash_lock) +}; + +static u32 max_wo_reseed = LRNG_DRNG_MAX_WITHOUT_RESEED; +#ifdef CONFIG_LRNG_RUNTIME_MAX_WO_RESEED_CONFIG +module_param(max_wo_reseed, uint, 0444); +MODULE_PARM_DESC(max_wo_reseed, + "Maximum number of DRNG generate operation without full reseed\n"); +#endif + +/********************************** Helper ************************************/ + +bool lrng_get_available(void) +{ + return likely(atomic_read(&lrng_avail)); +} + +void lrng_set_available(void) +{ + atomic_set(&lrng_avail, 1); +} + +struct lrng_drng *lrng_drng_init_instance(void) +{ + return &lrng_drng_init; +} + +struct lrng_drng *lrng_drng_atomic_instance(void) +{ + return &lrng_drng_atomic; +} + +void lrng_drng_reset(struct lrng_drng *drng) +{ + atomic_set(&drng->requests, LRNG_DRNG_RESEED_THRESH); + atomic_set(&drng->requests_since_fully_seeded, 0); + drng->last_seeded = jiffies; + drng->fully_seeded = false; + drng->force_reseed = true; + pr_debug("reset DRNG\n"); +} + +/* Initialize the default DRNG during boot */ +static void lrng_drng_seed(struct lrng_drng *drng); +void lrng_drngs_init_cc20(bool force_seed) +{ + unsigned long flags = 0; + + if (lrng_get_available()) + return; + + lrng_drng_lock(&lrng_drng_init, &flags); + if (lrng_get_available()) { + lrng_drng_unlock(&lrng_drng_init, &flags); + if (force_seed) + goto seed; + return; + } + + lrng_drng_reset(&lrng_drng_init); + lrng_cc20_init_state(&chacha20); + lrng_drng_unlock(&lrng_drng_init, &flags); + + lrng_drng_lock(&lrng_drng_atomic, &flags); + lrng_drng_reset(&lrng_drng_atomic); + /* + * We do not initialize the state of the atomic DRNG as it is identical + * to the DRNG at this point. + */ + lrng_drng_unlock(&lrng_drng_atomic, &flags); + + lrng_set_available(); + +seed: + /* Seed the DRNG with any entropy available */ + if (!lrng_pool_trylock()) { + lrng_drng_seed(&lrng_drng_init); + pr_info("ChaCha20 core initialized with first seeding\n"); + lrng_pool_unlock(); + } else { + pr_info("ChaCha20 core initialized without seeding\n"); + } +} + +bool lrng_sp80090c_compliant(void) +{ + if (!IS_ENABLED(CONFIG_LRNG_OVERSAMPLE_ENTROPY_SOURCES)) + return false; + + /* Entropy source hash must be capable of transporting enough entropy */ + if (lrng_get_digestsize() < + (lrng_security_strength() + CONFIG_LRNG_SEED_BUFFER_INIT_ADD_BITS)) + return false; + + /* SP800-90C only requested in FIPS mode */ + return fips_enabled; +} + +/************************* Random Number Generation ***************************/ + +/* Inject a data buffer into the DRNG */ +static void lrng_drng_inject(struct lrng_drng *drng, + const u8 *inbuf, u32 inbuflen, bool fully_seeded) +{ + const char *drng_type = unlikely(drng == &lrng_drng_atomic) ? + "atomic" : "regular"; + unsigned long flags = 0; + + BUILD_BUG_ON(LRNG_DRNG_RESEED_THRESH > INT_MAX); + pr_debug("seeding %s DRNG with %u bytes\n", drng_type, inbuflen); + lrng_drng_lock(drng, &flags); + if (drng->crypto_cb->lrng_drng_seed_helper(drng->drng, + inbuf, inbuflen) < 0) { + pr_warn("seeding of %s DRNG failed\n", drng_type); + drng->force_reseed = true; + } else { + int gc = LRNG_DRNG_RESEED_THRESH - atomic_read(&drng->requests); + + pr_debug("%s DRNG stats since last seeding: %lu secs; generate calls: %d\n", + drng_type, + (time_after(jiffies, drng->last_seeded) ? 
+ (jiffies - drng->last_seeded) : 0) / HZ, gc); + + /* Count the numbers of generate ops since last fully seeded */ + if (fully_seeded) + atomic_set(&drng->requests_since_fully_seeded, 0); + else + atomic_add(gc, &drng->requests_since_fully_seeded); + + drng->last_seeded = jiffies; + atomic_set(&drng->requests, LRNG_DRNG_RESEED_THRESH); + drng->force_reseed = false; + + if (!drng->fully_seeded) { + drng->fully_seeded = fully_seeded; + if (drng->fully_seeded) + pr_debug("DRNG fully seeded\n"); + } + + if (drng->drng == lrng_drng_atomic.drng) { + lrng_drng_atomic.last_seeded = jiffies; + atomic_set(&lrng_drng_atomic.requests, + LRNG_DRNG_RESEED_THRESH); + lrng_drng_atomic.force_reseed = false; + } + } + lrng_drng_unlock(drng, &flags); +} + +/* + * Perform the seeding of the DRNG with data from noise source + */ +static void _lrng_drng_seed(struct lrng_drng *drng) +{ + struct entropy_buf seedbuf __aligned(LRNG_KCAPI_ALIGN); + + lrng_fill_seed_buffer(&seedbuf, + lrng_get_seed_entropy_osr(drng->fully_seeded)); + lrng_init_ops(&seedbuf); + lrng_drng_inject(drng, (u8 *)&seedbuf, sizeof(seedbuf), + lrng_fully_seeded(drng->fully_seeded, &seedbuf)); + memzero_explicit(&seedbuf, sizeof(seedbuf)); +} + +static int lrng_drng_get(struct lrng_drng *drng, u8 *outbuf, u32 outbuflen); +static void lrng_drng_seed(struct lrng_drng *drng) +{ + _lrng_drng_seed(drng); + + BUILD_BUG_ON(LRNG_MIN_SEED_ENTROPY_BITS > + LRNG_DRNG_SECURITY_STRENGTH_BITS); + + /* + * Reseed atomic DRNG from current DRNG, + * + * We can obtain random numbers from DRNG as the lock type + * chosen by lrng_drng_get is usable with the current caller. + */ + if ((drng->drng != lrng_drng_atomic.drng) && + (lrng_drng_atomic.force_reseed || + atomic_read(&lrng_drng_atomic.requests) <= 0 || + time_after(jiffies, lrng_drng_atomic.last_seeded + + lrng_drng_reseed_max_time * HZ))) { + u8 seedbuf[LRNG_DRNG_SECURITY_STRENGTH_BYTES] + __aligned(LRNG_KCAPI_ALIGN); + int ret = lrng_drng_get(drng, seedbuf, sizeof(seedbuf)); + + if (ret < 0) { + pr_warn("Error generating random numbers for atomic DRNG: %d\n", + ret); + } else { + lrng_drng_inject(&lrng_drng_atomic, seedbuf, ret, true); + } + memzero_explicit(&seedbuf, sizeof(seedbuf)); + } +} + +static void _lrng_drng_seed_work(struct lrng_drng *drng, u32 node) +{ + pr_debug("reseed triggered by interrupt noise source for DRNG on NUMA node %d\n", + node); + lrng_drng_seed(drng); + if (drng->fully_seeded) { + /* Prevent reseed storm */ + drng->last_seeded += node * 100 * HZ; + /* Prevent draining of pool on idle systems */ + lrng_drng_reseed_max_time += 100; + } +} + +/* + * DRNG reseed trigger: Kernel thread handler triggered by the schedule_work() + */ +void lrng_drng_seed_work(struct work_struct *dummy) +{ + struct lrng_drng **lrng_drng = lrng_drng_instances(); + u32 node; + + if (lrng_drng) { + for_each_online_node(node) { + struct lrng_drng *drng = lrng_drng[node]; + + if (drng && !drng->fully_seeded) { + _lrng_drng_seed_work(drng, node); + goto out; + } + } + } else { + if (!lrng_drng_init.fully_seeded) { + _lrng_drng_seed_work(&lrng_drng_init, 0); + goto out; + } + } + + lrng_pool_all_numa_nodes_seeded(true); + +out: + /* Allow the seeding operation to be called again */ + lrng_pool_unlock(); +} + +/* Force all DRNGs to reseed before next generation */ +void lrng_drng_force_reseed(void) +{ + struct lrng_drng **lrng_drng = lrng_drng_instances(); + u32 node; + + /* + * If the initial DRNG is over the reseed threshold, allow a forced + * reseed only for the initial DRNG as this is the fallback for all. 
It + * must be kept seeded before all others to keep the LRNG operational. + */ + if (!lrng_drng || + (atomic_read_u32(&lrng_drng_init.requests_since_fully_seeded) > + LRNG_DRNG_RESEED_THRESH)) { + lrng_drng_init.force_reseed = lrng_drng_init.fully_seeded; + pr_debug("force reseed of initial DRNG\n"); + return; + } + for_each_online_node(node) { + struct lrng_drng *drng = lrng_drng[node]; + + if (!drng) + continue; + + drng->force_reseed = drng->fully_seeded; + pr_debug("force reseed of DRNG on node %u\n", node); + } + lrng_drng_atomic.force_reseed = lrng_drng_atomic.fully_seeded; +} + +/* + * lrng_drng_get() - Get random data out of the DRNG which is reseeded + * frequently. + * + * @outbuf: buffer for storing random data + * @outbuflen: length of outbuf + * + * Return: + * * < 0 in error case (DRNG generation or update failed) + * * >=0 returning the returned number of bytes + */ +static int lrng_drng_get(struct lrng_drng *drng, u8 *outbuf, u32 outbuflen) +{ + unsigned long flags = 0; + u32 processed = 0; + + if (!outbuf || !outbuflen) + return 0; + + outbuflen = min_t(size_t, outbuflen, INT_MAX); + + lrng_drngs_init_cc20(false); + + /* If DRNG operated without proper reseed for too long, block LRNG */ + BUILD_BUG_ON(LRNG_DRNG_MAX_WITHOUT_RESEED < LRNG_DRNG_RESEED_THRESH); + if (atomic_read_u32(&drng->requests_since_fully_seeded) > max_wo_reseed) + lrng_unset_fully_seeded(drng); + + while (outbuflen) { + u32 todo = min_t(u32, outbuflen, LRNG_DRNG_MAX_REQSIZE); + int ret; + + /* All but the atomic DRNG are seeded during generation */ + if (atomic_dec_and_test(&drng->requests) || + drng->force_reseed || + time_after(jiffies, drng->last_seeded + + lrng_drng_reseed_max_time * HZ)) { + if (likely(drng != &lrng_drng_atomic)) { + if (lrng_pool_trylock()) { + drng->force_reseed = true; + } else { + lrng_drng_seed(drng); + lrng_pool_unlock(); + } + } + } + + lrng_drng_lock(drng, &flags); + ret = drng->crypto_cb->lrng_drng_generate_helper( + drng->drng, outbuf + processed, todo); + lrng_drng_unlock(drng, &flags); + if (ret <= 0) { + pr_warn("getting random data from DRNG failed (%d)\n", + ret); + return -EFAULT; + } + processed += ret; + outbuflen -= ret; + } + + return processed; +} + +int lrng_drng_get_atomic(u8 *outbuf, u32 outbuflen) +{ + return lrng_drng_get(&lrng_drng_atomic, outbuf, outbuflen); +} + +int lrng_drng_get_sleep(u8 *outbuf, u32 outbuflen) +{ + struct lrng_drng **lrng_drng = lrng_drng_instances(); + struct lrng_drng *drng = &lrng_drng_init; + int node = numa_node_id(); + + might_sleep(); + + if (lrng_drng && lrng_drng[node] && lrng_drng[node]->fully_seeded) + drng = lrng_drng[node]; + + return lrng_drng_get(drng, outbuf, outbuflen); +} + +/* Reset LRNG such that all existing entropy is gone */ +static void _lrng_reset(struct work_struct *work) +{ + struct lrng_drng **lrng_drng = lrng_drng_instances(); + unsigned long flags = 0; + + if (!lrng_drng) { + lrng_drng_lock(&lrng_drng_init, &flags); + lrng_drng_reset(&lrng_drng_init); + lrng_drng_unlock(&lrng_drng_init, &flags); + } else { + u32 node; + + for_each_online_node(node) { + struct lrng_drng *drng = lrng_drng[node]; + + if (!drng) + continue; + lrng_drng_lock(drng, &flags); + lrng_drng_reset(drng); + lrng_drng_unlock(drng, &flags); + } + } + lrng_set_entropy_thresh(LRNG_INIT_ENTROPY_BITS); + + lrng_reset_state(); +} + +static DECLARE_WORK(lrng_reset_work, _lrng_reset); + +void lrng_reset(void) +{ + schedule_work(&lrng_reset_work); +} + +/***************************** Initialize LRNG *******************************/ + +static 
int __init lrng_init(void) +{ + lrng_drngs_init_cc20(false); + + lrng_drngs_numa_alloc(); + return 0; +} + +late_initcall(lrng_init); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Stephan Mueller "); +MODULE_DESCRIPTION("Linux Random Number Generator"); diff --git a/drivers/char/lrng/lrng_es_archrandom.c b/drivers/char/lrng/lrng_es_archrandom.c new file mode 100644 index 000000000000..337f84fab120 --- /dev/null +++ b/drivers/char/lrng/lrng_es_archrandom.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG Fast Entropy Source: CPU-based entropy source + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +#include "lrng_internal.h" + +/* + * Estimated entropy of data is a 32th of LRNG_DRNG_SECURITY_STRENGTH_BITS. + * As we have no ability to review the implementation of those noise sources, + * it is prudent to have a conservative estimate here. + */ +#define LRNG_ARCHRANDOM_DEFAULT_STRENGTH CONFIG_LRNG_CPU_ENTROPY_RATE +#define LRNG_ARCHRANDOM_TRUST_CPU_STRENGTH LRNG_DRNG_SECURITY_STRENGTH_BITS +#ifdef CONFIG_RANDOM_TRUST_CPU +static u32 archrandom = LRNG_ARCHRANDOM_TRUST_CPU_STRENGTH; +#else +static u32 archrandom = LRNG_ARCHRANDOM_DEFAULT_STRENGTH; +#endif +#ifdef CONFIG_LRNG_RUNTIME_ES_CONFIG +module_param(archrandom, uint, 0644); +MODULE_PARM_DESC(archrandom, "Entropy in bits of 256 data bits from CPU noise source (e.g. RDSEED)"); +#endif + +static int __init lrng_parse_trust_cpu(char *arg) +{ + int ret; + bool trust_cpu = false; + + ret = kstrtobool(arg, &trust_cpu); + if (ret) + return ret; + + if (trust_cpu) { + archrandom = LRNG_ARCHRANDOM_TRUST_CPU_STRENGTH; + lrng_pool_add_entropy(); + } else { + archrandom = LRNG_ARCHRANDOM_DEFAULT_STRENGTH; + } + + return 0; +} +early_param("random.trust_cpu", lrng_parse_trust_cpu); + +u32 lrng_archrandom_entropylevel(u32 requested_bits) +{ + return lrng_fast_noise_entropylevel(archrandom, requested_bits); +} + +static u32 lrng_get_arch_data(u8 *outbuf, u32 requested_bits) +{ + u32 i; + + /* operate on full blocks */ + BUILD_BUG_ON(LRNG_DRNG_SECURITY_STRENGTH_BYTES % sizeof(unsigned long)); + BUILD_BUG_ON(CONFIG_LRNG_SEED_BUFFER_INIT_ADD_BITS % + sizeof(unsigned long)); + /* ensure we have aligned buffers */ + BUILD_BUG_ON(LRNG_KCAPI_ALIGN % sizeof(unsigned long)); + + for (i = 0; i < (requested_bits >> 3); + i += sizeof(unsigned long)) { + if (!arch_get_random_seed_long((unsigned long *)(outbuf + i)) && + !arch_get_random_long((unsigned long *)(outbuf + i))) { + archrandom = 0; + return 0; + } + } + + return requested_bits; +} + +static u32 lrng_get_arch_data_compress(u8 *outbuf, u32 requested_bits, + u32 data_multiplier) +{ + SHASH_DESC_ON_STACK(shash, NULL); + const struct lrng_crypto_cb *crypto_cb; + struct lrng_drng *drng = lrng_drng_init_instance(); + unsigned long flags; + u32 ent_bits = 0, i, partial_bits = 0, + full_bits = requested_bits * data_multiplier; + void *hash; + + /* Calculate oversampling for SP800-90C */ + if (lrng_sp80090c_compliant()) { + /* Complete amount of bits to be pulled */ + full_bits += CONFIG_LRNG_OVERSAMPLE_ES_BITS * data_multiplier; + /* Full blocks that will be pulled */ + data_multiplier = full_bits / requested_bits; + /* Partial block in bits to be pulled */ + partial_bits = full_bits - (data_multiplier * requested_bits); + } + + lrng_hash_lock(drng, &flags); + crypto_cb = drng->crypto_cb; + hash = drng->hash; + + if (crypto_cb->lrng_hash_init(shash, hash)) + goto out; + + /* Hash all 
data from the CPU entropy source */
+	for (i = 0; i < data_multiplier; i++) {
+		ent_bits = lrng_get_arch_data(outbuf, requested_bits);
+		if (!ent_bits)
+			goto out;
+
+		if (crypto_cb->lrng_hash_update(shash, outbuf, ent_bits >> 3))
+			goto err;
+	}
+
+	/* Hash partial block, if applicable */
+	ent_bits = lrng_get_arch_data(outbuf, partial_bits);
+	if (ent_bits &&
+	    crypto_cb->lrng_hash_update(shash, outbuf, ent_bits >> 3))
+		goto err;
+
+	pr_debug("pulled %u bits from CPU RNG entropy source\n", full_bits);
+
+	/* Generate the compressed data to be returned to the caller */
+	ent_bits = crypto_cb->lrng_hash_digestsize(hash) << 3;
+	if (requested_bits < ent_bits) {
+		u8 digest[LRNG_MAX_DIGESTSIZE];
+
+		if (crypto_cb->lrng_hash_final(shash, digest))
+			goto err;
+
+		/* Truncate output data to requested size */
+		memcpy(outbuf, digest, requested_bits >> 3);
+		memzero_explicit(digest, crypto_cb->lrng_hash_digestsize(hash));
+		ent_bits = requested_bits;
+	} else {
+		if (crypto_cb->lrng_hash_final(shash, outbuf))
+			goto err;
+	}
+
+out:
+	crypto_cb->lrng_hash_desc_zero(shash);
+	lrng_hash_unlock(drng, flags);
+	return ent_bits;
+
+err:
+	ent_bits = 0;
+	goto out;
+}
+
+/*
+ * If the CPU entropy source does not return full entropy, return the
+ * multiplier of how much data shall be sampled from it.
+ */
+static u32 lrng_arch_multiplier(void)
+{
+	static u32 data_multiplier = 0;
+	unsigned long v;
+
+	if (data_multiplier > 0)
+		return data_multiplier;
+
+	if (IS_ENABLED(CONFIG_X86) && !arch_get_random_seed_long(&v)) {
+		/*
+		 * Intel SPEC: pulling 512 blocks from RDRAND ensures
+		 * one reseed, making it logically equivalent to RDSEED.
+		 */
+		data_multiplier = 512;
+	} else if (IS_ENABLED(CONFIG_PPC)) {
+		/*
+		 * PowerISA defines DARN to deliver at least 0.5 bits of
+		 * entropy per data bit.
+		 */
+		data_multiplier = 2;
+	} else if (IS_ENABLED(CONFIG_RISCV)) {
+		/*
+		 * riscv-crypto-spec-scalar-1.0.0-rc6.pdf section 4.2 defines
+		 * this requirement.
+		 */
+		data_multiplier = 2;
+	} else {
+		/* CPU provides full entropy */
+		data_multiplier = CONFIG_LRNG_CPU_FULL_ENT_MULTIPLIER;
+	}
+	return data_multiplier;
+}
+
+/*
+ * lrng_get_arch() - Get CPU entropy source entropy
+ *
+ * @outbuf: buffer to store entropy of size requested_bits
+ *
+ * Return:
+ * * > 0 on success where value provides the added entropy in bits
+ * * 0 if no fast source was available
+ */
+u32 lrng_get_arch(u8 *outbuf, u32 requested_bits)
+{
+	u32 ent_bits, data_multiplier = lrng_arch_multiplier();
+
+	if (data_multiplier <= 1) {
+		ent_bits = lrng_get_arch_data(outbuf, requested_bits);
+	} else {
+		ent_bits = lrng_get_arch_data_compress(outbuf, requested_bits,
+						       data_multiplier);
+	}
+
+	ent_bits = lrng_archrandom_entropylevel(ent_bits);
+	pr_debug("obtained %u bits of entropy from CPU RNG entropy source\n",
+		 ent_bits);
+	return ent_bits;
+}
+
+void lrng_arch_es_state(unsigned char *buf, size_t buflen)
+{
+	const struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+	u32 data_multiplier = lrng_arch_multiplier();
+
+	/* Assume the lrng_drng_init lock is taken by caller */
+	snprintf(buf, buflen,
+		 "CPU ES properties:\n"
+		 " Hash for compressing data: %s\n"
+		 " Data multiplier: %u\n",
+		 (data_multiplier <= 1) ?
+ "N/A" : lrng_drng_init->crypto_cb->lrng_hash_name(), + data_multiplier); +} diff --git a/drivers/char/lrng/lrng_es_aux.c b/drivers/char/lrng/lrng_es_aux.c new file mode 100644 index 000000000000..c8a040d29f78 --- /dev/null +++ b/drivers/char/lrng/lrng_es_aux.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG Slow Entropy Source: Auxiliary entropy pool + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include + +#include "lrng_internal.h" + +/* + * This is the auxiliary pool + * + * The aux pool array is aligned to 8 bytes to comfort the kernel crypto API + * cipher implementations of the hash functions used to read the pool: for some + * accelerated implementations, we need an alignment to avoid a realignment + * which involves memcpy(). The alignment to 8 bytes should satisfy all crypto + * implementations. + */ +struct lrng_pool { + u8 aux_pool[LRNG_POOL_SIZE]; /* Aux pool: digest state */ + atomic_t aux_entropy_bits; + atomic_t digestsize; /* Digest size of used hash */ + bool initialized; /* Aux pool initialized? */ + + /* Serialize read of entropy pool and update of aux pool */ + spinlock_t lock; +}; + +static struct lrng_pool lrng_pool __aligned(LRNG_KCAPI_ALIGN) = { + .aux_entropy_bits = ATOMIC_INIT(0), + .digestsize = ATOMIC_INIT(LRNG_ATOMIC_DIGEST_SIZE), + .initialized = false, + .lock = __SPIN_LOCK_UNLOCKED(lrng_pool.lock) +}; + +/********************************** Helper ***********************************/ + +/* Entropy in bits present in aux pool */ +u32 lrng_avail_aux_entropy(void) +{ + /* Cap available entropy with max entropy */ + u32 avail_bits = min_t(u32, lrng_get_digestsize(), + atomic_read_u32(&lrng_pool.aux_entropy_bits)); + + /* Consider oversampling rate due to aux pool conditioning */ + return lrng_reduce_by_osr(avail_bits); +} + +/* Set the digest size of the used hash in bytes */ +static void lrng_set_digestsize(u32 digestsize) +{ + struct lrng_pool *pool = &lrng_pool; + u32 ent_bits = atomic_xchg_relaxed(&pool->aux_entropy_bits, 0), + old_digestsize = lrng_get_digestsize(); + + atomic_set(&lrng_pool.digestsize, digestsize); + + /* + * Update the /proc/.../write_wakeup_threshold which must not be larger + * than the digest size of the curent conditioning hash. + */ + digestsize <<= 3; + lrng_proc_update_max_write_thresh(digestsize); + if (lrng_write_wakeup_bits > digestsize) + lrng_write_wakeup_bits = digestsize; + + /* + * In case the new digest is larger than the old one, cap the available + * entropy to the old message digest used to process the existing data. + */ + ent_bits = min_t(u32, ent_bits, old_digestsize); + atomic_add(ent_bits, &pool->aux_entropy_bits); +} + +/* Obtain the digest size provided by the used hash in bits */ +u32 lrng_get_digestsize(void) +{ + return atomic_read_u32(&lrng_pool.digestsize) << 3; +} + +/* Set entropy content in user-space controllable aux pool */ +void lrng_pool_set_entropy(u32 entropy_bits) +{ + atomic_set(&lrng_pool.aux_entropy_bits, entropy_bits); +} + +/* + * Replace old with new hash for auxiliary pool handling + * + * Assumption: the caller must guarantee that the new_cb is available during the + * entire operation (e.g. it must hold the write lock against pointer updating). 
+ */ +int lrng_aux_switch_hash(const struct lrng_crypto_cb *new_cb, void *new_hash, + const struct lrng_crypto_cb *old_cb) +{ + struct lrng_pool *pool = &lrng_pool; + struct shash_desc *shash = (struct shash_desc *)pool->aux_pool; + u8 digest[LRNG_MAX_DIGESTSIZE]; + int ret; + + if (!IS_ENABLED(CONFIG_LRNG_DRNG_SWITCH)) + return -EOPNOTSUPP; + + if (unlikely(!pool->initialized)) + return 0; + + /* Get the aux pool hash with old digest ... */ + ret = old_cb->lrng_hash_final(shash, digest) ?: + /* ... re-initialize the hash with the new digest ... */ + new_cb->lrng_hash_init(shash, new_hash) ?: + /* + * ... feed the old hash into the new state. We may feed + * uninitialized memory into the new state, but this is + * considered no issue and even good as we have some more + * uncertainty here. + */ + new_cb->lrng_hash_update(shash, digest, sizeof(digest)); + if (!ret) { + lrng_set_digestsize(new_cb->lrng_hash_digestsize(new_hash)); + pr_debug("Re-initialize aux entropy pool with hash %s\n", + new_cb->lrng_hash_name()); + } + + memzero_explicit(digest, sizeof(digest)); + return ret; +} + +/* Insert data into auxiliary pool by using the hash update function. */ +static int +lrng_pool_insert_aux_locked(const u8 *inbuf, u32 inbuflen, u32 entropy_bits) +{ + struct lrng_pool *pool = &lrng_pool; + struct shash_desc *shash = (struct shash_desc *)pool->aux_pool; + struct lrng_drng *drng = lrng_drng_init_instance(); + const struct lrng_crypto_cb *crypto_cb; + unsigned long flags; + void *hash; + int ret; + + entropy_bits = min_t(u32, entropy_bits, inbuflen << 3); + + lrng_hash_lock(drng, &flags); + + crypto_cb = drng->crypto_cb; + hash = drng->hash; + + if (unlikely(!pool->initialized)) { + ret = crypto_cb->lrng_hash_init(shash, hash); + if (ret) + goto out; + pool->initialized = true; + } + + ret = crypto_cb->lrng_hash_update(shash, inbuf, inbuflen); + if (ret) + goto out; + + /* + * Cap the available entropy to the hash output size compliant to + * SP800-90B section 3.1.5.1 table 1. + */ + entropy_bits += atomic_read_u32(&pool->aux_entropy_bits); + atomic_set(&pool->aux_entropy_bits, + min_t(u32, entropy_bits, + crypto_cb->lrng_hash_digestsize(hash) << 3)); + +out: + lrng_hash_unlock(drng, flags); + return ret; +} + +int lrng_pool_insert_aux(const u8 *inbuf, u32 inbuflen, u32 entropy_bits) +{ + struct lrng_pool *pool = &lrng_pool; + unsigned long flags; + int ret; + + spin_lock_irqsave(&pool->lock, flags); + ret = lrng_pool_insert_aux_locked(inbuf, inbuflen, entropy_bits); + spin_unlock_irqrestore(&pool->lock, flags); + + lrng_pool_add_entropy(); + + return ret; +} + +/************************* Get data from entropy pool *************************/ + +/* + * Get auxiliary entropy pool and its entropy content for seed buffer. + * Caller must hold lrng_pool.pool->lock. + * @outbuf: buffer to store data in with size requested_bits + * @requested_bits: Requested amount of entropy + * @return: amount of entropy in outbuf in bits. 
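+ *
+ * Editorial worked example (not part of the original patch; assuming
+ * lrng_compress_osr() yields the configured oversampling rate, here 64
+ * bits, and a SHA-256 conditioning hash): a request for 256 bits gives
+ * requested_bits_osr = 256 + 64 = 320. If the pool holds 200 credited
+ * bits, all 200 are collected (nothing is put back) and
+ * lrng_reduce_by_osr() credits the caller with 200 - 64 = 136 bits.
+ * Had the pool held the full 256 digest bits for a 128-bit request,
+ * 256 - (128 + 64) = 64 bits would be put back into the pool.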
+ */ +static u32 lrng_get_aux_pool(u8 *outbuf, u32 requested_bits) +{ + struct lrng_pool *pool = &lrng_pool; + struct shash_desc *shash = (struct shash_desc *)pool->aux_pool; + struct lrng_drng *drng = lrng_drng_init_instance(); + const struct lrng_crypto_cb *crypto_cb; + unsigned long flags; + void *hash; + u32 collected_ent_bits, returned_ent_bits, unused_bits = 0, + digestsize, requested_bits_osr; + u8 aux_output[LRNG_MAX_DIGESTSIZE]; + + if (unlikely(!pool->initialized)) + return 0; + + lrng_hash_lock(drng, &flags); + + crypto_cb = drng->crypto_cb; + hash = drng->hash; + digestsize = crypto_cb->lrng_hash_digestsize(hash); + + /* Ensure that no more than the size of aux_pool can be requested */ + requested_bits = min_t(u32, requested_bits, (LRNG_MAX_DIGESTSIZE << 3)); + requested_bits_osr = requested_bits + lrng_compress_osr(); + + /* Cap entropy with entropy counter from aux pool and the used digest */ + collected_ent_bits = min_t(u32, digestsize << 3, + atomic_xchg_relaxed(&pool->aux_entropy_bits, 0)); + + /* We collected too much entropy and put the overflow back */ + if (collected_ent_bits > requested_bits_osr) { + /* Amount of bits we collected too much */ + unused_bits = collected_ent_bits - requested_bits_osr; + /* Put entropy back */ + atomic_add(unused_bits, &pool->aux_entropy_bits); + /* Fix collected entropy */ + collected_ent_bits = requested_bits_osr; + } + + /* Apply oversampling: discount requested oversampling rate */ + returned_ent_bits = lrng_reduce_by_osr(collected_ent_bits); + + pr_debug("obtained %u bits by collecting %u bits of entropy from aux pool, %u bits of entropy remaining\n", + returned_ent_bits, collected_ent_bits, unused_bits); + + /* Get the digest for the aux pool to be returned to the caller ... */ + if (crypto_cb->lrng_hash_final(shash, aux_output) || + /* + * ... and re-initialize the aux state. Do not add the aux pool + * digest for backward secrecy as it will be added with the + * insertion of the complete seed buffer after it has been filled. + */ + crypto_cb->lrng_hash_init(shash, hash)) { + returned_ent_bits = 0; + } else { + /* + * Do not truncate the output size exactly to collected_ent_bits + * as the aux pool may contain data that is not credited with + * entropy, but we want to use them to stir the DRNG state. 
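+ *
+ * (Editorial example: if only 136 bits were credited for a 256-bit
+ * request, the full 32 output bytes are still copied below - the
+ * uncredited bytes add no entropy claim but still stir the DRNG.)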
+ */ + memcpy(outbuf, aux_output, requested_bits >> 3); + } + + lrng_hash_unlock(drng, flags); + memzero_explicit(aux_output, digestsize); + return returned_ent_bits; +} + +void lrng_get_backtrack_aux(struct entropy_buf *entropy_buf, u32 requested_bits) +{ + struct lrng_pool *pool = &lrng_pool; + unsigned long flags; + + /* Ensure aux pool extraction and backtracking op are atomic */ + spin_lock_irqsave(&pool->lock, flags); + + entropy_buf->a_bits = lrng_get_aux_pool(entropy_buf->a, requested_bits); + + /* Mix the extracted data back into pool for backtracking resistance */ + if (lrng_pool_insert_aux_locked((u8 *)entropy_buf, + sizeof(struct entropy_buf), 0)) + pr_warn("Backtracking resistance operation failed\n"); + + spin_unlock_irqrestore(&pool->lock, flags); +} + +void lrng_aux_es_state(unsigned char *buf, size_t buflen) +{ + const struct lrng_drng *lrng_drng_init = lrng_drng_init_instance(); + + /* Assume the lrng_drng_init lock is taken by caller */ + snprintf(buf, buflen, + "Auxiliary ES properties:\n" + " Hash for operating entropy pool: %s\n", + lrng_drng_init->crypto_cb->lrng_hash_name()); +} diff --git a/drivers/char/lrng/lrng_es_irq.c b/drivers/char/lrng/lrng_es_irq.c new file mode 100644 index 000000000000..6b13ff97d178 --- /dev/null +++ b/drivers/char/lrng/lrng_es_irq.c @@ -0,0 +1,824 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG Slow Entropy Source: Interrupt data collection + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include "lrng_internal.h" +#include "lrng_es_irq.h" + +/* + * Number of interrupts to be recorded to assume that DRNG security strength + * bits of entropy are received. + * Note: a value below the DRNG security strength should not be defined as this + * may imply the DRNG can never be fully seeded in case other noise + * sources are unavailable. + */ +#define LRNG_IRQ_ENTROPY_BITS CONFIG_LRNG_IRQ_ENTROPY_RATE + + +/* Number of interrupts required for LRNG_DRNG_SECURITY_STRENGTH_BITS entropy */ +static u32 lrng_irq_entropy_bits = LRNG_IRQ_ENTROPY_BITS; +/* Is high-resolution timer present? */ +static bool lrng_irq_highres_timer = false; + +static u32 irq_entropy __read_mostly = LRNG_IRQ_ENTROPY_BITS; +#ifdef CONFIG_LRNG_RUNTIME_ES_CONFIG +module_param(irq_entropy, uint, 0444); +MODULE_PARM_DESC(irq_entropy, + "How many interrupts must be collected for obtaining 256 bits of entropy\n"); +#endif + +/* Per-CPU array holding concatenated entropy events */ +static DEFINE_PER_CPU(u32 [LRNG_DATA_ARRAY_SIZE], lrng_pcpu_array) + __aligned(LRNG_KCAPI_ALIGN); +static DEFINE_PER_CPU(u32, lrng_pcpu_array_ptr) = 0; +static DEFINE_PER_CPU(atomic_t, lrng_pcpu_array_irqs) = ATOMIC_INIT(0); + +/* + * The entropy collection is performed by executing the following steps: + * 1. fill up the per-CPU array holding the time stamps + * 2. once the per-CPU array is full, a compression of the data into + * the entropy pool is performed - this happens in interrupt context + * + * If step 2 is not desired in interrupt context, the following boolean + * needs to be set to false. This implies that old entropy data in the + * per-CPU array collected since the last DRNG reseed is overwritten with + * new entropy data instead of retaining the entropy with the compression + * operation. 
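+ *
+ * Editorial sketch of the two modes (using the helpers defined below,
+ * lrng_pcpu_array_to_hash() and lrng_pcpu_array_compress()):
+ *
+ *	array full, continuous compression on:
+ *		hash update of per-CPU pool with array - entropy is retained
+ *	array full, continuous compression off:
+ *		array pointer wraps - old time stamps are overwritten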
+ *
+ * Impact on entropy:
+ *
+ * If continuous compression is enabled, the maximum entropy that is collected
+ * per CPU between DRNG reseeds is equal to the digest size of the used hash.
+ *
+ * If continuous compression is disabled, the maximum number of entropy events
+ * that can be collected per CPU is equal to LRNG_DATA_NUM_VALUES. This amount
+ * of events is converted into an entropy statement which then represents the
+ * maximum amount of entropy collectible per CPU between DRNG reseeds.
+ */
+static bool lrng_pcpu_continuous_compression __read_mostly =
+	IS_ENABLED(CONFIG_LRNG_ENABLE_CONTINUOUS_COMPRESSION);
+
+#ifdef CONFIG_LRNG_SWITCHABLE_CONTINUOUS_COMPRESSION
+module_param(lrng_pcpu_continuous_compression, bool, 0444);
+MODULE_PARM_DESC(lrng_pcpu_continuous_compression,
+		 "Perform entropy compression if per-CPU entropy data array is full\n");
+#endif
+
+/*
+ * Per-CPU entropy pool with compressed entropy event
+ *
+ * The per-CPU entropy pool is defined as the hash state. New data is simply
+ * inserted into the entropy pool by performing a hash update operation.
+ * To read the entropy pool, a hash final must be invoked. However, before
+ * the entropy pool is released again after a hash final, the hash init must
+ * be performed.
+ */
+static DEFINE_PER_CPU(u8 [LRNG_POOL_SIZE], lrng_pcpu_pool)
+						__aligned(LRNG_KCAPI_ALIGN);
+/*
+ * Lock to allow other CPUs to read the pool - as this is only done during
+ * reseed which is infrequent, this lock is hardly contended.
+ */
+static DEFINE_PER_CPU(spinlock_t, lrng_pcpu_lock);
+static DEFINE_PER_CPU(bool, lrng_pcpu_lock_init) = false;
+
+/* Number of time stamps analyzed to calculate a GCD */
+#define LRNG_GCD_WINDOW_SIZE	100
+static u32 lrng_gcd_history[LRNG_GCD_WINDOW_SIZE];
+static atomic_t lrng_gcd_history_ptr = ATOMIC_INIT(-1);
+
+/* The common divisor for all timestamps */
+static u32 lrng_gcd_timer = 0;
+
+static bool lrng_gcd_tested(void)
+{
+	return (lrng_gcd_timer != 0);
+}
+
+/* Set the GCD for use in IRQ ES - if 0, the GCD calculation is restarted. */
+static void _lrng_gcd_set(u32 running_gcd)
+{
+	lrng_gcd_timer = running_gcd;
+	/* Ensure that update to global variable lrng_gcd_timer is visible */
+	mb();
+}
+
+static void lrng_gcd_set(u32 running_gcd)
+{
+	if (!lrng_gcd_tested()) {
+		_lrng_gcd_set(running_gcd);
+		pr_debug("Setting GCD to %u\n", running_gcd);
+	}
+}
+
+u32 lrng_gcd_analyze(u32 *history, size_t nelem)
+{
+	u32 running_gcd = 0;
+	size_t i;
+
+	/* Now perform the analysis on the accumulated time data. */
+	for (i = 0; i < nelem; i++) {
+		/*
+		 * NOTE: this would be the place to add more analysis on the
+		 * appropriateness of the timer like checking the presence
+		 * of sufficient variations in the timer.
+		 */
+
+		/*
+		 * This calculates the gcd of all the time values, that is
+		 * gcd(time_1, time_2, ..., time_nelem)
+		 *
+		 * Some timers increment by a fixed (non-1) amount each step.
+		 * This code checks for such increments, and allows the library
+		 * to report how many such changes have occurred.
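+		 *
+		 * Editorial worked example: a counter that ticks in steps
+		 * of 32 produces history values like 4096, 4128 and 4192
+		 * whose GCD is 32; lrng_time_process() later divides each
+		 * time stamp by this factor so that only the genuinely
+		 * varying low bits are recorded.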
+ */ + running_gcd = (u32)gcd(history[i], running_gcd); + + /* Zeroize data */ + history[i] = 0; + } + + return running_gcd; +} + +static void lrng_gcd_add_value(u32 time) +{ + u32 ptr = (u32)atomic_inc_return_relaxed(&lrng_gcd_history_ptr); + + if (ptr < LRNG_GCD_WINDOW_SIZE) { + lrng_gcd_history[ptr] = time; + } else if (ptr == LRNG_GCD_WINDOW_SIZE) { + u32 gcd = lrng_gcd_analyze(lrng_gcd_history, + LRNG_GCD_WINDOW_SIZE); + + if (!gcd) + gcd = 1; + + /* + * Ensure that we have variations in the time stamp below the + * given value. This is just a safety measure to prevent the GCD + * becoming too large. + */ + if (gcd >= 1000) { + pr_warn("calculated GCD is larger than expected: %u\n", + gcd); + gcd = 1000; + } + + /* Adjust all deltas by the observed (small) common factor. */ + lrng_gcd_set(gcd); + atomic_set(&lrng_gcd_history_ptr, 0); + } +} + +/* Return boolean whether LRNG identified presence of high-resolution timer */ +static bool lrng_pool_highres_timer(void) +{ + return lrng_irq_highres_timer; +} + +/* Convert entropy in bits into number of IRQs with the same entropy content. */ +static u32 lrng_entropy_to_data(u32 entropy_bits) +{ + return ((entropy_bits * lrng_irq_entropy_bits) / + LRNG_DRNG_SECURITY_STRENGTH_BITS); +} + +/* Convert number of IRQs into entropy value. */ +static u32 lrng_data_to_entropy(u32 irqnum) +{ + return ((irqnum * LRNG_DRNG_SECURITY_STRENGTH_BITS) / + lrng_irq_entropy_bits); +} + +static bool lrng_pcpu_pool_online(int cpu) +{ + return per_cpu(lrng_pcpu_lock_init, cpu); +} + +static void lrng_pcpu_check_compression_state(void) +{ + /* One pool must hold sufficient entropy for disabled compression */ + if (!lrng_pcpu_continuous_compression) { + u32 max_ent = min_t(u32, lrng_get_digestsize(), + lrng_data_to_entropy(LRNG_DATA_NUM_VALUES)); + if (max_ent < lrng_security_strength()) { + pr_warn("Force continuous compression operation to ensure LRNG can hold enough entropy\n"); + lrng_pcpu_continuous_compression = true; + } + } +} + +static int __init lrng_init_time_source(void) +{ + /* Set a minimum number of interrupts that must be collected */ + irq_entropy = max_t(u32, LRNG_IRQ_ENTROPY_BITS, irq_entropy); + + if ((random_get_entropy() & LRNG_DATA_SLOTSIZE_MASK) || + (random_get_entropy() & LRNG_DATA_SLOTSIZE_MASK)) { + /* + * As the highres timer is identified here, previous interrupts + * obtained during boot time are treated like a lowres-timer + * would have been present. + */ + lrng_irq_highres_timer = true; + lrng_irq_entropy_bits = irq_entropy; + } else { + u32 new_entropy = irq_entropy * LRNG_IRQ_OVERSAMPLING_FACTOR; + + lrng_health_disable(); + lrng_irq_highres_timer = false; + lrng_irq_entropy_bits = (irq_entropy < new_entropy) ? + new_entropy : irq_entropy; + pr_warn("operating without high-resolution timer and applying IRQ oversampling factor %u\n", + LRNG_IRQ_OVERSAMPLING_FACTOR); + lrng_pcpu_check_compression_state(); + } + /* Ensure that changes to global variables are visible */ + mb(); + + return 0; +} +core_initcall(lrng_init_time_source); + +/* + * Reset all per-CPU pools - reset entropy estimator but leave the pool data + * that may or may not have entropy unchanged. + */ +void lrng_pcpu_reset(void) +{ + int cpu; + + /* Trigger GCD calculation anew. 
*/ + _lrng_gcd_set(0); + + for_each_online_cpu(cpu) + atomic_set(per_cpu_ptr(&lrng_pcpu_array_irqs, cpu), 0); +} + +u32 lrng_pcpu_avail_pool_size(void) +{ + u32 max_size = 0, max_pool = lrng_get_digestsize(); + int cpu; + + if (!lrng_pcpu_continuous_compression) + max_pool = min_t(u32, max_pool, LRNG_DATA_NUM_VALUES); + + for_each_online_cpu(cpu) { + if (lrng_pcpu_pool_online(cpu)) + max_size += max_pool; + } + + return max_size; +} + +/* Return entropy of unused IRQs present in all per-CPU pools. */ +u32 lrng_pcpu_avail_entropy(void) +{ + u32 digestsize_irqs, irq = 0; + int cpu; + + /* Obtain the cap of maximum numbers of IRQs we count */ + digestsize_irqs = lrng_entropy_to_data(lrng_get_digestsize()); + if (!lrng_pcpu_continuous_compression) { + /* Cap to max. number of IRQs the array can hold */ + digestsize_irqs = min_t(u32, digestsize_irqs, + LRNG_DATA_NUM_VALUES); + } + + for_each_online_cpu(cpu) { + if (!lrng_pcpu_pool_online(cpu)) + continue; + irq += min_t(u32, digestsize_irqs, + atomic_read_u32(per_cpu_ptr(&lrng_pcpu_array_irqs, + cpu))); + } + + /* Consider oversampling rate */ + return lrng_reduce_by_osr(lrng_data_to_entropy(irq)); +} + +/* + * Trigger a switch of the hash implementation for the per-CPU pool. + * + * For each per-CPU pool, obtain the message digest with the old hash + * implementation, initialize the per-CPU pool again with the new hash + * implementation and inject the message digest into the new state. + * + * Assumption: the caller must guarantee that the new_cb is available during the + * entire operation (e.g. it must hold the lock against pointer updating). + */ +int lrng_pcpu_switch_hash(int node, + const struct lrng_crypto_cb *new_cb, void *new_hash, + const struct lrng_crypto_cb *old_cb) +{ + u8 digest[LRNG_MAX_DIGESTSIZE]; + u32 digestsize_irqs, found_irqs; + int ret = 0, cpu; + + if (!IS_ENABLED(CONFIG_LRNG_DRNG_SWITCH)) + return -EOPNOTSUPP; + + for_each_online_cpu(cpu) { + struct shash_desc *pcpu_shash; + + /* + * Only switch the per-CPU pools for the current node because + * the crypto_cb only applies NUMA-node-wide. + */ + if (cpu_to_node(cpu) != node || !lrng_pcpu_pool_online(cpu)) + continue; + + pcpu_shash = (struct shash_desc *)per_cpu_ptr(lrng_pcpu_pool, + cpu); + + digestsize_irqs = old_cb->lrng_hash_digestsize(pcpu_shash); + digestsize_irqs = lrng_entropy_to_data(digestsize_irqs << 3); + + if (pcpu_shash->tfm == new_hash) + continue; + + /* Get the per-CPU pool hash with old digest ... */ + ret = old_cb->lrng_hash_final(pcpu_shash, digest) ?: + /* ... re-initialize the hash with the new digest ... */ + new_cb->lrng_hash_init(pcpu_shash, new_hash) ?: + /* + * ... feed the old hash into the new state. We may feed + * uninitialized memory into the new state, but this is + * considered no issue and even good as we have some more + * uncertainty here. + */ + new_cb->lrng_hash_update(pcpu_shash, digest, + sizeof(digest)); + if (ret) + goto out; + + /* + * In case the new digest is larger than the old one, cap + * the available entropy to the old message digest used to + * process the existing data. 
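+		 *
+		 * Editorial worked example: when switching from SHA-1
+		 * (160-bit digest) to SHA-256 with lrng_irq_entropy_bits ==
+		 * 256 (one bit credited per interrupt), digestsize_irqs
+		 * evaluates to 160, so a per-CPU counter of e.g. 200
+		 * collected interrupts is capped to 160 below.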
+ */ + found_irqs = atomic_xchg_relaxed( + per_cpu_ptr(&lrng_pcpu_array_irqs, cpu), 0); + found_irqs = min_t(u32, found_irqs, digestsize_irqs); + atomic_add_return_relaxed(found_irqs, + per_cpu_ptr(&lrng_pcpu_array_irqs, cpu)); + + pr_debug("Re-initialize per-CPU entropy pool for CPU %d on NUMA node %d with hash %s\n", + cpu, node, new_cb->lrng_hash_name()); + } + +out: + memzero_explicit(digest, sizeof(digest)); + return ret; +} + +/* + * When reading the per-CPU message digest, make sure we use the crypto + * callbacks defined for the NUMA node the per-CPU pool is defined for because + * the LRNG crypto switch support is only atomic per NUMA node. + */ +static u32 +lrng_pcpu_pool_hash_one(const struct lrng_crypto_cb *pcpu_crypto_cb, + void *pcpu_hash, int cpu, u8 *digest, u32 *digestsize) +{ + struct shash_desc *pcpu_shash = + (struct shash_desc *)per_cpu_ptr(lrng_pcpu_pool, cpu); + spinlock_t *lock = per_cpu_ptr(&lrng_pcpu_lock, cpu); + unsigned long flags; + u32 digestsize_irqs, found_irqs; + + /* Lock guarding against reading / writing to per-CPU pool */ + spin_lock_irqsave(lock, flags); + + *digestsize = pcpu_crypto_cb->lrng_hash_digestsize(pcpu_hash); + digestsize_irqs = lrng_entropy_to_data(*digestsize << 3); + + /* Obtain entropy statement like for the entropy pool */ + found_irqs = atomic_xchg_relaxed( + per_cpu_ptr(&lrng_pcpu_array_irqs, cpu), 0); + /* Cap to maximum amount of data we can hold in hash */ + found_irqs = min_t(u32, found_irqs, digestsize_irqs); + + /* Cap to maximum amount of data we can hold in array */ + if (!lrng_pcpu_continuous_compression) + found_irqs = min_t(u32, found_irqs, LRNG_DATA_NUM_VALUES); + + /* Store all not-yet compressed data in data array into hash, ... */ + if (pcpu_crypto_cb->lrng_hash_update(pcpu_shash, + (u8 *)per_cpu_ptr(lrng_pcpu_array, cpu), + LRNG_DATA_ARRAY_SIZE * sizeof(u32)) ?: + /* ... get the per-CPU pool digest, ... */ + pcpu_crypto_cb->lrng_hash_final(pcpu_shash, digest) ?: + /* ... re-initialize the hash, ... */ + pcpu_crypto_cb->lrng_hash_init(pcpu_shash, pcpu_hash) ?: + /* ... feed the old hash into the new state. */ + pcpu_crypto_cb->lrng_hash_update(pcpu_shash, digest, *digestsize)) + found_irqs = 0; + + spin_unlock_irqrestore(lock, flags); + return found_irqs; +} + +/* + * Hash all per-CPU pools and return the digest to be used as seed data for + * seeding a DRNG. The caller must guarantee backtracking resistance. + * The function will only copy as much data as entropy is available into the + * caller-provided output buffer. + * + * This function handles the translation from the number of received interrupts + * into an entropy statement. The conversion depends on LRNG_IRQ_ENTROPY_BITS + * which defines how many interrupts must be received to obtain 256 bits of + * entropy. With this value, the function lrng_data_to_entropy converts a given + * data size (received interrupts, requested amount of data, etc.) into an + * entropy statement. lrng_entropy_to_data does the reverse. + * + * @outbuf: buffer to store data in with size requested_bits + * @requested_bits: Requested amount of entropy + * @fully_seeded: indicator whether LRNG is fully seeded + * @return: amount of entropy in outbuf in bits. 
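+ *
+ * Editorial worked example of the conversion: with
+ * CONFIG_LRNG_IRQ_ENTROPY_RATE == 256 and a high-resolution timer,
+ * lrng_entropy_to_data(256) == 256 interrupts and
+ * lrng_data_to_entropy(128) == 128 bits. Without a high-resolution
+ * timer the rate is scaled by LRNG_IRQ_OVERSAMPLING_FACTOR, so
+ * correspondingly more interrupts are needed per bit of entropy.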
+ */
+u32 lrng_pcpu_pool_hash(u8 *outbuf, u32 requested_bits, bool fully_seeded)
+{
+	SHASH_DESC_ON_STACK(shash, NULL);
+	const struct lrng_crypto_cb *crypto_cb;
+	struct lrng_drng **lrng_drng = lrng_drng_instances();
+	struct lrng_drng *drng = lrng_drng_init_instance();
+	u8 digest[LRNG_MAX_DIGESTSIZE];
+	unsigned long flags, flags2;
+	u32 found_irqs, collected_irqs = 0, collected_ent_bits, requested_irqs,
+	    returned_ent_bits;
+	int ret, cpu;
+	void *hash;
+
+	/* Lock guarding replacement of per-NUMA hash */
+	lrng_hash_lock(drng, &flags);
+
+	crypto_cb = drng->crypto_cb;
+	hash = drng->hash;
+
+	/* The hash state is filled with all per-CPU pool hashes. */
+	ret = crypto_cb->lrng_hash_init(shash, hash);
+	if (ret)
+		goto err;
+
+	requested_irqs = lrng_entropy_to_data(requested_bits +
+					      lrng_compress_osr());
+
+	/*
+	 * Harvest entropy from each per-CPU hash state - even though we may
+	 * have collected sufficient entropy, we will hash all per-CPU pools.
+	 */
+	for_each_online_cpu(cpu) {
+		struct lrng_drng *pcpu_drng = drng;
+		u32 digestsize, pcpu_unused_irqs = 0;
+		int node = cpu_to_node(cpu);
+
+		/* If pool is not online, then no entropy is present. */
+		if (!lrng_pcpu_pool_online(cpu))
+			continue;
+
+		if (lrng_drng && lrng_drng[node])
+			pcpu_drng = lrng_drng[node];
+
+		if (pcpu_drng == drng) {
+			found_irqs = lrng_pcpu_pool_hash_one(crypto_cb, hash,
+							     cpu, digest,
+							     &digestsize);
+		} else {
+			lrng_hash_lock(pcpu_drng, &flags2);
+			found_irqs =
+				lrng_pcpu_pool_hash_one(pcpu_drng->crypto_cb,
+							pcpu_drng->hash, cpu,
+							digest, &digestsize);
+			lrng_hash_unlock(pcpu_drng, flags2);
+		}
+
+		/* Inject the digest into the state of all per-CPU pools */
+		ret = crypto_cb->lrng_hash_update(shash, digest, digestsize);
+		if (ret)
+			goto err;
+
+		collected_irqs += found_irqs;
+		if (collected_irqs > requested_irqs) {
+			pcpu_unused_irqs = collected_irqs - requested_irqs;
+			atomic_add_return_relaxed(pcpu_unused_irqs,
+				per_cpu_ptr(&lrng_pcpu_array_irqs, cpu));
+			collected_irqs = requested_irqs;
+		}
+		pr_debug("%u interrupts used from entropy pool of CPU %d, %u interrupts remain unused\n",
+			 found_irqs - pcpu_unused_irqs, cpu, pcpu_unused_irqs);
+	}
+
+	ret = crypto_cb->lrng_hash_final(shash, digest);
+	if (ret)
+		goto err;
+
+	collected_ent_bits = lrng_data_to_entropy(collected_irqs);
+	/* Cap to maximum entropy that can ever be generated with given hash */
+	collected_ent_bits = min_t(u32, collected_ent_bits,
+				   crypto_cb->lrng_hash_digestsize(hash) << 3);
+	/* Apply oversampling: discount requested oversampling rate */
+	returned_ent_bits = lrng_reduce_by_osr(collected_ent_bits);
+
+	pr_debug("obtained %u bits by collecting %u bits of entropy from entropy pool noise source\n",
+		 returned_ent_bits, collected_ent_bits);
+
+	/*
+	 * Truncate to available entropy as implicitly allowed by SP800-90B
+	 * section 3.1.5.1.1 table 1 which awards truncated hashes full
+	 * entropy.
+	 *
+	 * During boot time, we read requested_bits data with
+	 * returned_ent_bits entropy. In case our conservative entropy
+	 * estimate underestimates the available entropy we can transport as
+	 * much available entropy as possible.
+	 */
+	memcpy(outbuf, digest, fully_seeded ?
returned_ent_bits >> 3 : + requested_bits >> 3); + +out: + crypto_cb->lrng_hash_desc_zero(shash); + lrng_hash_unlock(drng, flags); + memzero_explicit(digest, sizeof(digest)); + return returned_ent_bits; + +err: + returned_ent_bits = 0; + goto out; +} + +/* Compress the lrng_pcpu_array array into lrng_pcpu_pool */ +static void lrng_pcpu_array_compress(void) +{ + struct shash_desc *shash = + (struct shash_desc *)this_cpu_ptr(lrng_pcpu_pool); + struct lrng_drng **lrng_drng = lrng_drng_instances(); + struct lrng_drng *drng = lrng_drng_init_instance(); + const struct lrng_crypto_cb *crypto_cb; + spinlock_t *lock = this_cpu_ptr(&lrng_pcpu_lock); + unsigned long flags, flags2; + int node = numa_node_id(); + void *hash; + bool init = false; + + /* Get NUMA-node local hash instance */ + if (lrng_drng && lrng_drng[node]) + drng = lrng_drng[node]; + + lrng_hash_lock(drng, &flags); + crypto_cb = drng->crypto_cb; + hash = drng->hash; + + if (unlikely(!this_cpu_read(lrng_pcpu_lock_init))) { + init = true; + spin_lock_init(lock); + this_cpu_write(lrng_pcpu_lock_init, true); + pr_debug("Initializing per-CPU entropy pool for CPU %d on NUMA node %d with hash %s\n", + raw_smp_processor_id(), node, + crypto_cb->lrng_hash_name()); + } + + spin_lock_irqsave(lock, flags2); + + if (unlikely(init) && crypto_cb->lrng_hash_init(shash, hash)) { + this_cpu_write(lrng_pcpu_lock_init, false); + pr_warn("Initialization of hash failed\n"); + } else if (lrng_pcpu_continuous_compression) { + /* Add entire per-CPU data array content into entropy pool. */ + if (crypto_cb->lrng_hash_update(shash, + (u8 *)this_cpu_ptr(lrng_pcpu_array), + LRNG_DATA_ARRAY_SIZE * sizeof(u32))) + pr_warn_ratelimited("Hashing of entropy data failed\n"); + } + + spin_unlock_irqrestore(lock, flags2); + lrng_hash_unlock(drng, flags); +} + +/* Compress data array into hash */ +static void lrng_pcpu_array_to_hash(u32 ptr) +{ + u32 *array = this_cpu_ptr(lrng_pcpu_array); + + /* + * During boot time the hash operation is triggered more often than + * during regular operation. + */ + if (unlikely(!lrng_state_fully_seeded())) { + if ((ptr & 31) && (ptr < LRNG_DATA_WORD_MASK)) + return; + } else if (ptr < LRNG_DATA_WORD_MASK) { + return; + } + + if (lrng_raw_array_entropy_store(*array)) { + u32 i; + + /* + * If we fed even a part of the array to external analysis, we + * mark that the entire array and the per-CPU pool to have no + * entropy. This is due to the non-IID property of the data as + * we do not fully know whether the existing dependencies + * diminish the entropy beyond to what we expect it has. + */ + atomic_set(this_cpu_ptr(&lrng_pcpu_array_irqs), 0); + + for (i = 1; i < LRNG_DATA_ARRAY_SIZE; i++) + lrng_raw_array_entropy_store(*(array + i)); + } else { + lrng_pcpu_array_compress(); + /* Ping pool handler about received entropy */ + lrng_pool_add_entropy(); + } +} + +/* + * Concatenate full 32 bit word at the end of time array even when current + * ptr is not aligned to sizeof(data). + */ +static void _lrng_pcpu_array_add_u32(u32 data) +{ + /* Increment pointer by number of slots taken for input value */ + u32 pre_ptr, mask, ptr = this_cpu_add_return(lrng_pcpu_array_ptr, + LRNG_DATA_SLOTS_PER_UINT); + unsigned int pre_array; + + /* + * This function injects a unit into the array - guarantee that + * array unit size is equal to data type of input data. 
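+ *
+ * Editorial worked example (8-bit slots, 4 slots per u32): advancing
+ * the pointer by LRNG_DATA_SLOTS_PER_UINT from index 2 yields ptr == 6;
+ * lrng_pcpu_split_u32() then returns pre_ptr == 2 and a mask covering
+ * the low 16 bits, so the two most significant bytes of the new word
+ * land in array word 0 and the two least significant bytes in word 1.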
+ */
+	BUILD_BUG_ON(LRNG_DATA_ARRAY_MEMBER_BITS != (sizeof(data) << 3));
+
+	/*
+	 * The following logic requires at least two units holding
+	 * the data as otherwise the pointer would immediately wrap when
+	 * injecting a u32 word.
+	 */
+	BUILD_BUG_ON(LRNG_DATA_NUM_VALUES <= LRNG_DATA_SLOTS_PER_UINT);
+
+	lrng_pcpu_split_u32(&ptr, &pre_ptr, &mask);
+
+	/* MSBs of data go into previous unit */
+	pre_array = lrng_data_idx2array(pre_ptr);
+	/* zeroization of slot to ensure the following OR adds the data */
+	this_cpu_and(lrng_pcpu_array[pre_array], ~(0xffffffff & ~mask));
+	this_cpu_or(lrng_pcpu_array[pre_array], data & ~mask);
+
+	/* Invoke compression as we just filled data array completely */
+	if (unlikely(pre_ptr > ptr))
+		lrng_pcpu_array_to_hash(LRNG_DATA_WORD_MASK);
+
+	/* LSBs of data go into current unit */
+	this_cpu_write(lrng_pcpu_array[lrng_data_idx2array(ptr)],
+		       data & mask);
+
+	if (likely(pre_ptr <= ptr))
+		lrng_pcpu_array_to_hash(ptr);
+}
+
+/* Concatenate a 32-bit word at the end of the per-CPU array */
+void lrng_pcpu_array_add_u32(u32 data)
+{
+	/*
+	 * Disregard entropy-less data without continuous compression to
+	 * avoid it overwriting data with entropy when array ptr wraps.
+	 */
+	if (lrng_pcpu_continuous_compression)
+		_lrng_pcpu_array_add_u32(data);
+}
+
+/* Concatenate data of max LRNG_DATA_SLOTSIZE_MASK at the end of time array */
+static void lrng_pcpu_array_add_slot(u32 data)
+{
+	/* Get slot */
+	u32 ptr = this_cpu_inc_return(lrng_pcpu_array_ptr) &
+							LRNG_DATA_WORD_MASK;
+	unsigned int array = lrng_data_idx2array(ptr);
+	unsigned int slot = lrng_data_idx2slot(ptr);
+
+	BUILD_BUG_ON(LRNG_DATA_ARRAY_MEMBER_BITS % LRNG_DATA_SLOTSIZE_BITS);
+	/* Ensure consistency of values */
+	BUILD_BUG_ON(LRNG_DATA_ARRAY_MEMBER_BITS !=
+		     sizeof(lrng_pcpu_array[0]) << 3);
+
+	/* zeroization of slot to ensure the following OR adds the data */
+	this_cpu_and(lrng_pcpu_array[array],
+		     ~(lrng_data_slot_val(0xffffffff & LRNG_DATA_SLOTSIZE_MASK,
+					  slot)));
+	/* Store data into slot */
+	this_cpu_or(lrng_pcpu_array[array], lrng_data_slot_val(data, slot));
+
+	lrng_pcpu_array_to_hash(ptr);
+}
+
+static void
+lrng_time_process_common(u32 time, void(*add_time)(u32 data))
+{
+	enum lrng_health_res health_test;
+
+	if (lrng_raw_hires_entropy_store(time))
+		return;
+
+	health_test = lrng_health_test(time);
+	if (health_test > lrng_health_fail_use)
+		return;
+
+	if (health_test == lrng_health_pass)
+		atomic_inc_return(this_cpu_ptr(&lrng_pcpu_array_irqs));
+
+	add_time(time);
+}
+
+/*
+ * Batching up of entropy in per-CPU array before injecting into entropy pool.
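+ *
+ * Editorial note: with a known GCD, each stamp is divided by it and only
+ * LRNG_DATA_SLOTSIZE_BITS low bits enter a slot; while the GCD is still
+ * being measured, the full 32-bit stamp is stored instead (assuming
+ * 8-bit slots):
+ *
+ *	lrng_time_process_common((now / gcd) & 0xff, add_slot);  GCD known
+ *	lrng_time_process_common(now, add_u32);                  GCD unknown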
+ */ +static void lrng_time_process(void) +{ + u32 now_time = random_get_entropy(); + + if (unlikely(!lrng_gcd_tested())) { + /* When GCD is unknown, we process the full time stamp */ + lrng_time_process_common(now_time, _lrng_pcpu_array_add_u32); + lrng_gcd_add_value(now_time); + } else { + /* GCD is known and applied */ + lrng_time_process_common((now_time / lrng_gcd_timer) & + LRNG_DATA_SLOTSIZE_MASK, + lrng_pcpu_array_add_slot); + } + + lrng_perf_time(now_time); +} + +/* Hot code path - Callback for interrupt handler */ +void add_interrupt_randomness(int irq) +{ + if (lrng_pool_highres_timer()) { + lrng_time_process(); + } else { + struct pt_regs *regs = get_irq_regs(); + static atomic_t reg_idx = ATOMIC_INIT(0); + u64 ip; + u32 tmp; + + if (regs) { + u32 *ptr = (u32 *)regs; + int reg_ptr = atomic_add_return_relaxed(1, ®_idx); + size_t n = (sizeof(struct pt_regs) / sizeof(u32)); + + ip = instruction_pointer(regs); + tmp = *(ptr + (reg_ptr % n)); + tmp = lrng_raw_regs_entropy_store(tmp) ? 0 : tmp; + _lrng_pcpu_array_add_u32(tmp); + } else { + ip = _RET_IP_; + } + + lrng_time_process(); + + /* + * The XOR operation combining the different values is not + * considered to destroy entropy since the entirety of all + * processed values delivers the entropy (and not each + * value separately of the other values). + */ + tmp = lrng_raw_jiffies_entropy_store(jiffies) ? 0 : jiffies; + tmp ^= lrng_raw_irq_entropy_store(irq) ? 0 : irq; + tmp ^= lrng_raw_retip_entropy_store(ip) ? 0 : ip; + tmp ^= ip >> 32; + _lrng_pcpu_array_add_u32(tmp); + } +} +EXPORT_SYMBOL(add_interrupt_randomness); + +void lrng_irq_es_state(unsigned char *buf, size_t buflen) +{ + const struct lrng_drng *lrng_drng_init = lrng_drng_init_instance(); + + /* Assume the lrng_drng_init lock is taken by caller */ + snprintf(buf, buflen, + "IRQ ES properties:\n" + " Hash for operating entropy pool: %s\n" + " per-CPU interrupt collection size: %u\n" + " Standards compliance: %s\n" + " High-resolution timer: %s\n" + " Continuous compression: %s\n", + lrng_drng_init->crypto_cb->lrng_hash_name(), + LRNG_DATA_NUM_VALUES, + lrng_sp80090b_compliant() ? "SP800-90B " : "", + lrng_pool_highres_timer() ? "true" : "false", + lrng_pcpu_continuous_compression ? "true" : "false"); +} diff --git a/drivers/char/lrng/lrng_es_irq.h b/drivers/char/lrng/lrng_es_irq.h new file mode 100644 index 000000000000..00b16b1aa45f --- /dev/null +++ b/drivers/char/lrng/lrng_es_irq.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * LRNG Slow Noise Source: Time stamp array handling + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +/* + * To limit the impact on the interrupt handling, the LRNG concatenates + * entropic LSB parts of the time stamps in a per-CPU array and only + * injects them into the entropy pool when the array is full. + */ + +/* Store multiple integers in one u32 */ +#define LRNG_DATA_SLOTSIZE_BITS (8) +#define LRNG_DATA_SLOTSIZE_MASK ((1 << LRNG_DATA_SLOTSIZE_BITS) - 1) +#define LRNG_DATA_ARRAY_MEMBER_BITS (4 << 3) /* ((sizeof(u32)) << 3) */ +#define LRNG_DATA_SLOTS_PER_UINT (LRNG_DATA_ARRAY_MEMBER_BITS / \ + LRNG_DATA_SLOTSIZE_BITS) + +/* + * Number of time values to store in the array - in small environments + * only one atomic_t variable per CPU is used. 
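+ *
+ * Editorial worked example, assuming CONFIG_LRNG_COLLECTION_SIZE == 1024:
+ * LRNG_DATA_NUM_VALUES == 1024, LRNG_DATA_WORD_MASK == 1023,
+ * LRNG_DATA_SLOTS_PER_UINT == 32 / 8 == 4 and therefore
+ * LRNG_DATA_ARRAY_SIZE == 1024 / 4 == 256 u32 words per CPU.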
+ */ +#define LRNG_DATA_NUM_VALUES (CONFIG_LRNG_COLLECTION_SIZE) +/* Mask of LSB of time stamp to store */ +#define LRNG_DATA_WORD_MASK (LRNG_DATA_NUM_VALUES - 1) + +#define LRNG_DATA_SLOTS_MASK (LRNG_DATA_SLOTS_PER_UINT - 1) +#define LRNG_DATA_ARRAY_SIZE (LRNG_DATA_NUM_VALUES / \ + LRNG_DATA_SLOTS_PER_UINT) + +/* Starting bit index of slot */ +static inline unsigned int lrng_data_slot2bitindex(unsigned int slot) +{ + return (LRNG_DATA_SLOTSIZE_BITS * slot); +} + +/* Convert index into the array index */ +static inline unsigned int lrng_data_idx2array(unsigned int idx) +{ + return idx / LRNG_DATA_SLOTS_PER_UINT; +} + +/* Convert index into the slot of a given array index */ +static inline unsigned int lrng_data_idx2slot(unsigned int idx) +{ + return idx & LRNG_DATA_SLOTS_MASK; +} + +/* Convert value into slot value */ +static inline unsigned int lrng_data_slot_val(unsigned int val, + unsigned int slot) +{ + return val << lrng_data_slot2bitindex(slot); +} + +/* + * Return the pointers for the previous and current units to inject a u32 into. + * Also return the mask which the u32 word is to be processed. + */ +static inline void lrng_pcpu_split_u32(u32 *ptr, u32 *pre_ptr, u32 *mask) +{ + /* ptr to previous unit */ + *pre_ptr = (*ptr - LRNG_DATA_SLOTS_PER_UINT) & LRNG_DATA_WORD_MASK; + *ptr &= LRNG_DATA_WORD_MASK; + + /* mask to split data into the two parts for the two units */ + *mask = ((1 << (*pre_ptr & (LRNG_DATA_SLOTS_PER_UINT - 1)) * + LRNG_DATA_SLOTSIZE_BITS)) - 1; +} diff --git a/drivers/char/lrng/lrng_es_jent.c b/drivers/char/lrng/lrng_es_jent.c new file mode 100644 index 000000000000..79b3dc8e8f56 --- /dev/null +++ b/drivers/char/lrng/lrng_es_jent.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG Fast Entropy Source: Jitter RNG + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +#include "lrng_internal.h" + +/* + * Estimated entropy of data is a 16th of LRNG_DRNG_SECURITY_STRENGTH_BITS. + * Albeit a full entropy assessment is provided for the noise source indicating + * that it provides high entropy rates and considering that it deactivates + * when it detects insufficient hardware, the chosen under estimation of + * entropy is considered to be acceptable to all reviewers. + */ +static u32 jitterrng = CONFIG_LRNG_JENT_ENTROPY_RATE; +#ifdef CONFIG_LRNG_RUNTIME_ES_CONFIG +module_param(jitterrng, uint, 0644); +MODULE_PARM_DESC(jitterrng, "Entropy in bits of 256 data bits from Jitter RNG noise source"); +#endif + +static bool lrng_jent_initialized = false; +static struct rand_data *lrng_jent_state; + +static int __init lrng_jent_initialize(void) +{ + /* Initialize the Jitter RNG after the clocksources are initialized. 
*/ + if (jent_entropy_init() || + (lrng_jent_state = jent_entropy_collector_alloc(1, 0)) == NULL) { + jitterrng = 0; + pr_info("Jitter RNG unusable on current system\n"); + return 0; + } + lrng_jent_initialized = true; + lrng_pool_add_entropy(); + pr_debug("Jitter RNG working on current system\n"); + + return 0; +} +device_initcall(lrng_jent_initialize); + +/* + * lrng_get_jent() - Get Jitter RNG entropy + * + * @outbuf: buffer to store entropy + * @outbuflen: length of buffer + * + * Return: + * * > 0 on success where value provides the added entropy in bits + * * 0 if no fast source was available + */ +u32 lrng_get_jent(u8 *outbuf, u32 requested_bits) +{ + int ret; + u32 ent_bits = lrng_jent_entropylevel(requested_bits); + unsigned long flags; + static DEFINE_SPINLOCK(lrng_jent_lock); + + spin_lock_irqsave(&lrng_jent_lock, flags); + + if (!lrng_jent_initialized) { + spin_unlock_irqrestore(&lrng_jent_lock, flags); + return 0; + } + + ret = jent_read_entropy(lrng_jent_state, outbuf, requested_bits >> 3); + spin_unlock_irqrestore(&lrng_jent_lock, flags); + + if (ret) { + pr_debug("Jitter RNG failed with %d\n", ret); + return 0; + } + + pr_debug("obtained %u bits of entropy from Jitter RNG noise source\n", + ent_bits); + + return ent_bits; +} + +u32 lrng_jent_entropylevel(u32 requested_bits) +{ + return lrng_fast_noise_entropylevel((lrng_jent_initialized) ? + jitterrng : 0, requested_bits); +} + +void lrng_jent_es_state(unsigned char *buf, size_t buflen) +{ + snprintf(buf, buflen, + "JitterRNG ES properties:\n" + " Enabled: %s\n", lrng_jent_initialized ? "true" : "false"); +} diff --git a/drivers/char/lrng/lrng_es_mgr.c b/drivers/char/lrng/lrng_es_mgr.c new file mode 100644 index 000000000000..efeb62ce0ce9 --- /dev/null +++ b/drivers/char/lrng/lrng_es_mgr.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG Entropy sources management + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include "lrng_internal.h" + +struct lrng_state { + bool can_invalidate; /* Can invalidate batched entropy? */ + bool perform_seedwork; /* Can seed work be performed? */ + bool lrng_operational; /* Is DRNG operational? */ + bool lrng_fully_seeded; /* Is DRNG fully seeded? */ + bool lrng_min_seeded; /* Is DRNG minimally seeded? */ + bool all_online_numa_node_seeded;/* All NUMA DRNGs seeded? */ + + /* + * To ensure that external entropy providers cannot dominate the + * internal noise sources but yet cannot be dominated by internal + * noise sources, the following booleans are intended to allow + * external to provide seed once when a DRNG reseed occurs. This + * triggering of external noise source is performed even when the + * entropy pool has sufficient entropy. 
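+	 *
+	 * Editorial sketch of the intended flow: a write to /dev/random is
+	 * accepted while lrng_seed_user is true; the interface code is then
+	 * expected to clear the flag, and lrng_fill_seed_buffer() re-enables
+	 * both flags via lrng_state_exseed_allow_all() on the next reseed.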
+ */ + bool lrng_seed_hw; /* Allow HW to provide seed */ + bool lrng_seed_user; /* Allow user space to provide seed */ + + atomic_t boot_entropy_thresh; /* Reseed threshold */ + atomic_t reseed_in_progress; /* Flag for on executing reseed */ + struct work_struct lrng_seed_work; /* (re)seed work queue */ +}; + +static struct lrng_state lrng_state = { + false, false, false, false, false, false, true, true, + .boot_entropy_thresh = ATOMIC_INIT(LRNG_INIT_ENTROPY_BITS), + .reseed_in_progress = ATOMIC_INIT(0), +}; + +/********************************** Helper ***********************************/ + +/* External entropy provider is allowed to provide seed data */ +bool lrng_state_exseed_allow(enum lrng_external_noise_source source) +{ + if (source == lrng_noise_source_hw) + return lrng_state.lrng_seed_hw; + return lrng_state.lrng_seed_user; +} + +/* Enable / disable external entropy provider to furnish seed */ +void lrng_state_exseed_set(enum lrng_external_noise_source source, bool type) +{ + if (source == lrng_noise_source_hw) + lrng_state.lrng_seed_hw = type; + else + lrng_state.lrng_seed_user = type; +} + +static void lrng_state_exseed_allow_all(void) +{ + lrng_state_exseed_set(lrng_noise_source_hw, true); + lrng_state_exseed_set(lrng_noise_source_user, true); +} + +/* + * Reading of the LRNG pool is only allowed by one caller. The reading is + * only performed to (re)seed DRNGs. Thus, if this "lock" is already taken, + * the reseeding operation is in progress. The caller is not intended to wait + * but continue with its other operation. + */ +int lrng_pool_trylock(void) +{ + return atomic_cmpxchg(&lrng_state.reseed_in_progress, 0, 1); +} + +void lrng_pool_unlock(void) +{ + atomic_set(&lrng_state.reseed_in_progress, 0); +} + +/* Set new entropy threshold for reseeding during boot */ +void lrng_set_entropy_thresh(u32 new_entropy_bits) +{ + atomic_set(&lrng_state.boot_entropy_thresh, new_entropy_bits); +} + +/* + * Reset LRNG state - the entropy counters are reset, but the data that may + * or may not have entropy remains in the pools as this data will not hurt. 
+ */ +void lrng_reset_state(void) +{ + lrng_pool_set_entropy(0); + lrng_pcpu_reset(); + lrng_state.lrng_operational = false; + lrng_state.lrng_fully_seeded = false; + lrng_state.lrng_min_seeded = false; + lrng_state.all_online_numa_node_seeded = false; + pr_debug("reset LRNG\n"); +} + +/* Set flag that all DRNGs are fully seeded */ +void lrng_pool_all_numa_nodes_seeded(bool set) +{ + lrng_state.all_online_numa_node_seeded = set; +} + +/* Return boolean whether LRNG reached minimally seed level */ +bool lrng_state_min_seeded(void) +{ + return lrng_state.lrng_min_seeded; +} + +/* Return boolean whether LRNG reached fully seed level */ +bool lrng_state_fully_seeded(void) +{ + return lrng_state.lrng_fully_seeded; +} + +/* Return boolean whether LRNG is considered fully operational */ +bool lrng_state_operational(void) +{ + return lrng_state.lrng_operational; +} + +/* Policy to check whether entropy buffer contains full seeded entropy */ +bool lrng_fully_seeded(bool fully_seeded, struct entropy_buf *eb) +{ + return ((eb->a_bits + eb->b_bits + eb->c_bits + eb->d_bits) >= + lrng_get_seed_entropy_osr(fully_seeded)); +} + +/* Mark one DRNG as not fully seeded */ +void lrng_unset_fully_seeded(struct lrng_drng *drng) +{ + drng->fully_seeded = false; + lrng_pool_all_numa_nodes_seeded(false); + + /* + * The init DRNG instance must always be fully seeded as this instance + * is the fall-back if any of the per-NUMA node DRNG instances is + * insufficiently seeded. Thus, we mark the entire LRNG as + * non-operational if the initial DRNG becomes not fully seeded. + */ + if (drng == lrng_drng_init_instance() && lrng_state_operational()) { + pr_debug("LRNG set to non-operational\n"); + lrng_state.lrng_operational = false; + lrng_state.lrng_fully_seeded = false; + + /* If sufficient entropy is available, reseed now. */ + lrng_pool_add_entropy(); + } +} + +/* Policy to enable LRNG operational mode */ +static void lrng_set_operational(u32 external_es) +{ + /* LRNG is operational if the initial DRNG is fully seeded ... */ + if (lrng_state.lrng_fully_seeded && + /* ... and either internal ES SP800-90B startup is complete ... */ + (lrng_sp80090b_startup_complete() || + /* ... or the external ES provided sufficient entropy. */ + (lrng_get_seed_entropy_osr(lrng_state_fully_seeded()) <= + external_es))) { + lrng_state.lrng_operational = true; + lrng_process_ready_list(); + lrng_init_wakeup(); + pr_info("LRNG fully operational\n"); + } +} + +/* Available entropy in the entire LRNG considering all entropy sources */ +u32 lrng_avail_entropy(void) +{ + u32 ent_thresh = lrng_security_strength(); + + /* + * Apply oversampling during initialization according to SP800-90C as + * we request a larger buffer from the ES. + */ + if (lrng_sp80090c_compliant() && + !lrng_state.all_online_numa_node_seeded) + ent_thresh += CONFIG_LRNG_SEED_BUFFER_INIT_ADD_BITS; + + return lrng_pcpu_avail_entropy() + lrng_avail_aux_entropy() + + lrng_archrandom_entropylevel(ent_thresh) + + lrng_jent_entropylevel(ent_thresh); +} + +/* + * lrng_init_ops() - Set seed stages of LRNG + * + * Set the slow noise source reseed trigger threshold. The initial threshold + * is set to the minimum data size that can be read from the pool: a word. Upon + * reaching this value, the next seed threshold of 128 bits is set followed + * by 256 bits. 
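+ *
+ * Editorial example of the progression (ignoring SP800-90C oversampling):
+ * threshold LRNG_INIT_ENTROPY_BITS (one 32-bit word) -> minimally seeded
+ * at LRNG_MIN_SEED_ENTROPY_BITS (128 bits) -> fully seeded at the
+ * security strength (256 bits), at which point the DRNG can be marked
+ * operational.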
+ * + * @eb: buffer containing the size of entropy currently injected into DRNG + */ +void lrng_init_ops(struct entropy_buf *eb) +{ + struct lrng_state *state = &lrng_state; + u32 requested_bits, seed_bits, external_es; + + if (state->lrng_operational) + return; + + requested_bits = lrng_get_seed_entropy_osr( + state->all_online_numa_node_seeded); + + /* + * Entropy provided by external entropy sources - if they provide + * the requested amount of entropy, unblock the interface. + */ + external_es = eb->a_bits + eb->c_bits + eb->d_bits; + seed_bits = external_es + eb->b_bits; + + /* DRNG is seeded with full security strength */ + if (state->lrng_fully_seeded) { + lrng_set_operational(external_es); + lrng_set_entropy_thresh(requested_bits); + } else if (lrng_fully_seeded(state->all_online_numa_node_seeded, eb)) { + if (state->can_invalidate) + invalidate_batched_entropy(); + + state->lrng_fully_seeded = true; + lrng_set_operational(external_es); + state->lrng_min_seeded = true; + pr_info("LRNG fully seeded with %u bits of entropy\n", + seed_bits); + lrng_set_entropy_thresh(requested_bits); + } else if (!state->lrng_min_seeded) { + + /* DRNG is seeded with at least 128 bits of entropy */ + if (seed_bits >= LRNG_MIN_SEED_ENTROPY_BITS) { + if (state->can_invalidate) + invalidate_batched_entropy(); + + state->lrng_min_seeded = true; + pr_info("LRNG minimally seeded with %u bits of entropy\n", + seed_bits); + lrng_set_entropy_thresh(requested_bits); + lrng_init_wakeup(); + + /* DRNG is seeded with at least LRNG_INIT_ENTROPY_BITS bits */ + } else if (seed_bits >= LRNG_INIT_ENTROPY_BITS) { + pr_info("LRNG initial entropy level %u bits of entropy\n", + seed_bits); + lrng_set_entropy_thresh(LRNG_MIN_SEED_ENTROPY_BITS); + } + } +} + +int __init rand_initialize(void) +{ + struct seed { + ktime_t time; + unsigned long data[(LRNG_MAX_DIGESTSIZE / + sizeof(unsigned long))]; + struct new_utsname utsname; + } seed __aligned(LRNG_KCAPI_ALIGN); + unsigned int i; + + BUILD_BUG_ON(LRNG_MAX_DIGESTSIZE % sizeof(unsigned long)); + + seed.time = ktime_get_real(); + + for (i = 0; i < ARRAY_SIZE(seed.data); i++) { + if (!arch_get_random_seed_long_early(&(seed.data[i])) && + !arch_get_random_long_early(&seed.data[i])) + seed.data[i] = random_get_entropy(); + } + memcpy(&seed.utsname, utsname(), sizeof(*(utsname()))); + + lrng_pool_insert_aux((u8 *)&seed, sizeof(seed), 0); + memzero_explicit(&seed, sizeof(seed)); + + /* Initialize the seed work queue */ + INIT_WORK(&lrng_state.lrng_seed_work, lrng_drng_seed_work); + lrng_state.perform_seedwork = true; + + lrng_drngs_init_cc20(true); + invalidate_batched_entropy(); + + lrng_state.can_invalidate = true; + + return 0; +} + +/* Interface requesting a reseed of the DRNG */ +void lrng_pool_add_entropy(void) +{ + /* + * Once all DRNGs are fully seeded, the interrupt noise + * sources will not trigger any reseeding any more. + */ + if (likely(lrng_state.all_online_numa_node_seeded)) + return; + + /* Only try to reseed if the DRNG is alive. */ + if (!lrng_get_available()) + return; + + /* Only trigger the DRNG reseed if we have collected entropy. */ + if (lrng_avail_entropy() < + atomic_read_u32(&lrng_state.boot_entropy_thresh)) + return; + + /* Ensure that the seeding only occurs once at any given time. */ + if (lrng_pool_trylock()) + return; + + /* Seed the DRNG with any available noise. 
*/ + if (lrng_state.perform_seedwork) + schedule_work(&lrng_state.lrng_seed_work); + else + lrng_drng_seed_work(NULL); +} + +/* Fill the seed buffer with data from the noise sources */ +void lrng_fill_seed_buffer(struct entropy_buf *entropy_buf, u32 requested_bits) +{ + struct lrng_state *state = &lrng_state; + u32 req_ent = lrng_sp80090c_compliant() ? + lrng_security_strength() : LRNG_MIN_SEED_ENTROPY_BITS; + + /* Guarantee that requested bits is a multiple of bytes */ + BUILD_BUG_ON(LRNG_DRNG_SECURITY_STRENGTH_BITS % 8); + + /* always reseed the DRNG with the current time stamp */ + entropy_buf->now = random_get_entropy(); + + /* + * Require at least 128 bits of entropy for any reseed. If the LRNG is + * operated SP800-90C compliant we want to comply with SP800-90A section + * 9.2 mandating that DRNG is reseeded with the security strength. + */ + if (state->lrng_fully_seeded && (lrng_avail_entropy() < req_ent)) { + entropy_buf->a_bits = entropy_buf->b_bits = 0; + entropy_buf->c_bits = entropy_buf->d_bits = 0; + goto wakeup; + } + + /* Concatenate the output of the entropy sources. */ + entropy_buf->b_bits = lrng_pcpu_pool_hash(entropy_buf->b, + requested_bits, + state->lrng_fully_seeded); + entropy_buf->c_bits = lrng_get_arch(entropy_buf->c, requested_bits); + entropy_buf->d_bits = lrng_get_jent(entropy_buf->d, requested_bits); + lrng_get_backtrack_aux(entropy_buf, requested_bits); + + /* allow external entropy provider to provide seed */ + lrng_state_exseed_allow_all(); + +wakeup: + /* + * Shall we wake up user space writers? This location covers + * ensures that the user space provider does not dominate the internal + * noise sources since in case the first call of this function finds + * sufficient entropy in the entropy pool, it will not trigger the + * wakeup. This implies that when the next /dev/urandom read happens, + * the entropy pool is drained. + */ + lrng_writer_wakeup(); +} diff --git a/drivers/char/lrng/lrng_health.c b/drivers/char/lrng/lrng_health.c new file mode 100644 index 000000000000..61f266537f16 --- /dev/null +++ b/drivers/char/lrng/lrng_health.c @@ -0,0 +1,410 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * Linux Random Number Generator (LRNG) Health Testing + * + * Copyright (C) 2019 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +#include "lrng_internal.h" + +/* Stuck Test */ +struct lrng_stuck_test { + u32 last_time; /* Stuck test: time of previous IRQ */ + u32 last_delta; /* Stuck test: delta of previous IRQ */ + u32 last_delta2; /* Stuck test: 2. time derivation of prev IRQ */ +}; + +/* Repetition Count Test */ +struct lrng_rct { + atomic_t rct_count; /* Number of stuck values */ +}; + +/* Adaptive Proportion Test */ +struct lrng_apt { + /* Data window size */ +#define LRNG_APT_WINDOW_SIZE 512 + /* LSB of time stamp to process */ +#define LRNG_APT_LSB 16 +#define LRNG_APT_WORD_MASK (LRNG_APT_LSB - 1) + atomic_t apt_count; /* APT counter */ + atomic_t apt_base; /* APT base reference */ + + atomic_t apt_trigger; + bool apt_base_set; /* Is APT base set? 
*/ +}; + +/* The health test code must operate lock-less */ +struct lrng_health { + struct lrng_rct rct; + struct lrng_apt apt; + + bool health_test_enabled; + + /* SP800-90B startup health tests */ +#define LRNG_SP80090B_STARTUP_SAMPLES 1024 +#define LRNG_SP80090B_STARTUP_BLOCKS ((LRNG_SP80090B_STARTUP_SAMPLES + \ + LRNG_APT_WINDOW_SIZE - 1) / \ + LRNG_APT_WINDOW_SIZE) + bool sp80090b_startup_done; + atomic_t sp80090b_startup_blocks; +}; + +static struct lrng_health lrng_health = { + .rct.rct_count = ATOMIC_INIT(0), + + .apt.apt_count = ATOMIC_INIT(0), + .apt.apt_base = ATOMIC_INIT(-1), + .apt.apt_trigger = ATOMIC_INIT(LRNG_APT_WINDOW_SIZE), + .apt.apt_base_set = false, + + .health_test_enabled = true, + + .sp80090b_startup_blocks = ATOMIC_INIT(LRNG_SP80090B_STARTUP_BLOCKS), + .sp80090b_startup_done = false, +}; + +static DEFINE_PER_CPU(struct lrng_stuck_test, lrng_stuck_test); + +static bool lrng_sp80090b_health_requested(void) +{ + /* Health tests are only requested in FIPS mode */ + return fips_enabled; +} + +static bool lrng_sp80090b_health_enabled(void) +{ + struct lrng_health *health = &lrng_health; + + return lrng_sp80090b_health_requested() && health->health_test_enabled; +} + +/*************************************************************************** + * SP800-90B Compliance + * + * If the Linux-RNG is booted into FIPS mode, the following interfaces + * provide an SP800-90B compliant noise source: + * + * * /dev/random + * * getrandom(2) + * * get_random_bytes when using it in conjunction with + * add_random_ready_callback + * + * All other interfaces, including /dev/urandom or get_random_bytes without + * the add_random_ready_callback cannot claim to use an SP800-90B compliant + * noise source. + ***************************************************************************/ + +/* + * Perform SP800-90B startup testing + */ +static void lrng_sp80090b_startup(struct lrng_health *health) +{ + if (!health->sp80090b_startup_done && + atomic_dec_and_test(&health->sp80090b_startup_blocks)) { + struct entropy_buf eb; + + health->sp80090b_startup_done = true; + pr_info("SP800-90B startup health tests completed\n"); + memset(&eb, 0, sizeof(eb)); + lrng_init_ops(&eb); + + /* + * Force a reseed of DRNGs to ensure they are seeded with + * entropy that passed the SP800-90B health tests. + * As the DRNG always will reseed before generating + * random numbers, it does not need a reseed trigger. + */ + lrng_drng_force_reseed(); + } +} + +/* + * Handle failure of SP800-90B startup testing + */ +static void lrng_sp80090b_startup_failure(struct lrng_health *health) +{ + /* Reset of LRNG and its entropy - NOTE: we are in atomic context */ + lrng_reset(); + + /* + * Reset the SP800-90B startup test. + * + * NOTE SP800-90B section 4.3 bullet 4 does not specify what + * exactly is to be done in case of failure! Thus, we do what + * makes sense, i.e. restarting the health test and thus gating + * the output function of /dev/random and getrandom(2). 
+ */ + atomic_set(&health->sp80090b_startup_blocks, + LRNG_SP80090B_STARTUP_BLOCKS); +} + +/* + * Handle failure of SP800-90B runtime testing + */ +static void lrng_sp80090b_runtime_failure(struct lrng_health *health) +{ + lrng_sp80090b_startup_failure(health); + health->sp80090b_startup_done = false; +} + +static void lrng_sp80090b_failure(struct lrng_health *health) +{ + if (health->sp80090b_startup_done) { + pr_err("SP800-90B runtime health test failure - invalidating all existing entropy and initiate SP800-90B startup\n"); + lrng_sp80090b_runtime_failure(health); + } else { + pr_err("SP800-90B startup test failure - resetting\n"); + lrng_sp80090b_startup_failure(health); + } +} + +/* + * Is the SP800-90B startup testing complete? + * + * This function is called by the LRNG to determine whether to unblock + * a certain user interface. Therefore, only the potentially blocking + * user interfaces are considered SP800-90B compliant. + */ +bool lrng_sp80090b_startup_complete(void) +{ + struct lrng_health *health = &lrng_health; + + return (lrng_sp80090b_health_enabled()) ? + health->sp80090b_startup_done : true; +} + +bool lrng_sp80090b_compliant(void) +{ + struct lrng_health *health = &lrng_health; + + return lrng_sp80090b_health_enabled() && health->sp80090b_startup_done; +} + +/*************************************************************************** + * Adaptive Proportion Test + * + * This test complies with SP800-90B section 4.4.2. + ***************************************************************************/ + +/* + * Reset the APT counter + * + * @health [in] Reference to health state + */ +static void lrng_apt_reset(struct lrng_health *health, + unsigned int time_masked) +{ + struct lrng_apt *apt = &health->apt; + + pr_debug("APT value %d for base %d\n", + atomic_read(&apt->apt_count), atomic_read(&apt->apt_base)); + + /* Reset APT */ + atomic_set(&apt->apt_count, 0); + atomic_set(&apt->apt_base, time_masked); +} + +static void lrng_apt_restart(struct lrng_health *health) +{ + struct lrng_apt *apt = &health->apt; + + atomic_set(&apt->apt_trigger, LRNG_APT_WINDOW_SIZE); +} + +/* + * Insert a new entropy event into APT + * + * This function does is void as it does not decide about the fate of a time + * stamp. An APT failure can only happen at the same time of a stuck test + * failure. Thus, the stuck failure will already decide how the time stamp + * is handled. + * + * @health [in] Reference to health state + * @now_time [in] Time stamp to process + */ +static void lrng_apt_insert(struct lrng_health *health, + unsigned int now_time) +{ + struct lrng_apt *apt = &health->apt; + + if (!lrng_sp80090b_health_requested()) + return; + + now_time &= LRNG_APT_WORD_MASK; + + /* Initialization of APT */ + if (!apt->apt_base_set) { + atomic_set(&apt->apt_base, now_time); + apt->apt_base_set = true; + return; + } + + if (now_time == (unsigned int)atomic_read(&apt->apt_base)) { + u32 apt_val = (u32)atomic_inc_return_relaxed(&apt->apt_count); + + if (apt_val >= CONFIG_LRNG_APT_CUTOFF) + lrng_sp80090b_failure(health); + } + + if (atomic_dec_and_test(&apt->apt_trigger)) { + lrng_apt_restart(health); + lrng_apt_reset(health, now_time); + lrng_sp80090b_startup(health); + } +} + +/*************************************************************************** + * Repetition Count Test + * + * The LRNG uses an enhanced version of the Repetition Count Test + * (RCT) specified in SP800-90B section 4.4.1. 
Instead of counting identical + * back-to-back values, the input to the RCT is the counting of the stuck + * values while filling the entropy pool. + * + * The RCT is applied with an alpha of 2^-30 compliant to FIPS 140-2 IG 9.8. + * + * During the counting operation, the LRNG always calculates the RCT + * cut-off value of C. If that value exceeds the allowed cut-off value, + * the LRNG will invalidate all entropy for the entropy pool which implies + * that no data can be extracted from the entropy pool unless new entropy + * is received. + ***************************************************************************/ + +/* + * Hot code path - Insert data for Repetition Count Test + * + * @health: Reference to health information + * @stuck: Decision of stuck test + */ +static void lrng_rct(struct lrng_health *health, int stuck) +{ + struct lrng_rct *rct = &health->rct; + + if (!lrng_sp80090b_health_requested()) + return; + + if (stuck) { + u32 rct_count = atomic_add_return_relaxed(1, &rct->rct_count); + + pr_debug("RCT count: %u\n", rct_count); + + /* + * The cutoff value is based on the following consideration: + * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8. + * In addition, we imply an entropy value H of 1 bit as this + * is the minimum entropy required to provide full entropy. + * + * Note, rct_count (which equals to value B in the + * pseudo code of SP800-90B section 4.4.1) starts with zero. + * Hence we need to subtract one from the cutoff value as + * calculated following SP800-90B. + */ + if (rct_count >= CONFIG_LRNG_RCT_CUTOFF) { + atomic_set(&rct->rct_count, 0); + + /* + * APT must start anew as we consider all previously + * recorded data to contain no entropy. + */ + lrng_apt_restart(health); + + lrng_sp80090b_failure(health); + } + } else { + atomic_set(&rct->rct_count, 0); + } +} + +/*************************************************************************** + * Stuck Test + * + * Checking the: + * 1st derivative of the event occurrence (time delta) + * 2nd derivative of the event occurrence (delta of time deltas) + * 3rd derivative of the event occurrence (delta of delta of time deltas) + * + * All values must always be non-zero. The stuck test is only valid disabled if + * high-resolution time stamps are identified after initialization. + ***************************************************************************/ + +static u32 lrng_delta(u32 prev, u32 next) +{ + /* + * Note that this (unsigned) subtraction does yield the correct value + * in the wraparound-case, i.e. when next < prev. 
+ */ + return (next - prev); +} + +/* + * Hot code path + * + * @health: Reference to health information + * @now: Event time + * @return: 0 event occurrence not stuck (good time stamp) + * != 0 event occurrence stuck (reject time stamp) + */ +static int lrng_irq_stuck(struct lrng_stuck_test *stuck, u32 now_time) +{ + u32 delta = lrng_delta(stuck->last_time, now_time); + u32 delta2 = lrng_delta(stuck->last_delta, delta); + u32 delta3 = lrng_delta(stuck->last_delta2, delta2); + + stuck->last_time = now_time; + stuck->last_delta = delta; + stuck->last_delta2 = delta2; + + if (!delta || !delta2 || !delta3) + return 1; + + return 0; +} + +/*************************************************************************** + * Health test interfaces + ***************************************************************************/ + +/* + * Disable all health tests + */ +void lrng_health_disable(void) +{ + struct lrng_health *health = &lrng_health; + + health->health_test_enabled = false; + + if (lrng_sp80090b_health_requested()) + pr_warn("SP800-90B compliance requested but the Linux RNG is NOT SP800-90B compliant\n"); +} + +/* + * Hot code path - Perform health test on time stamp received from an event + * + * @now_time Time stamp + */ +enum lrng_health_res lrng_health_test(u32 now_time) +{ + struct lrng_health *health = &lrng_health; + struct lrng_stuck_test *stuck_test = this_cpu_ptr(&lrng_stuck_test); + int stuck; + + if (!health->health_test_enabled) + return lrng_health_pass; + + lrng_apt_insert(health, now_time); + + stuck = lrng_irq_stuck(stuck_test, now_time); + lrng_rct(health, stuck); + if (stuck) { + /* SP800-90B disallows using a failing health test time stamp */ + return lrng_sp80090b_health_requested() ? + lrng_health_fail_drop : lrng_health_fail_use; + } + + return lrng_health_pass; +} diff --git a/drivers/char/lrng/lrng_interfaces.c b/drivers/char/lrng/lrng_interfaces.c new file mode 100644 index 000000000000..134e556ac739 --- /dev/null +++ b/drivers/char/lrng/lrng_interfaces.c @@ -0,0 +1,654 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG User and kernel space interfaces + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +#include "lrng_internal.h" + +/* + * If the entropy count falls under this number of bits, then we + * should wake up processes which are selecting or polling on write + * access to /dev/random. + */ +u32 lrng_write_wakeup_bits = (LRNG_WRITE_WAKEUP_ENTROPY << 3); + +static LIST_HEAD(lrng_ready_list); +static DEFINE_SPINLOCK(lrng_ready_list_lock); + +static DECLARE_WAIT_QUEUE_HEAD(lrng_write_wait); +static DECLARE_WAIT_QUEUE_HEAD(lrng_init_wait); +static struct fasync_struct *fasync; + +/********************************** Helper ***********************************/ + +/* Is the DRNG seed level too low? 
*/ +static bool lrng_need_entropy(void) +{ + return (lrng_avail_aux_entropy() < lrng_write_wakeup_bits); +} + +void lrng_writer_wakeup(void) +{ + if (lrng_need_entropy() && wq_has_sleeper(&lrng_write_wait)) { + wake_up_interruptible(&lrng_write_wait); + kill_fasync(&fasync, SIGIO, POLL_OUT); + } +} + +void lrng_init_wakeup(void) +{ + wake_up_all(&lrng_init_wait); + kill_fasync(&fasync, SIGIO, POLL_IN); +} + +/** + * lrng_process_ready_list() - Ping all kernel internal callers waiting until + * the DRNG is completely initialized to inform that the DRNG reached that + * seed level. + * + * When the SP800-90B testing is enabled, the ping only happens if the SP800-90B + * startup health tests are completed. This implies that kernel internal + * callers always have an SP800-90B compliant noise source when being + * pinged. + */ +void lrng_process_ready_list(void) +{ + unsigned long flags; + struct random_ready_callback *rdy, *tmp; + + if (!lrng_state_operational()) + return; + + spin_lock_irqsave(&lrng_ready_list_lock, flags); + list_for_each_entry_safe(rdy, tmp, &lrng_ready_list, list) { + struct module *owner = rdy->owner; + + list_del_init(&rdy->list); + rdy->func(rdy); + module_put(owner); + } + spin_unlock_irqrestore(&lrng_ready_list_lock, flags); +} + +void lrng_debug_report_seedlevel(const char *name) +{ +#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM + static void *previous = NULL; + void *caller = (void *) _RET_IP_; + + if (READ_ONCE(previous) == caller) + return; + + if (!lrng_state_min_seeded()) + pr_notice("%pS %s called without reaching minimally seeded level (available entropy %u)\n", + caller, name, lrng_avail_entropy()); + + WRITE_ONCE(previous, caller); +#endif +} + +/************************ LRNG kernel input interfaces ************************/ + +/* + * add_hwgenerator_randomness() - Interface for in-kernel drivers of true + * hardware RNGs. + * + * Those devices may produce endless random bits and will be throttled + * when our pool is full. + * + * @buffer: buffer holding the entropic data from HW noise sources to be used to + * insert into entropy pool. + * @count: length of buffer + * @entropy_bits: amount of entropy in buffer (value is in bits) + */ +void add_hwgenerator_randomness(const char *buffer, size_t count, + size_t entropy_bits) +{ + /* + * Suspend writing if we are fully loaded with entropy. + * We'll be woken up again once below lrng_write_wakeup_thresh, + * or when the calling thread is about to terminate. + */ + wait_event_interruptible(lrng_write_wait, + lrng_need_entropy() || + lrng_state_exseed_allow(lrng_noise_source_hw) || + kthread_should_stop()); + lrng_state_exseed_set(lrng_noise_source_hw, false); + lrng_pool_insert_aux(buffer, count, entropy_bits); +} +EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); + +/* + * add_bootloader_randomness() - Handle random seed passed by bootloader. + * + * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise + * it would be regarded as device data. + * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER. + * + * @buf: buffer holding the entropic data from HW noise sources to be used to + * insert into entropy pool. + * @size: length of buffer + */ +void add_bootloader_randomness(const void *buf, unsigned int size) +{ + lrng_pool_insert_aux(buf, size, + IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER) ? 
+ size * 8 : 0); +} +EXPORT_SYMBOL_GPL(add_bootloader_randomness); + +/* + * Callback for HID layer -- use the HID event values to stir the entropy pool + */ +void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value) +{ + static unsigned char last_value; + + /* ignore autorepeat and the like */ + if (value == last_value) + return; + + last_value = value; + + lrng_pcpu_array_add_u32((type << 4) ^ code ^ (code >> 4) ^ value); +} +EXPORT_SYMBOL_GPL(add_input_randomness); + +/* + * add_device_randomness() - Add device- or boot-specific data to the entropy + * pool to help initialize it. + * + * None of this adds any entropy; it is meant to avoid the problem of + * the entropy pool having similar initial state across largely + * identical devices. + * + * @buf: buffer holding the entropic data from HW noise sources to be used to + * insert into entropy pool. + * @size: length of buffer + */ +void add_device_randomness(const void *buf, unsigned int size) +{ + lrng_pool_insert_aux((u8 *)buf, size, 0); +} +EXPORT_SYMBOL(add_device_randomness); + +#ifdef CONFIG_BLOCK +void rand_initialize_disk(struct gendisk *disk) { } +void add_disk_randomness(struct gendisk *disk) { } +EXPORT_SYMBOL(add_disk_randomness); +#endif + +#ifndef CONFIG_LRNG_IRQ +void add_interrupt_randomness(int irq) { } +EXPORT_SYMBOL(add_interrupt_randomness); +#endif + +/* + * del_random_ready_callback() - Delete a previously registered readiness + * callback function. + * + * @rdy: callback definition that was registered initially + */ +void del_random_ready_callback(struct random_ready_callback *rdy) +{ + unsigned long flags; + struct module *owner = NULL; + + spin_lock_irqsave(&lrng_ready_list_lock, flags); + if (!list_empty(&rdy->list)) { + list_del_init(&rdy->list); + owner = rdy->owner; + } + spin_unlock_irqrestore(&lrng_ready_list_lock, flags); + + module_put(owner); +} +EXPORT_SYMBOL(del_random_ready_callback); + +/* + * add_random_ready_callback() - Add a callback function that will be invoked + * when the DRNG is fully initialized and seeded. + * + * @rdy: callback definition to be invoked when the LRNG is seeded + * + * Return: + * * 0 if callback is successfully added + * * -EALREADY if pool is already initialised (callback not called) + * * -ENOENT if module for callback is not alive + */ +int add_random_ready_callback(struct random_ready_callback *rdy) +{ + struct module *owner; + unsigned long flags; + int err = -EALREADY; + + if (likely(lrng_state_operational())) + return err; + + owner = rdy->owner; + if (!try_module_get(owner)) + return -ENOENT; + + spin_lock_irqsave(&lrng_ready_list_lock, flags); + if (lrng_state_operational()) + goto out; + + owner = NULL; + + list_add(&rdy->list, &lrng_ready_list); + err = 0; + +out: + spin_unlock_irqrestore(&lrng_ready_list_lock, flags); + + module_put(owner); + + return err; +} +EXPORT_SYMBOL(add_random_ready_callback); + +/*********************** LRNG kernel output interfaces ************************/ + +/* + * get_random_bytes() - Provider of cryptographic strong random numbers for + * kernel-internal usage. + * + * This function is appropriate for all in-kernel use cases. However, + * it will always use the ChaCha20 DRNG. 
+ * + * @buf: buffer to store the random bytes + * @nbytes: size of the buffer + */ +void get_random_bytes(void *buf, int nbytes) +{ + lrng_drng_get_atomic((u8 *)buf, (u32)nbytes); + lrng_debug_report_seedlevel("get_random_bytes"); +} +EXPORT_SYMBOL(get_random_bytes); + +/* + * get_random_bytes_full() - Provider of cryptographic strong random numbers + * for kernel-internal usage. + * + * This function is appropriate only for non-atomic use cases as this + * function may sleep. Though, it provides access to the full functionality + * of LRNG including the switchable DRNG support, that may support other + * DRNGs such as the SP800-90A DRBG. + * + * @buf: buffer to store the random bytes + * @nbytes: size of the buffer + */ +void get_random_bytes_full(void *buf, int nbytes) +{ + lrng_drng_get_sleep((u8 *)buf, (u32)nbytes); + lrng_debug_report_seedlevel("get_random_bytes_full"); +} +EXPORT_SYMBOL(get_random_bytes_full); + +/* + * wait_for_random_bytes() - Wait for the LRNG to be seeded and thus + * guaranteed to supply cryptographically secure random numbers. + * + * This applies to: the /dev/urandom device, the get_random_bytes function, + * and the get_random_{u32,u64,int,long} family of functions. Using any of + * these functions without first calling this function forfeits the guarantee + * of security. + * + * Return: + * * 0 if the LRNG has been seeded. + * * -ERESTARTSYS if the function was interrupted by a signal. + */ +int wait_for_random_bytes(void) +{ + if (likely(lrng_state_min_seeded())) + return 0; + return wait_event_interruptible(lrng_init_wait, + lrng_state_min_seeded()); +} +EXPORT_SYMBOL(wait_for_random_bytes); + +/* + * get_random_bytes_arch() - This function will use the architecture-specific + * hardware random number generator if it is available. + * + * The arch-specific hw RNG will almost certainly be faster than what we can + * do in software, but it is impossible to verify that it is implemented + * securely (as opposed, to, say, the AES encryption of a sequence number using + * a key known by the NSA). So it's useful if we need the speed, but only if + * we're willing to trust the hardware manufacturer not to have put in a back + * door. + * + * @buf: buffer allocated by caller to store the random data in + * @nbytes: length of outbuf + * + * Return: number of bytes filled in. + */ +int __must_check get_random_bytes_arch(void *buf, int nbytes) +{ + u8 *p = buf; + + while (nbytes) { + unsigned long v; + int chunk = min_t(int, nbytes, sizeof(unsigned long)); + + if (!arch_get_random_long(&v)) + break; + + memcpy(p, &v, chunk); + p += chunk; + nbytes -= chunk; + } + + if (nbytes) + lrng_drng_get_atomic((u8 *)p, (u32)nbytes); + + return nbytes; +} +EXPORT_SYMBOL(get_random_bytes_arch); + +/* + * Returns whether or not the LRNG has been seeded. + * + * Returns: true if the urandom pool has been seeded. + * false if the urandom pool has not been seeded. 
+ */ +bool rng_is_initialized(void) +{ + return lrng_state_operational(); +} +EXPORT_SYMBOL(rng_is_initialized); + +/************************ LRNG user output interfaces *************************/ + +static ssize_t lrng_read_common(char __user *buf, size_t nbytes) +{ + ssize_t ret = 0; + u8 tmpbuf[LRNG_DRNG_BLOCKSIZE] __aligned(LRNG_KCAPI_ALIGN); + u8 *tmp_large = NULL, *tmp = tmpbuf; + u32 tmplen = sizeof(tmpbuf); + + if (nbytes == 0) + return 0; + + /* + * Satisfy large read requests -- as the common case are smaller + * request sizes, such as 16 or 32 bytes, avoid a kmalloc overhead for + * those by using the stack variable of tmpbuf. + */ + if (!CONFIG_BASE_SMALL && (nbytes > sizeof(tmpbuf))) { + tmplen = min_t(u32, nbytes, LRNG_DRNG_MAX_REQSIZE); + tmp_large = kmalloc(tmplen + LRNG_KCAPI_ALIGN, GFP_KERNEL); + if (!tmp_large) + tmplen = sizeof(tmpbuf); + else + tmp = PTR_ALIGN(tmp_large, LRNG_KCAPI_ALIGN); + } + + while (nbytes) { + u32 todo = min_t(u32, nbytes, tmplen); + int rc = 0; + + /* Reschedule if we received a large request. */ + if ((tmp_large) && need_resched()) { + if (signal_pending(current)) { + if (ret == 0) + ret = -ERESTARTSYS; + break; + } + schedule(); + } + + rc = lrng_drng_get_sleep(tmp, todo); + if (rc <= 0) { + if (rc < 0) + ret = rc; + break; + } + if (copy_to_user(buf, tmp, rc)) { + ret = -EFAULT; + break; + } + + nbytes -= rc; + buf += rc; + ret += rc; + } + + /* Wipe data just returned from memory */ + if (tmp_large) + kfree_sensitive(tmp_large); + else + memzero_explicit(tmpbuf, sizeof(tmpbuf)); + + return ret; +} + +static ssize_t +lrng_read_common_block(int nonblock, char __user *buf, size_t nbytes) +{ + if (nbytes == 0) + return 0; + + if (unlikely(!lrng_state_operational())) { + int ret; + + if (nonblock) + return -EAGAIN; + + ret = wait_event_interruptible(lrng_init_wait, + lrng_state_operational()); + if (unlikely(ret)) + return ret; + } + + return lrng_read_common(buf, nbytes); +} + +static ssize_t lrng_drng_read_block(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + return lrng_read_common_block(file->f_flags & O_NONBLOCK, buf, nbytes); +} + +static __poll_t lrng_random_poll(struct file *file, poll_table *wait) +{ + __poll_t mask; + + poll_wait(file, &lrng_init_wait, wait); + poll_wait(file, &lrng_write_wait, wait); + mask = 0; + if (lrng_state_operational()) + mask |= EPOLLIN | EPOLLRDNORM; + if (lrng_need_entropy() || + lrng_state_exseed_allow(lrng_noise_source_user)) { + lrng_state_exseed_set(lrng_noise_source_user, false); + mask |= EPOLLOUT | EPOLLWRNORM; + } + return mask; +} + +static ssize_t lrng_drng_write_common(const char __user *buffer, size_t count, + u32 entropy_bits) +{ + ssize_t ret = 0; + u8 buf[64] __aligned(LRNG_KCAPI_ALIGN); + const char __user *p = buffer; + u32 orig_entropy_bits = entropy_bits; + + if (!lrng_get_available()) + return -EAGAIN; + + count = min_t(size_t, count, INT_MAX); + while (count > 0) { + size_t bytes = min_t(size_t, count, sizeof(buf)); + u32 ent = min_t(u32, bytes<<3, entropy_bits); + + if (copy_from_user(&buf, p, bytes)) + return -EFAULT; + /* Inject data into entropy pool */ + lrng_pool_insert_aux(buf, bytes, ent); + + count -= bytes; + p += bytes; + ret += bytes; + entropy_bits -= ent; + + cond_resched(); + } + + /* Force reseed of DRNG during next data request. 
*/ + if (!orig_entropy_bits) + lrng_drng_force_reseed(); + + return ret; +} + +static ssize_t lrng_drng_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + if (!lrng_state_min_seeded()) + pr_notice_ratelimited("%s - use of insufficiently seeded DRNG (%zu bytes read)\n", + current->comm, nbytes); + else if (!lrng_state_operational()) + pr_debug_ratelimited("%s - use of not fully seeded DRNG (%zu bytes read)\n", + current->comm, nbytes); + + return lrng_read_common(buf, nbytes); +} + +static ssize_t lrng_drng_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + return lrng_drng_write_common(buffer, count, 0); +} + +static long lrng_ioctl(struct file *f, unsigned int cmd, unsigned long arg) +{ + u32 digestsize_bits; + int size, ent_count_bits; + int __user *p = (int __user *)arg; + + switch (cmd) { + case RNDGETENTCNT: + ent_count_bits = lrng_avail_entropy(); + if (put_user(ent_count_bits, p)) + return -EFAULT; + return 0; + case RNDADDTOENTCNT: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (get_user(ent_count_bits, p)) + return -EFAULT; + ent_count_bits = (int)lrng_avail_aux_entropy() + ent_count_bits; + if (ent_count_bits < 0) + ent_count_bits = 0; + digestsize_bits = lrng_get_digestsize(); + if (ent_count_bits > digestsize_bits) + ent_count_bits = digestsize_bits; + lrng_pool_set_entropy(ent_count_bits); + return 0; + case RNDADDENTROPY: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (get_user(ent_count_bits, p++)) + return -EFAULT; + if (ent_count_bits < 0) + return -EINVAL; + if (get_user(size, p++)) + return -EFAULT; + if (size < 0) + return -EINVAL; + /* there cannot be more entropy than data */ + ent_count_bits = min(ent_count_bits, size<<3); + return lrng_drng_write_common((const char __user *)p, size, + ent_count_bits); + case RNDZAPENTCNT: + case RNDCLEARPOOL: + /* Clear the entropy pool counter. */ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + lrng_pool_set_entropy(0); + return 0; + case RNDRESEEDCRNG: + /* + * We leave the capability check here since it is present + * in the upstream's RNG implementation. Yet, user space + * can trigger a reseed as easy as writing into /dev/random + * or /dev/urandom where no privilege is needed. + */ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + /* Force a reseed of all DRNGs */ + lrng_drng_force_reseed(); + return 0; + default: + return -EINVAL; + } +} + +static int lrng_fasync(int fd, struct file *filp, int on) +{ + return fasync_helper(fd, filp, on, &fasync); +} + +const struct file_operations random_fops = { + .read = lrng_drng_read_block, + .write = lrng_drng_write, + .poll = lrng_random_poll, + .unlocked_ioctl = lrng_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .fasync = lrng_fasync, + .llseek = noop_llseek, +}; + +const struct file_operations urandom_fops = { + .read = lrng_drng_read, + .write = lrng_drng_write, + .unlocked_ioctl = lrng_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .fasync = lrng_fasync, + .llseek = noop_llseek, +}; + +SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, + unsigned int, flags) +{ + if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE)) + return -EINVAL; + + /* + * Requesting insecure and blocking randomness at the same time makes + * no sense. 
+ */ + if ((flags & + (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM)) + return -EINVAL; + + if (count > INT_MAX) + count = INT_MAX; + + if (flags & GRND_INSECURE) + return lrng_drng_read(NULL, buf, count, NULL); + + return lrng_read_common_block(flags & GRND_NONBLOCK, buf, count); +} diff --git a/drivers/char/lrng/lrng_internal.h b/drivers/char/lrng/lrng_internal.h new file mode 100644 index 000000000000..c01c4e5fa44f --- /dev/null +++ b/drivers/char/lrng/lrng_internal.h @@ -0,0 +1,487 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * Copyright (C) 2018 - 2021, Stephan Mueller + */ + +#ifndef _LRNG_INTERNAL_H +#define _LRNG_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/*************************** General LRNG parameter ***************************/ + +/* Security strength of LRNG -- this must match DRNG security strength */ +#define LRNG_DRNG_SECURITY_STRENGTH_BYTES 32 +#define LRNG_DRNG_SECURITY_STRENGTH_BITS (LRNG_DRNG_SECURITY_STRENGTH_BYTES * 8) +#define LRNG_DRNG_BLOCKSIZE 64 /* Maximum of DRNG block sizes */ +#define LRNG_DRNG_INIT_SEED_SIZE_BITS (LRNG_DRNG_SECURITY_STRENGTH_BITS + \ + CONFIG_LRNG_SEED_BUFFER_INIT_ADD_BITS) +#define LRNG_DRNG_INIT_SEED_SIZE_BYTES (LRNG_DRNG_INIT_SEED_SIZE_BITS >> 3) + +/* + * SP800-90A defines a maximum request size of 1<<16 bytes. The given value is + * considered a safer margin. + * + * This value is allowed to be changed. + */ +#define LRNG_DRNG_MAX_REQSIZE (1<<12) + +/* + * SP800-90A defines a maximum number of requests between reseeds of 2^48. + * The given value is considered a much safer margin, balancing requests for + * frequent reseeds with the need to conserve entropy. This value MUST NOT be + * larger than INT_MAX because it is used in an atomic_t. + * + * This value is allowed to be changed. + */ +#define LRNG_DRNG_RESEED_THRESH (1<<20) + +/* + * Maximum DRNG generation operations without reseed having full entropy + * This value defines the absolute maximum value of DRNG generation operations + * without a reseed holding full entropy. LRNG_DRNG_RESEED_THRESH is the + * threshold when a new reseed is attempted. But it is possible that this fails + * to deliver full entropy. In this case the DRNG will continue to provide data + * even though it was not reseeded with full entropy. To avoid in the extreme + * case that no reseed is performed for too long, this threshold is enforced. + * If that absolute low value is reached, the LRNG is marked as not operational. + * + * This value is allowed to be changed. + */ +#define LRNG_DRNG_MAX_WITHOUT_RESEED (1<<30) + +/* + * Min required seed entropy is 128 bits covering the minimum entropy + * requirement of SP800-131A and the German BSI's TR02102. + * + * This value is allowed to be changed. + */ +#define LRNG_FULL_SEED_ENTROPY_BITS LRNG_DRNG_SECURITY_STRENGTH_BITS +#define LRNG_MIN_SEED_ENTROPY_BITS 128 +#define LRNG_INIT_ENTROPY_BITS 32 + +/* + * Wakeup value + * + * This value is allowed to be changed but must not be larger than the + * digest size of the hash operation used update the aux_pool. + */ +#ifdef CONFIG_CRYPTO_LIB_SHA256 +# define LRNG_ATOMIC_DIGEST_SIZE SHA256_DIGEST_SIZE +#else +# define LRNG_ATOMIC_DIGEST_SIZE SHA1_DIGEST_SIZE +#endif +#define LRNG_WRITE_WAKEUP_ENTROPY LRNG_ATOMIC_DIGEST_SIZE + +/* + * If the switching support is configured, we must provide support up to + * the largest digest size. Without switching support, we know it is only + * the built-in digest size. 
+ */ +#ifdef CONFIG_LRNG_DRNG_SWITCH +# define LRNG_MAX_DIGESTSIZE 64 +#else +# define LRNG_MAX_DIGESTSIZE LRNG_ATOMIC_DIGEST_SIZE +#endif + +/* + * Oversampling factor of IRQ events to obtain + * LRNG_DRNG_SECURITY_STRENGTH_BYTES. This factor is used when a + * high-resolution time stamp is not available. In this case, jiffies and + * register contents are used to fill the entropy pool. These noise sources + * are much less entropic than the high-resolution timer. The entropy content + * is the entropy content assumed with LRNG_IRQ_ENTROPY_BITS divided by + * LRNG_IRQ_OVERSAMPLING_FACTOR. + * + * This value is allowed to be changed. + */ +#define LRNG_IRQ_OVERSAMPLING_FACTOR 10 + +/* Alignmask that is intended to be identical to CRYPTO_MINALIGN */ +#define LRNG_KCAPI_ALIGN ARCH_KMALLOC_MINALIGN + +/* + * This definition must provide a buffer that is equal to SHASH_DESC_ON_STACK + * as it will be casted into a struct shash_desc. + */ +#define LRNG_POOL_SIZE (sizeof(struct shash_desc) + HASH_MAX_DESCSIZE) + +/************************ Default DRNG implementation *************************/ + +extern struct chacha20_state chacha20; +extern const struct lrng_crypto_cb lrng_cc20_crypto_cb; +void lrng_cc20_init_state(struct chacha20_state *state); + +/********************************** /proc *************************************/ + +#ifdef CONFIG_SYSCTL +void lrng_pool_inc_numa_node(void); +void lrng_proc_update_max_write_thresh(u32 new_digestsize); +#else +static inline void lrng_pool_inc_numa_node(void) { } +static inline void lrng_proc_update_max_write_thresh(u32 new_digestsize) { } +#endif + +/****************************** LRNG interfaces *******************************/ + +extern u32 lrng_write_wakeup_bits; +extern int lrng_drng_reseed_max_time; + +void lrng_writer_wakeup(void); +void lrng_init_wakeup(void); +void lrng_debug_report_seedlevel(const char *name); +void lrng_process_ready_list(void); + +/* External interface to use of the switchable DRBG inside the kernel */ +void get_random_bytes_full(void *buf, int nbytes); + +/************************* Jitter RNG Entropy Source **************************/ + +#ifdef CONFIG_LRNG_JENT +u32 lrng_get_jent(u8 *outbuf, u32 requested_bits); +u32 lrng_jent_entropylevel(u32 requested_bits); +void lrng_jent_es_state(unsigned char *buf, size_t buflen); +#else /* CONFIG_LRNG_JENT */ +static inline u32 lrng_get_jent(u8 *outbuf, u32 requested_bits) { return 0; } +static inline u32 lrng_jent_entropylevel(u32 requested_bits) { return 0; } +static inline void lrng_jent_es_state(unsigned char *buf, size_t buflen) { } +#endif /* CONFIG_LRNG_JENT */ + +/************************** CPU-based Entropy Source **************************/ + +static inline u32 lrng_fast_noise_entropylevel(u32 ent_bits, u32 requested_bits) +{ + /* Obtain entropy statement */ + ent_bits = ent_bits * requested_bits / LRNG_DRNG_SECURITY_STRENGTH_BITS; + /* Cap entropy to buffer size in bits */ + ent_bits = min_t(u32, ent_bits, requested_bits); + return ent_bits; +} + +#ifdef CONFIG_LRNG_CPU +u32 lrng_get_arch(u8 *outbuf, u32 requested_bits); +u32 lrng_archrandom_entropylevel(u32 requested_bits); +void lrng_arch_es_state(unsigned char *buf, size_t buflen); +#else /* CONFIG_LRNG_CPU */ +static inline u32 lrng_get_arch(u8 *outbuf, u32 requested_bits) { return 0; } +static inline u32 lrng_archrandom_entropylevel(u32 requested_bits) { return 0; } +static inline void lrng_arch_es_state(unsigned char *buf, size_t buflen) { } +#endif /* CONFIG_LRNG_CPU */ + +/************************** Interrupt 
Entropy Source **************************/ + +#ifdef CONFIG_LRNG_IRQ +void lrng_pcpu_reset(void); +u32 lrng_pcpu_avail_pool_size(void); +u32 lrng_pcpu_avail_entropy(void); +int lrng_pcpu_switch_hash(int node, + const struct lrng_crypto_cb *new_cb, void *new_hash, + const struct lrng_crypto_cb *old_cb); +u32 lrng_pcpu_pool_hash(u8 *outbuf, u32 requested_bits, bool fully_seeded); +void lrng_pcpu_array_add_u32(u32 data); +u32 lrng_gcd_analyze(u32 *history, size_t nelem); +void lrng_irq_es_state(unsigned char *buf, size_t buflen); +#else /* CONFIG_LRNG_IRQ */ +static inline void lrng_pcpu_reset(void) { } +static inline u32 lrng_pcpu_avail_pool_size(void) { return 0; } +static inline u32 lrng_pcpu_avail_entropy(void) { return 0; } +static inline int lrng_pcpu_switch_hash(int node, + const struct lrng_crypto_cb *new_cb, void *new_hash, + const struct lrng_crypto_cb *old_cb) +{ + return 0; +} +static inline u32 lrng_pcpu_pool_hash(u8 *outbuf, u32 requested_bits, + bool fully_seeded) +{ + return 0; +} +static inline void lrng_pcpu_array_add_u32(u32 data) { } +static inline void lrng_irq_es_state(unsigned char *buf, size_t buflen) { } +#endif /* CONFIG_LRNG_IRQ */ + +/****************************** DRNG processing *******************************/ + +/* DRNG state handle */ +struct lrng_drng { + void *drng; /* DRNG handle */ + void *hash; /* Hash handle */ + const struct lrng_crypto_cb *crypto_cb; /* Crypto callbacks */ + atomic_t requests; /* Number of DRNG requests */ + atomic_t requests_since_fully_seeded; /* Number DRNG requests since + * last fully seeded + */ + unsigned long last_seeded; /* Last time it was seeded */ + bool fully_seeded; /* Is DRNG fully seeded? */ + bool force_reseed; /* Force a reseed */ + + /* Lock write operations on DRNG state, DRNG replacement of crypto_cb */ + struct mutex lock; + spinlock_t spin_lock; + /* Lock *hash replacement - always take before DRNG lock */ + rwlock_t hash_lock; +}; + +extern struct mutex lrng_crypto_cb_update; + +struct lrng_drng *lrng_drng_init_instance(void); +struct lrng_drng *lrng_drng_atomic_instance(void); + +static __always_inline bool lrng_drng_is_atomic(struct lrng_drng *drng) +{ + return (drng->drng == lrng_drng_atomic_instance()->drng); +} + +/* Lock the DRNG */ +static __always_inline void lrng_drng_lock(struct lrng_drng *drng, + unsigned long *flags) + __acquires(&drng->spin_lock) +{ + /* Use spin lock in case the atomic DRNG context is used */ + if (lrng_drng_is_atomic(drng)) { + spin_lock_irqsave(&drng->spin_lock, *flags); + + /* + * In case a lock transition happened while we were spinning, + * catch this case and use the new lock type. + */ + if (!lrng_drng_is_atomic(drng)) { + spin_unlock_irqrestore(&drng->spin_lock, *flags); + __acquire(&drng->spin_lock); + mutex_lock(&drng->lock); + } + } else { + __acquire(&drng->spin_lock); + mutex_lock(&drng->lock); + } +} + +/* Unlock the DRNG */ +static __always_inline void lrng_drng_unlock(struct lrng_drng *drng, + unsigned long *flags) + __releases(&drng->spin_lock) +{ + if (lrng_drng_is_atomic(drng)) { + spin_unlock_irqrestore(&drng->spin_lock, *flags); + } else { + mutex_unlock(&drng->lock); + __release(&drng->spin_lock); + } +} + +void lrng_reset(void); +void lrng_drngs_init_cc20(bool force_seed); +bool lrng_sp80090c_compliant(void); + +static inline u32 lrng_compress_osr(void) +{ + return lrng_sp80090c_compliant() ? 
CONFIG_LRNG_OVERSAMPLE_ES_BITS : 0; +} + +static inline u32 lrng_reduce_by_osr(u32 entropy_bits) +{ + u32 osr_bits = lrng_compress_osr(); + + return (entropy_bits >= osr_bits) ? (entropy_bits - osr_bits) : 0; +} + +bool lrng_get_available(void); +void lrng_set_available(void); +void lrng_drng_reset(struct lrng_drng *drng); +int lrng_drng_get_atomic(u8 *outbuf, u32 outbuflen); +int lrng_drng_get_sleep(u8 *outbuf, u32 outbuflen); +void lrng_drng_force_reseed(void); +void lrng_drng_seed_work(struct work_struct *dummy); + +#ifdef CONFIG_NUMA +struct lrng_drng **lrng_drng_instances(void); +void lrng_drngs_numa_alloc(void); +#else /* CONFIG_NUMA */ +static inline struct lrng_drng **lrng_drng_instances(void) { return NULL; } +static inline void lrng_drngs_numa_alloc(void) { return; } +#endif /* CONFIG_NUMA */ + +/************************* Entropy sources management *************************/ + +enum lrng_external_noise_source { + lrng_noise_source_hw, + lrng_noise_source_user +}; + +void lrng_set_entropy_thresh(u32 new); +u32 lrng_avail_entropy(void); +void lrng_reset_state(void); + +bool lrng_state_exseed_allow(enum lrng_external_noise_source source); +void lrng_state_exseed_set(enum lrng_external_noise_source source, bool type); +bool lrng_state_min_seeded(void); +bool lrng_state_fully_seeded(void); +bool lrng_state_operational(void); + +int lrng_pool_trylock(void); +void lrng_pool_unlock(void); +void lrng_pool_all_numa_nodes_seeded(bool set); +void lrng_pool_add_entropy(void); + +struct entropy_buf { + u8 a[LRNG_DRNG_INIT_SEED_SIZE_BYTES]; + u8 b[LRNG_DRNG_INIT_SEED_SIZE_BYTES]; + u8 c[LRNG_DRNG_INIT_SEED_SIZE_BYTES]; + u8 d[LRNG_DRNG_INIT_SEED_SIZE_BYTES]; + u32 now, a_bits, b_bits, c_bits, d_bits; +}; + +bool lrng_fully_seeded(bool fully_seeded, struct entropy_buf *eb); +void lrng_unset_fully_seeded(struct lrng_drng *drng); +void lrng_fill_seed_buffer(struct entropy_buf *entropy_buf, u32 requested_bits); +void lrng_init_ops(struct entropy_buf *eb); + +/*********************** Auxiliary Pool Entropy Source ************************/ + +u32 lrng_avail_aux_entropy(void); +void lrng_aux_es_state(unsigned char *buf, size_t buflen); +u32 lrng_get_digestsize(void); +void lrng_pool_set_entropy(u32 entropy_bits); +int lrng_aux_switch_hash(const struct lrng_crypto_cb *new_cb, void *new_hash, + const struct lrng_crypto_cb *old_cb); +int lrng_pool_insert_aux(const u8 *inbuf, u32 inbuflen, u32 entropy_bits); +void lrng_get_backtrack_aux(struct entropy_buf *entropy_buf, + u32 requested_bits); + +/* Obtain the security strength of the LRNG in bits */ +static inline u32 lrng_security_strength(void) +{ + /* + * We use a hash to read the entropy in the entropy pool. According to + * SP800-90B table 1, the entropy can be at most the digest size. + * Considering this together with the last sentence in section 3.1.5.1.2 + * the security strength of a (approved) hash is equal to its output + * size. On the other hand the entropy cannot be larger than the + * security strength of the used DRBG. 
+ */ + return min_t(u32, LRNG_FULL_SEED_ENTROPY_BITS, lrng_get_digestsize()); +} + +static inline u32 lrng_get_seed_entropy_osr(bool fully_seeded) +{ + u32 requested_bits = lrng_security_strength(); + + /* Apply oversampling during initialization according to SP800-90C */ + if (lrng_sp80090c_compliant() && !fully_seeded) + requested_bits += CONFIG_LRNG_SEED_BUFFER_INIT_ADD_BITS; + return requested_bits; +} + +/************************** Health Test linking code **************************/ + +enum lrng_health_res { + lrng_health_pass, /* Health test passes on time stamp */ + lrng_health_fail_use, /* Time stamp unhealthy, but mix in */ + lrng_health_fail_drop /* Time stamp unhealthy, drop it */ +}; + +#ifdef CONFIG_LRNG_HEALTH_TESTS +bool lrng_sp80090b_startup_complete(void); +bool lrng_sp80090b_compliant(void); + +enum lrng_health_res lrng_health_test(u32 now_time); +void lrng_health_disable(void); + +#else /* CONFIG_LRNG_HEALTH_TESTS */ +static inline bool lrng_sp80090b_startup_complete(void) { return true; } +static inline bool lrng_sp80090b_compliant(void) { return false; } + +static inline enum lrng_health_res +lrng_health_test(u32 now_time) { return lrng_health_pass; } +static inline void lrng_health_disable(void) { } +#endif /* CONFIG_LRNG_HEALTH_TESTS */ + +/****************************** Helper code ***********************************/ + +static inline u32 atomic_read_u32(atomic_t *v) +{ + return (u32)atomic_read(v); +} + +/******************** Crypto Primitive Switching Support **********************/ + +#ifdef CONFIG_LRNG_DRNG_SWITCH +static inline void lrng_hash_lock(struct lrng_drng *drng, unsigned long *flags) +{ + read_lock_irqsave(&drng->hash_lock, *flags); +} + +static inline void lrng_hash_unlock(struct lrng_drng *drng, unsigned long flags) +{ + read_unlock_irqrestore(&drng->hash_lock, flags); +} +#else /* CONFIG_LRNG_DRNG_SWITCH */ +static inline void lrng_hash_lock(struct lrng_drng *drng, unsigned long *flags) +{ } + +static inline void lrng_hash_unlock(struct lrng_drng *drng, unsigned long flags) +{ } +#endif /* CONFIG_LRNG_DRNG_SWITCH */ + +/*************************** Auxiliary functions ******************************/ + +void invalidate_batched_entropy(void); + +/***************************** Testing code ***********************************/ + +#ifdef CONFIG_LRNG_RAW_HIRES_ENTROPY +bool lrng_raw_hires_entropy_store(u32 value); +#else /* CONFIG_LRNG_RAW_HIRES_ENTROPY */ +static inline bool lrng_raw_hires_entropy_store(u32 value) { return false; } +#endif /* CONFIG_LRNG_RAW_HIRES_ENTROPY */ + +#ifdef CONFIG_LRNG_RAW_JIFFIES_ENTROPY +bool lrng_raw_jiffies_entropy_store(u32 value); +#else /* CONFIG_LRNG_RAW_JIFFIES_ENTROPY */ +static inline bool lrng_raw_jiffies_entropy_store(u32 value) { return false; } +#endif /* CONFIG_LRNG_RAW_JIFFIES_ENTROPY */ + +#ifdef CONFIG_LRNG_RAW_IRQ_ENTROPY +bool lrng_raw_irq_entropy_store(u32 value); +#else /* CONFIG_LRNG_RAW_IRQ_ENTROPY */ +static inline bool lrng_raw_irq_entropy_store(u32 value) { return false; } +#endif /* CONFIG_LRNG_RAW_IRQ_ENTROPY */ + +#ifdef CONFIG_LRNG_RAW_IRQFLAGS_ENTROPY +bool lrng_raw_irqflags_entropy_store(u32 value); +#else /* CONFIG_LRNG_RAW_IRQFLAGS_ENTROPY */ +static inline bool lrng_raw_irqflags_entropy_store(u32 value) { return false; } +#endif /* CONFIG_LRNG_RAW_IRQFLAGS_ENTROPY */ + +#ifdef CONFIG_LRNG_RAW_RETIP_ENTROPY +bool lrng_raw_retip_entropy_store(u32 value); +#else /* CONFIG_LRNG_RAW_RETIP_ENTROPY */ +static inline bool lrng_raw_retip_entropy_store(u32 value) { return false; } +#endif /* 
CONFIG_LRNG_RAW_RETIP_ENTROPY */ + +#ifdef CONFIG_LRNG_RAW_REGS_ENTROPY +bool lrng_raw_regs_entropy_store(u32 value); +#else /* CONFIG_LRNG_RAW_REGS_ENTROPY */ +static inline bool lrng_raw_regs_entropy_store(u32 value) { return false; } +#endif /* CONFIG_LRNG_RAW_REGS_ENTROPY */ + +#ifdef CONFIG_LRNG_RAW_ARRAY +bool lrng_raw_array_entropy_store(u32 value); +#else /* CONFIG_LRNG_RAW_ARRAY */ +static inline bool lrng_raw_array_entropy_store(u32 value) { return false; } +#endif /* CONFIG_LRNG_RAW_ARRAY */ + +#ifdef CONFIG_LRNG_IRQ_PERF +bool lrng_perf_time(u32 start); +#else /* CONFIG_LRNG_IRQ_PERF */ +static inline bool lrng_perf_time(u32 start) { return false; } +#endif /*CONFIG_LRNG_IRQ_PERF */ + +#endif /* _LRNG_INTERNAL_H */ diff --git a/drivers/char/lrng/lrng_kcapi.c b/drivers/char/lrng/lrng_kcapi.c new file mode 100644 index 000000000000..fa76b2d57b7f --- /dev/null +++ b/drivers/char/lrng/lrng_kcapi.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * Backend for the LRNG providing the cryptographic primitives using the + * kernel crypto API. + * + * Copyright (C) 2018 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include "lrng_kcapi_hash.h" + +static char *drng_name = NULL; +module_param(drng_name, charp, 0444); +MODULE_PARM_DESC(drng_name, "Kernel crypto API name of DRNG"); + +static char *pool_hash = "sha512"; +module_param(pool_hash, charp, 0444); +MODULE_PARM_DESC(pool_hash, + "Kernel crypto API name of hash or keyed message digest to read the entropy pool"); + +static char *seed_hash = NULL; +module_param(seed_hash, charp, 0444); +MODULE_PARM_DESC(seed_hash, + "Kernel crypto API name of hash with output size equal to seedsize of DRNG to bring seed string to the size required by the DRNG"); + +struct lrng_drng_info { + struct crypto_rng *kcapi_rng; + void *lrng_hash; +}; + +static void *lrng_kcapi_drng_hash_alloc(void) +{ + return lrng_kcapi_hash_alloc(pool_hash); +} + +static int lrng_kcapi_drng_seed_helper(void *drng, const u8 *inbuf, + u32 inbuflen) +{ + SHASH_DESC_ON_STACK(shash, NULL); + struct lrng_drng_info *lrng_drng_info = (struct lrng_drng_info *)drng; + struct crypto_rng *kcapi_rng = lrng_drng_info->kcapi_rng; + void *hash = lrng_drng_info->lrng_hash; + u32 digestsize = lrng_kcapi_hash_digestsize(hash); + u8 digest[HASH_MAX_DIGESTSIZE] __aligned(8); + int ret; + + if (!hash) + return crypto_rng_reset(kcapi_rng, inbuf, inbuflen); + + ret = lrng_kcapi_hash_init(shash, hash) ?: + lrng_kcapi_hash_update(shash, inbuf, inbuflen) ?: + lrng_kcapi_hash_final(shash, digest); + lrng_kcapi_hash_zero(shash); + if (ret) + return ret; + + ret = crypto_rng_reset(kcapi_rng, digest, digestsize); + if (ret) + return ret; + + memzero_explicit(digest, digestsize); + return 0; +} + +static int lrng_kcapi_drng_generate_helper(void *drng, u8 *outbuf, + u32 outbuflen) +{ + struct lrng_drng_info *lrng_drng_info = (struct lrng_drng_info *)drng; + struct crypto_rng *kcapi_rng = lrng_drng_info->kcapi_rng; + int ret = crypto_rng_get_bytes(kcapi_rng, outbuf, outbuflen); + + if (ret < 0) + return ret; + + return outbuflen; +} + +static void *lrng_kcapi_drng_alloc(u32 sec_strength) +{ + struct lrng_drng_info *lrng_drng_info; + struct crypto_rng *kcapi_rng; + int seedsize; + void *ret = ERR_PTR(-ENOMEM); + + if (!drng_name) { + pr_err("DRNG name missing\n"); + return ERR_PTR(-EINVAL); + } + + if (!memcmp(drng_name, "drbg", 4) || + !memcmp(drng_name, "stdrng", 6) || + !memcmp(drng_name, 
"jitterentropy_rng", 17)) { + pr_err("Refusing to load the requested random number generator\n"); + return ERR_PTR(-EINVAL); + } + + lrng_drng_info = kmalloc(sizeof(*lrng_drng_info), GFP_KERNEL); + if (!lrng_drng_info) + return ERR_PTR(-ENOMEM); + + kcapi_rng = crypto_alloc_rng(drng_name, 0, 0); + if (IS_ERR(kcapi_rng)) { + pr_err("DRNG %s cannot be allocated\n", drng_name); + ret = ERR_CAST(kcapi_rng); + goto free; + } + lrng_drng_info->kcapi_rng = kcapi_rng; + + seedsize = crypto_rng_seedsize(kcapi_rng); + + if (sec_strength > seedsize) + pr_info("Seedsize DRNG (%u bits) lower than security strength of LRNG noise source (%u bits)\n", + crypto_rng_seedsize(kcapi_rng) * 8, sec_strength * 8); + + if (seedsize) { + void *lrng_hash; + + if (!seed_hash) { + switch (seedsize) { + case 32: + seed_hash = "sha256"; + break; + case 48: + seed_hash = "sha384"; + break; + case 64: + seed_hash = "sha512"; + break; + default: + pr_err("Seed size %d cannot be processed\n", + seedsize); + goto dealloc; + } + } + + lrng_hash = lrng_kcapi_hash_alloc(seed_hash); + if (IS_ERR(lrng_hash)) { + ret = ERR_CAST(lrng_hash); + goto dealloc; + } + + if (seedsize != lrng_kcapi_hash_digestsize(lrng_hash)) { + pr_err("Seed hash output size not equal to DRNG seed size\n"); + lrng_kcapi_hash_dealloc(lrng_hash); + ret = ERR_PTR(-EINVAL); + goto dealloc; + } + + lrng_drng_info->lrng_hash = lrng_hash; + + pr_info("Seed hash %s allocated\n", seed_hash); + } else { + lrng_drng_info->lrng_hash = NULL; + } + + pr_info("Kernel crypto API DRNG %s allocated\n", drng_name); + + return lrng_drng_info; + +dealloc: + crypto_free_rng(kcapi_rng); +free: + kfree(lrng_drng_info); + return ret; +} + +static void lrng_kcapi_drng_dealloc(void *drng) +{ + struct lrng_drng_info *lrng_drng_info = (struct lrng_drng_info *)drng; + struct crypto_rng *kcapi_rng = lrng_drng_info->kcapi_rng; + + crypto_free_rng(kcapi_rng); + if (lrng_drng_info->lrng_hash) + lrng_kcapi_hash_dealloc(lrng_drng_info->lrng_hash); + kfree(lrng_drng_info); + pr_info("DRNG %s deallocated\n", drng_name); +} + +static const char *lrng_kcapi_drng_name(void) +{ + return drng_name; +} + +static const char *lrng_kcapi_pool_hash(void) +{ + return pool_hash; +} + +static const struct lrng_crypto_cb lrng_kcapi_crypto_cb = { + .lrng_drng_name = lrng_kcapi_drng_name, + .lrng_hash_name = lrng_kcapi_pool_hash, + .lrng_drng_alloc = lrng_kcapi_drng_alloc, + .lrng_drng_dealloc = lrng_kcapi_drng_dealloc, + .lrng_drng_seed_helper = lrng_kcapi_drng_seed_helper, + .lrng_drng_generate_helper = lrng_kcapi_drng_generate_helper, + .lrng_hash_alloc = lrng_kcapi_drng_hash_alloc, + .lrng_hash_dealloc = lrng_kcapi_hash_dealloc, + .lrng_hash_digestsize = lrng_kcapi_hash_digestsize, + .lrng_hash_init = lrng_kcapi_hash_init, + .lrng_hash_update = lrng_kcapi_hash_update, + .lrng_hash_final = lrng_kcapi_hash_final, + .lrng_hash_desc_zero = lrng_kcapi_hash_zero, +}; + +static int __init lrng_kcapi_init(void) +{ + return lrng_set_drng_cb(&lrng_kcapi_crypto_cb); +} +static void __exit lrng_kcapi_exit(void) +{ + lrng_set_drng_cb(NULL); +} + +late_initcall(lrng_kcapi_init); +module_exit(lrng_kcapi_exit); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Stephan Mueller "); +MODULE_DESCRIPTION("Linux Random Number Generator - kernel crypto API DRNG backend"); diff --git a/drivers/char/lrng/lrng_kcapi_hash.c b/drivers/char/lrng/lrng_kcapi_hash.c new file mode 100644 index 000000000000..9927e1022de5 --- /dev/null +++ b/drivers/char/lrng/lrng_kcapi_hash.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 OR 
BSD-2-Clause +/* + * Backend for providing the hash primitive using the kernel crypto API. + * + * Copyright (C) 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include + +#include "lrng_kcapi_hash.h" + +struct lrng_hash_info { + struct crypto_shash *tfm; +}; + +static void _lrng_kcapi_hash_free(struct lrng_hash_info *lrng_hash) +{ + struct crypto_shash *tfm = lrng_hash->tfm; + + crypto_free_shash(tfm); + kfree(lrng_hash); +} + +void *lrng_kcapi_hash_alloc(const char *name) +{ + struct lrng_hash_info *lrng_hash; + struct crypto_shash *tfm; + int ret; + + if (!name) { + pr_err("Hash name missing\n"); + return ERR_PTR(-EINVAL); + } + + tfm = crypto_alloc_shash(name, 0, 0); + if (IS_ERR(tfm)) { + pr_err("could not allocate hash %s\n", name); + return ERR_CAST(tfm); + } + + ret = sizeof(struct lrng_hash_info); + lrng_hash = kmalloc(ret, GFP_KERNEL); + if (!lrng_hash) { + crypto_free_shash(tfm); + return ERR_PTR(-ENOMEM); + } + + lrng_hash->tfm = tfm; + + pr_info("Hash %s allocated\n", name); + + return lrng_hash; +} +EXPORT_SYMBOL(lrng_kcapi_hash_alloc); + +u32 lrng_kcapi_hash_digestsize(void *hash) +{ + struct lrng_hash_info *lrng_hash = (struct lrng_hash_info *)hash; + struct crypto_shash *tfm = lrng_hash->tfm; + + return crypto_shash_digestsize(tfm); +} +EXPORT_SYMBOL(lrng_kcapi_hash_digestsize); + +void lrng_kcapi_hash_dealloc(void *hash) +{ + struct lrng_hash_info *lrng_hash = (struct lrng_hash_info *)hash; + + _lrng_kcapi_hash_free(lrng_hash); + pr_info("Hash deallocated\n"); +} +EXPORT_SYMBOL(lrng_kcapi_hash_dealloc); + +int lrng_kcapi_hash_init(struct shash_desc *shash, void *hash) +{ + struct lrng_hash_info *lrng_hash = (struct lrng_hash_info *)hash; + struct crypto_shash *tfm = lrng_hash->tfm; + + shash->tfm = tfm; + return crypto_shash_init(shash); +} +EXPORT_SYMBOL(lrng_kcapi_hash_init); + +int lrng_kcapi_hash_update(struct shash_desc *shash, const u8 *inbuf, + u32 inbuflen) +{ + return crypto_shash_update(shash, inbuf, inbuflen); +} +EXPORT_SYMBOL(lrng_kcapi_hash_update); + +int lrng_kcapi_hash_final(struct shash_desc *shash, u8 *digest) +{ + return crypto_shash_final(shash, digest); +} +EXPORT_SYMBOL(lrng_kcapi_hash_final); + +void lrng_kcapi_hash_zero(struct shash_desc *shash) +{ + shash_desc_zero(shash); +} +EXPORT_SYMBOL(lrng_kcapi_hash_zero); diff --git a/drivers/char/lrng/lrng_kcapi_hash.h b/drivers/char/lrng/lrng_kcapi_hash.h new file mode 100644 index 000000000000..2f94558d2dd6 --- /dev/null +++ b/drivers/char/lrng/lrng_kcapi_hash.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * Copyright (C) 2020 - 2021, Stephan Mueller + */ + +#ifndef _LRNG_KCAPI_HASH_H +#define _LRNG_KCAPI_HASH_H + +#include + +void *lrng_kcapi_hash_alloc(const char *name); +u32 lrng_kcapi_hash_digestsize(void *hash); +void lrng_kcapi_hash_dealloc(void *hash); +int lrng_kcapi_hash_init(struct shash_desc *shash, void *hash); +int lrng_kcapi_hash_update(struct shash_desc *shash, const u8 *inbuf, + u32 inbuflen); +int lrng_kcapi_hash_final(struct shash_desc *shash, u8 *digest); +void lrng_kcapi_hash_zero(struct shash_desc *shash); + +#endif /* _LRNG_KCAPI_HASH_H */ diff --git a/drivers/char/lrng/lrng_numa.c b/drivers/char/lrng/lrng_numa.c new file mode 100644 index 000000000000..094929107c46 --- /dev/null +++ b/drivers/char/lrng/lrng_numa.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG NUMA support + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + 
+#include +#include + +#include "lrng_internal.h" + +static struct lrng_drng **lrng_drng __read_mostly = NULL; + +struct lrng_drng **lrng_drng_instances(void) +{ + /* counterpart to cmpxchg_release in _lrng_drngs_numa_alloc */ + return READ_ONCE(lrng_drng); +} + +/* Allocate the data structures for the per-NUMA node DRNGs */ +static void _lrng_drngs_numa_alloc(struct work_struct *work) +{ + struct lrng_drng **drngs; + struct lrng_drng *lrng_drng_init = lrng_drng_init_instance(); + u32 node; + bool init_drng_used = false; + + mutex_lock(&lrng_crypto_cb_update); + + /* per-NUMA-node DRNGs are already present */ + if (lrng_drng) + goto unlock; + + drngs = kcalloc(nr_node_ids, sizeof(void *), GFP_KERNEL|__GFP_NOFAIL); + for_each_online_node(node) { + struct lrng_drng *drng; + + if (!init_drng_used) { + drngs[node] = lrng_drng_init; + init_drng_used = true; + continue; + } + + drng = kmalloc_node(sizeof(struct lrng_drng), + GFP_KERNEL|__GFP_NOFAIL, node); + memset(drng, 0, sizeof(lrng_drng)); + + drng->crypto_cb = lrng_drng_init->crypto_cb; + drng->drng = drng->crypto_cb->lrng_drng_alloc( + LRNG_DRNG_SECURITY_STRENGTH_BYTES); + if (IS_ERR(drng->drng)) { + kfree(drng); + goto err; + } + + drng->hash = drng->crypto_cb->lrng_hash_alloc(); + if (IS_ERR(drng->hash)) { + drng->crypto_cb->lrng_drng_dealloc(drng->drng); + kfree(drng); + goto err; + } + + mutex_init(&drng->lock); + spin_lock_init(&drng->spin_lock); + rwlock_init(&drng->hash_lock); + + /* + * Switch the hash used by the per-CPU pool. + * We do not need to lock the new hash as it is not usable yet + * due to **drngs not yet being initialized. + */ + if (lrng_pcpu_switch_hash(node, drng->crypto_cb, drng->hash, + &lrng_cc20_crypto_cb)) + goto err; + + /* + * No reseeding of NUMA DRNGs from previous DRNGs as this + * would complicate the code. Let it simply reseed. + */ + lrng_drng_reset(drng); + drngs[node] = drng; + + lrng_pool_inc_numa_node(); + pr_info("DRNG and entropy pool read hash for NUMA node %d allocated\n", + node); + } + + /* counterpart to smp_load_acquire in lrng_drng_instances */ + if (!cmpxchg_release(&lrng_drng, NULL, drngs)) { + lrng_pool_all_numa_nodes_seeded(false); + goto unlock; + } + +err: + for_each_online_node(node) { + struct lrng_drng *drng = drngs[node]; + + if (drng == lrng_drng_init) + continue; + + if (drng) { + lrng_pcpu_switch_hash(node, &lrng_cc20_crypto_cb, NULL, + drng->crypto_cb); + drng->crypto_cb->lrng_hash_dealloc(drng->hash); + drng->crypto_cb->lrng_drng_dealloc(drng->drng); + kfree(drng); + } + } + kfree(drngs); + +unlock: + mutex_unlock(&lrng_crypto_cb_update); +} + +static DECLARE_WORK(lrng_drngs_numa_alloc_work, _lrng_drngs_numa_alloc); + +void lrng_drngs_numa_alloc(void) +{ + schedule_work(&lrng_drngs_numa_alloc_work); +} diff --git a/drivers/char/lrng/lrng_proc.c b/drivers/char/lrng/lrng_proc.c new file mode 100644 index 000000000000..5c7b4908fb0c --- /dev/null +++ b/drivers/char/lrng/lrng_proc.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG proc and sysctl interfaces + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#include +#include +#include +#include +#include + +#include "lrng_internal.h" + +/* + * This function is used to return both the bootid UUID, and random + * UUID. The difference is in whether table->data is NULL; if it is, + * then a new UUID is generated and returned to the user. 
+ * + * If the user accesses this via the proc interface, the UUID will be + * returned as an ASCII string in the standard UUID format; if via the + * sysctl system call, as 16 bytes of binary data. + */ +static int lrng_proc_do_uuid(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table fake_table; + unsigned char buf[64], tmp_uuid[16], *uuid; + + uuid = table->data; + if (!uuid) { + uuid = tmp_uuid; + generate_random_uuid(uuid); + } else { + static DEFINE_SPINLOCK(bootid_spinlock); + + spin_lock(&bootid_spinlock); + if (!uuid[8]) + generate_random_uuid(uuid); + spin_unlock(&bootid_spinlock); + } + + sprintf(buf, "%pU", uuid); + + fake_table.data = buf; + fake_table.maxlen = sizeof(buf); + + return proc_dostring(&fake_table, write, buffer, lenp, ppos); +} + +static int lrng_proc_do_entropy(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table fake_table; + int entropy_count; + + entropy_count = lrng_avail_entropy(); + + fake_table.data = &entropy_count; + fake_table.maxlen = sizeof(entropy_count); + + return proc_dointvec(&fake_table, write, buffer, lenp, ppos); +} + +static int lrng_proc_do_poolsize(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table fake_table; + int entropy_count; + + /* LRNG can at most retain entropy in per-CPU pools and aux pool */ + entropy_count = lrng_get_digestsize() + lrng_pcpu_avail_pool_size(); + + fake_table.data = &entropy_count; + fake_table.maxlen = sizeof(entropy_count); + + return proc_dointvec(&fake_table, write, buffer, lenp, ppos); +} + +static int lrng_min_write_thresh; +static int lrng_max_write_thresh = (LRNG_WRITE_WAKEUP_ENTROPY << 3); +static char lrng_sysctl_bootid[16]; +static int lrng_drng_reseed_max_min; + +void lrng_proc_update_max_write_thresh(u32 new_digestsize) +{ + lrng_max_write_thresh = (int)new_digestsize; + /* Ensure that changes to the global variable are visible */ + mb(); +} + +static struct ctl_table random_table[] = { + { + .procname = "poolsize", + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = lrng_proc_do_poolsize, + }, + { + .procname = "entropy_avail", + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = lrng_proc_do_entropy, + }, + { + .procname = "write_wakeup_threshold", + .data = &lrng_write_wakeup_bits, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &lrng_min_write_thresh, + .extra2 = &lrng_max_write_thresh, + }, + { + .procname = "boot_id", + .data = &lrng_sysctl_bootid, + .maxlen = 16, + .mode = 0444, + .proc_handler = lrng_proc_do_uuid, + }, + { + .procname = "uuid", + .maxlen = 16, + .mode = 0444, + .proc_handler = lrng_proc_do_uuid, + }, + { + .procname = "urandom_min_reseed_secs", + .data = &lrng_drng_reseed_max_time, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &lrng_drng_reseed_max_min, + }, + { } +}; + +/* Number of online DRNGs */ +static u32 numa_drngs = 1; + +void lrng_pool_inc_numa_node(void) +{ + numa_drngs++; +} + +static int lrng_proc_type_show(struct seq_file *m, void *v) +{ + struct lrng_drng *lrng_drng_init = lrng_drng_init_instance(); + unsigned long flags = 0; + unsigned char buf[250], irq[200], aux[100], cpu[90], jent[45]; + + lrng_drng_lock(lrng_drng_init, &flags); + snprintf(buf, sizeof(buf), + "DRNG name: %s\n" + "LRNG security strength in bits: %d\n" + "number of DRNG instances: %u\n" + "Standards compliance: %s\n" + "Entropy Sources: 
%s%s%sAuxiliary\n" + "LRNG minimally seeded: %s\n" + "LRNG fully seeded: %s\n", + lrng_drng_init->crypto_cb->lrng_drng_name(), + lrng_security_strength(), + numa_drngs, + lrng_sp80090c_compliant() ? "SP800-90C " : "", + IS_ENABLED(CONFIG_LRNG_IRQ) ? "IRQ " : "", + IS_ENABLED(CONFIG_LRNG_JENT) ? "JitterRNG " : "", + IS_ENABLED(CONFIG_LRNG_CPU) ? "CPU " : "", + lrng_state_min_seeded() ? "true" : "false", + lrng_state_fully_seeded() ? "true" : "false"); + + lrng_aux_es_state(aux, sizeof(aux)); + + irq[0] = '\0'; + lrng_irq_es_state(irq, sizeof(irq)); + + jent[0] = '\0'; + lrng_jent_es_state(jent, sizeof(jent)); + + cpu[0] = '\0'; + lrng_arch_es_state(cpu, sizeof(cpu)); + + lrng_drng_unlock(lrng_drng_init, &flags); + + seq_write(m, buf, strlen(buf)); + seq_write(m, aux, strlen(aux)); + seq_write(m, irq, strlen(irq)); + seq_write(m, jent, strlen(jent)); + seq_write(m, cpu, strlen(cpu)); + + return 0; +} + +/* + * rand_initialize() is called before sysctl_init(), + * so we cannot call register_sysctl_init() in rand_initialize() + */ +static int __init random_sysctls_init(void) +{ + register_sysctl_init("kernel/random", random_table); + return 0; +} +device_initcall(random_sysctls_init); + +static int __init lrng_proc_type_init(void) +{ + proc_create_single("lrng_type", 0444, NULL, &lrng_proc_type_show); + return 0; +} +module_init(lrng_proc_type_init); diff --git a/drivers/char/lrng/lrng_selftest.c b/drivers/char/lrng/lrng_selftest.c new file mode 100644 index 000000000000..2e81047fe6a7 --- /dev/null +++ b/drivers/char/lrng/lrng_selftest.c @@ -0,0 +1,387 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG power-on and on-demand self-test + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +/* + * In addition to the self-tests below, the following LRNG components + * are covered with self-tests during regular operation: + * + * * power-on self-test: SP800-90A DRBG provided by the Linux kernel crypto API + * * power-on self-test: PRNG provided by the Linux kernel crypto API + * * runtime test: Raw noise source data testing including SP800-90B compliant + * tests when enabling CONFIG_LRNG_HEALTH_TESTS + * + * Additional developer tests present with LRNG code: + * * SP800-90B APT and RCT test enforcement validation when enabling + * CONFIG_LRNG_APT_BROKEN or CONFIG_LRNG_RCT_BROKEN. + * * Collection of raw entropy from the interrupt noise source when enabling + * CONFIG_LRNG_TESTING and pulling the data from the kernel with the provided + * interface. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +#include "lrng_chacha20.h" +#include "lrng_internal.h" + +#define LRNG_SELFTEST_PASSED 0 +#define LRNG_SEFLTEST_ERROR_TIME (1 << 0) +#define LRNG_SEFLTEST_ERROR_CHACHA20 (1 << 1) +#define LRNG_SEFLTEST_ERROR_HASH (1 << 2) +#define LRNG_SEFLTEST_ERROR_GCD (1 << 3) +#define LRNG_SELFTEST_NOT_EXECUTED 0xffffffff + +#ifdef CONFIG_LRNG_IRQ + +#include "lrng_es_irq.h" + +static u32 lrng_data_selftest_ptr = 0; +static u32 lrng_data_selftest[LRNG_DATA_ARRAY_SIZE]; + +static void lrng_data_process_selftest_insert(u32 time) +{ + u32 ptr = lrng_data_selftest_ptr++ & LRNG_DATA_WORD_MASK; + unsigned int array = lrng_data_idx2array(ptr); + unsigned int slot = lrng_data_idx2slot(ptr); + + /* zeroization of slot to ensure the following OR adds the data */ + lrng_data_selftest[array] &= + ~(lrng_data_slot_val(0xffffffff & LRNG_DATA_SLOTSIZE_MASK, + slot)); + lrng_data_selftest[array] |= + lrng_data_slot_val(time & LRNG_DATA_SLOTSIZE_MASK, slot); +} + +static void lrng_data_process_selftest_u32(u32 data) +{ + u32 pre_ptr, ptr, mask; + unsigned int pre_array; + + /* Increment pointer by number of slots taken for input value */ + lrng_data_selftest_ptr += LRNG_DATA_SLOTS_PER_UINT; + + /* ptr to current unit */ + ptr = lrng_data_selftest_ptr; + + lrng_pcpu_split_u32(&ptr, &pre_ptr, &mask); + + /* MSB of data go into previous unit */ + pre_array = lrng_data_idx2array(pre_ptr); + /* zeroization of slot to ensure the following OR adds the data */ + lrng_data_selftest[pre_array] &= ~(0xffffffff & ~mask); + lrng_data_selftest[pre_array] |= data & ~mask; + + /* LSB of data go into current unit */ + lrng_data_selftest[lrng_data_idx2array(ptr)] = data & mask; +} + +static unsigned int lrng_data_process_selftest(void) +{ + u32 time; + u32 idx_zero_compare = (0 << 0) | (1 << 8) | (2 << 16) | (3 << 24); + u32 idx_one_compare = (4 << 0) | (5 << 8) | (6 << 16) | (7 << 24); + u32 idx_last_compare = + (((LRNG_DATA_NUM_VALUES - 4) & LRNG_DATA_SLOTSIZE_MASK) << 0) | + (((LRNG_DATA_NUM_VALUES - 3) & LRNG_DATA_SLOTSIZE_MASK) << 8) | + (((LRNG_DATA_NUM_VALUES - 2) & LRNG_DATA_SLOTSIZE_MASK) << 16) | + (((LRNG_DATA_NUM_VALUES - 1) & LRNG_DATA_SLOTSIZE_MASK) << 24); + + (void)idx_one_compare; + + /* "poison" the array to verify the operation of the zeroization */ + lrng_data_selftest[0] = 0xffffffff; + lrng_data_selftest[1] = 0xffffffff; + + lrng_data_process_selftest_insert(0); + /* + * Note, when using lrng_data_process_u32() on unaligned ptr, + * the first slots will go into next word, and the last slots go + * into the previous word. 
+ */ + lrng_data_process_selftest_u32((4 << 0) | (1 << 8) | (2 << 16) | + (3 << 24)); + lrng_data_process_selftest_insert(5); + lrng_data_process_selftest_insert(6); + lrng_data_process_selftest_insert(7); + + if ((lrng_data_selftest[0] != idx_zero_compare) || + (lrng_data_selftest[1] != idx_one_compare)) + goto err; + + /* Reset for next test */ + lrng_data_selftest[0] = 0; + lrng_data_selftest[1] = 0; + lrng_data_selftest_ptr = 0; + + for (time = 0; time < LRNG_DATA_NUM_VALUES; time++) + lrng_data_process_selftest_insert(time); + + if ((lrng_data_selftest[0] != idx_zero_compare) || + (lrng_data_selftest[1] != idx_one_compare) || + (lrng_data_selftest[LRNG_DATA_ARRAY_SIZE - 1] != idx_last_compare)) + goto err; + + return LRNG_SELFTEST_PASSED; + +err: + pr_err("LRNG data array self-test FAILED\n"); + return LRNG_SEFLTEST_ERROR_TIME; +} + +static unsigned int lrng_gcd_selftest(void) +{ + u32 history[10]; + unsigned int i; + +#define LRNG_GCD_SELFTEST 3 + for (i = 0; i < ARRAY_SIZE(history); i++) + history[i] = i * LRNG_GCD_SELFTEST; + + if (lrng_gcd_analyze(history, ARRAY_SIZE(history)) == LRNG_GCD_SELFTEST) + return LRNG_SELFTEST_PASSED; + + pr_err("LRNG GCD self-test FAILED\n"); + return LRNG_SEFLTEST_ERROR_GCD; +} + +#else /* CONFIG_LRNG_IRQ */ + +static unsigned int lrng_data_process_selftest(void) +{ + return LRNG_SELFTEST_PASSED; +} + +static unsigned int lrng_gcd_selftest(void) +{ + return LRNG_SELFTEST_PASSED; +} + +#endif /* CONFIG_LRNG_IRQ */ + +static void lrng_selftest_bswap32(u32 *ptr, u32 words) +{ + u32 i; + + /* Byte-swap data which is an LE representation */ + for (i = 0; i < words; i++) { + __le32 *p = (__le32 *)ptr; + + *p = cpu_to_le32(*ptr); + ptr++; + } +} + +/* The test vectors are taken from crypto/testmgr.h */ +static unsigned int lrng_hash_selftest(void) +{ + SHASH_DESC_ON_STACK(shash, NULL); + const struct lrng_crypto_cb *crypto_cb = &lrng_cc20_crypto_cb; + static const u8 lrng_hash_selftest_result[] = +#ifdef CONFIG_CRYPTO_LIB_SHA256 + { 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, + 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, + 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, + 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad }; +#else /* CONFIG_CRYPTO_LIB_SHA256 */ + { 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e, + 0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d }; +#endif /* CONFIG_CRYPTO_LIB_SHA256 */ + static const u8 hash_input[] = { 0x61, 0x62, 0x63 }; /* "abc" */ + u8 digest[sizeof(lrng_hash_selftest_result)] __aligned(sizeof(u32)); + + if (sizeof(digest) != crypto_cb->lrng_hash_digestsize(NULL)) + return LRNG_SEFLTEST_ERROR_HASH; + + if (!crypto_cb->lrng_hash_init(shash, NULL) && + !crypto_cb->lrng_hash_update(shash, hash_input, + sizeof(hash_input)) && + !crypto_cb->lrng_hash_final(shash, digest) && + !memcmp(digest, lrng_hash_selftest_result, sizeof(digest))) + return 0; + + pr_err("LRNG %s Hash self-test FAILED\n", crypto_cb->lrng_hash_name()); + return LRNG_SEFLTEST_ERROR_HASH; +} + +/* + * The test vectors were generated using the ChaCha20 DRNG from + * https://www.chronox.de/chacha20.html + */ +static unsigned int lrng_chacha20_drng_selftest(void) +{ + const struct lrng_crypto_cb *crypto_cb = &lrng_cc20_crypto_cb; + u8 seed[CHACHA_KEY_SIZE * 2] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 
0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + }; + struct chacha20_block chacha20; + int ret; + u8 outbuf[CHACHA_KEY_SIZE * 2] __aligned(sizeof(u32)); + + /* + * Expected result when ChaCha20 DRNG state is zero: + * * constants are set to "expand 32-byte k" + * * remaining state is 0 + * and pulling one half ChaCha20 DRNG block. + */ + static const u8 expected_halfblock[CHACHA_KEY_SIZE] = { + 0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90, + 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28, + 0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a, + 0xa8, 0x36, 0xef, 0xcc, 0x8b, 0x77, 0x0d, 0xc7 }; + + /* + * Expected result when ChaCha20 DRNG state is zero: + * * constants are set to "expand 32-byte k" + * * remaining state is 0 + * followed by a reseed with two keyblocks + * 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + * 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + * 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + * 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + * 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + * 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + * 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + * 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f + * and pulling one ChaCha20 DRNG block. + */ + static const u8 expected_oneblock[CHACHA_KEY_SIZE * 2] = { + 0xe3, 0xb0, 0x8a, 0xcc, 0x34, 0xc3, 0x17, 0x0e, + 0xc3, 0xd8, 0xc3, 0x40, 0xe7, 0x73, 0xe9, 0x0d, + 0xd1, 0x62, 0xa3, 0x5d, 0x7d, 0xf2, 0xf1, 0x4a, + 0x24, 0x42, 0xb7, 0x1e, 0xb0, 0x05, 0x17, 0x07, + 0xb9, 0x35, 0x10, 0x69, 0x8b, 0x46, 0xfb, 0x51, + 0xe9, 0x91, 0x3f, 0x46, 0xf2, 0x4d, 0xea, 0xd0, + 0x81, 0xc1, 0x1b, 0xa9, 0x5d, 0x52, 0x91, 0x5f, + 0xcd, 0xdc, 0xc6, 0xd6, 0xc3, 0x7c, 0x50, 0x23 }; + + /* + * Expected result when ChaCha20 DRNG state is zero: + * * constants are set to "expand 32-byte k" + * * remaining state is 0 + * followed by a reseed with one key block plus one byte + * 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + * 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + * 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + * 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + * 0x20 + * and pulling less than one ChaCha20 DRNG block. 
+ */ + static const u8 expected_block_nonalinged[CHACHA_KEY_SIZE + 4] = { + 0x9c, 0xfc, 0x5e, 0x31, 0x21, 0x62, 0x11, 0x85, + 0xd3, 0x77, 0xd3, 0x69, 0x0f, 0xa8, 0x16, 0x55, + 0xb4, 0x4c, 0xf6, 0x52, 0xf3, 0xa8, 0x37, 0x99, + 0x38, 0x76, 0xa0, 0x66, 0xec, 0xbb, 0xce, 0xa9, + 0x9c, 0x95, 0xa1, 0xfd }; + + BUILD_BUG_ON(sizeof(seed) % sizeof(u32)); + + memset(&chacha20, 0, sizeof(chacha20)); + lrng_cc20_init_rfc7539(&chacha20); + lrng_selftest_bswap32((u32 *)seed, sizeof(seed) / sizeof(u32)); + + /* Generate with zero state */ + ret = crypto_cb->lrng_drng_generate_helper(&chacha20, outbuf, + sizeof(expected_halfblock)); + if (ret != sizeof(expected_halfblock)) + goto err; + if (memcmp(outbuf, expected_halfblock, sizeof(expected_halfblock))) + goto err; + + /* Clear state of DRNG */ + memset(&chacha20.key.u[0], 0, 48); + + /* Reseed with 2 key blocks */ + ret = crypto_cb->lrng_drng_seed_helper(&chacha20, seed, + sizeof(expected_oneblock)); + if (ret < 0) + goto err; + ret = crypto_cb->lrng_drng_generate_helper(&chacha20, outbuf, + sizeof(expected_oneblock)); + if (ret != sizeof(expected_oneblock)) + goto err; + if (memcmp(outbuf, expected_oneblock, sizeof(expected_oneblock))) + goto err; + + /* Clear state of DRNG */ + memset(&chacha20.key.u[0], 0, 48); + + /* Reseed with 1 key block and one byte */ + ret = crypto_cb->lrng_drng_seed_helper(&chacha20, seed, + sizeof(expected_block_nonalinged)); + if (ret < 0) + goto err; + ret = crypto_cb->lrng_drng_generate_helper(&chacha20, outbuf, + sizeof(expected_block_nonalinged)); + if (ret != sizeof(expected_block_nonalinged)) + goto err; + if (memcmp(outbuf, expected_block_nonalinged, + sizeof(expected_block_nonalinged))) + goto err; + + return LRNG_SELFTEST_PASSED; + +err: + pr_err("LRNG ChaCha20 DRNG self-test FAILED\n"); + return LRNG_SEFLTEST_ERROR_CHACHA20; +} + +static unsigned int lrng_selftest_status = LRNG_SELFTEST_NOT_EXECUTED; + +static int lrng_selftest(void) +{ + unsigned int ret = lrng_data_process_selftest(); + + ret |= lrng_chacha20_drng_selftest(); + ret |= lrng_hash_selftest(); + ret |= lrng_gcd_selftest(); + + if (ret) { + if (IS_ENABLED(CONFIG_LRNG_SELFTEST_PANIC)) + panic("LRNG self-tests failed: %u\n", ret); + } else { + pr_info("LRNG self-tests passed\n"); + } + + lrng_selftest_status = ret; + + if (lrng_selftest_status) + return -EFAULT; + return 0; +} + +#ifdef CONFIG_SYSFS +/* Re-perform self-test when any value is written to the sysfs file. 
*/ +static int lrng_selftest_sysfs_set(const char *val, + const struct kernel_param *kp) +{ + return lrng_selftest(); +} + +static const struct kernel_param_ops lrng_selftest_sysfs = { + .set = lrng_selftest_sysfs_set, + .get = param_get_uint, +}; +module_param_cb(selftest_status, &lrng_selftest_sysfs, &lrng_selftest_status, + 0644); +#endif /* CONFIG_SYSFS */ + +static int __init lrng_selftest_init(void) +{ + return lrng_selftest(); +} + +module_init(lrng_selftest_init); diff --git a/drivers/char/lrng/lrng_switch.c b/drivers/char/lrng/lrng_switch.c new file mode 100644 index 000000000000..5fce40149911 --- /dev/null +++ b/drivers/char/lrng/lrng_switch.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * LRNG DRNG switching support + * + * Copyright (C) 2016 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include + +#include "lrng_internal.h" + +static int lrng_drng_switch(struct lrng_drng *drng_store, + const struct lrng_crypto_cb *cb, int node) +{ + const struct lrng_crypto_cb *old_cb; + unsigned long flags = 0, flags2 = 0; + int ret; + u8 seed[LRNG_DRNG_SECURITY_STRENGTH_BYTES]; + void *new_drng = cb->lrng_drng_alloc(LRNG_DRNG_SECURITY_STRENGTH_BYTES); + void *old_drng, *new_hash, *old_hash; + u32 current_security_strength; + bool sl = false, reset_drng = !lrng_get_available(); + + if (IS_ERR(new_drng)) { + pr_warn("could not allocate new DRNG for NUMA node %d (%ld)\n", + node, PTR_ERR(new_drng)); + return PTR_ERR(new_drng); + } + + new_hash = cb->lrng_hash_alloc(); + if (IS_ERR(new_hash)) { + pr_warn("could not allocate new LRNG pool hash (%ld)\n", + PTR_ERR(new_hash)); + cb->lrng_drng_dealloc(new_drng); + return PTR_ERR(new_hash); + } + + if (cb->lrng_hash_digestsize(new_hash) > LRNG_MAX_DIGESTSIZE) { + pr_warn("digest size of newly requested hash too large\n"); + cb->lrng_hash_dealloc(new_hash); + cb->lrng_drng_dealloc(new_drng); + return -EINVAL; + } + + current_security_strength = lrng_security_strength(); + lrng_drng_lock(drng_store, &flags); + + /* + * Pull from existing DRNG to seed new DRNG regardless of seed status + * of old DRNG -- the entropy state for the DRNG is left unchanged which + * implies that als the new DRNG is reseeded when deemed necessary. This + * seeding of the new DRNG shall only ensure that the new DRNG has the + * same entropy as the old DRNG. + */ + ret = drng_store->crypto_cb->lrng_drng_generate_helper( + drng_store->drng, seed, sizeof(seed)); + lrng_drng_unlock(drng_store, &flags); + + if (ret < 0) { + reset_drng = true; + pr_warn("getting random data from DRNG failed for NUMA node %d (%d)\n", + node, ret); + } else { + /* seed new DRNG with data */ + ret = cb->lrng_drng_seed_helper(new_drng, seed, ret); + memzero_explicit(seed, sizeof(seed)); + if (ret < 0) { + reset_drng = true; + pr_warn("seeding of new DRNG failed for NUMA node %d (%d)\n", + node, ret); + } else { + pr_debug("seeded new DRNG of NUMA node %d instance from old DRNG instance\n", + node); + } + } + + mutex_lock(&drng_store->lock); + write_lock_irqsave(&drng_store->hash_lock, flags2); + /* + * If we switch the DRNG from the initial ChaCha20 DRNG to something + * else, there is a lock transition from spin lock to mutex (see + * lrng_drng_is_atomic and how the lock is taken in lrng_drng_lock). + * Thus, we need to take both locks during the transition phase. 
+ */ + if (lrng_drng_is_atomic(drng_store)) { + spin_lock_irqsave(&drng_store->spin_lock, flags); + sl = true; + } else { + __acquire(&drng_store->spin_lock); + } + + /* Trigger the switch of the aux entropy pool for current node. */ + if (drng_store == lrng_drng_init_instance()) { + ret = lrng_aux_switch_hash(cb, new_hash, drng_store->crypto_cb); + if (ret) + goto err; + } + + /* Trigger the switch of the per-CPU entropy pools for current node. */ + ret = lrng_pcpu_switch_hash(node, cb, new_hash, drng_store->crypto_cb); + if (ret) { + /* Switch the crypto operation back to be consistent */ + WARN_ON(lrng_aux_switch_hash(drng_store->crypto_cb, + drng_store->hash, cb)); + } else { + if (reset_drng) + lrng_drng_reset(drng_store); + + old_drng = drng_store->drng; + old_cb = drng_store->crypto_cb; + drng_store->drng = new_drng; + drng_store->crypto_cb = cb; + + old_hash = drng_store->hash; + drng_store->hash = new_hash; + pr_info("Entropy pool read-hash allocated for DRNG for NUMA node %d\n", + node); + + /* Reseed if previous LRNG security strength was insufficient */ + if (current_security_strength < lrng_security_strength()) + drng_store->force_reseed = true; + + /* Force oversampling seeding as we initialize DRNG */ + if (IS_ENABLED(CONFIG_LRNG_OVERSAMPLE_ENTROPY_SOURCES)) + lrng_unset_fully_seeded(drng_store); + + if (lrng_state_min_seeded()) + lrng_set_entropy_thresh(lrng_get_seed_entropy_osr( + drng_store->fully_seeded)); + + /* ChaCha20 serves as atomic instance left untouched. */ + if (old_drng != &chacha20) { + old_cb->lrng_drng_dealloc(old_drng); + old_cb->lrng_hash_dealloc(old_hash); + } + + pr_info("DRNG of NUMA node %d switched\n", node); + } + +err: + if (sl) + spin_unlock_irqrestore(&drng_store->spin_lock, flags); + else + __release(&drng_store->spin_lock); + write_unlock_irqrestore(&drng_store->hash_lock, flags2); + mutex_unlock(&drng_store->lock); + + return ret; +} + +/* + * Switch the existing DRNG instances with new using the new crypto callbacks. + * The caller must hold the lrng_crypto_cb_update lock. + */ +static int lrng_drngs_switch(const struct lrng_crypto_cb *cb) +{ + struct lrng_drng **lrng_drng = lrng_drng_instances(); + struct lrng_drng *lrng_drng_init = lrng_drng_init_instance(); + int ret = 0; + + /* Update DRNG */ + if (lrng_drng) { + u32 node; + + for_each_online_node(node) { + if (lrng_drng[node]) + ret = lrng_drng_switch(lrng_drng[node], cb, + node); + } + } else { + ret = lrng_drng_switch(lrng_drng_init, cb, 0); + } + + if (!ret) + lrng_set_available(); + + return 0; +} + +/* + * lrng_set_drng_cb - Register new cryptographic callback functions for DRNG + * The registering implies that all old DRNG states are replaced with new + * DRNG states. + * + * @cb: Callback functions to be registered -- if NULL, use the default + * callbacks pointing to the ChaCha20 DRNG. + * + * Return: + * * 0 on success + * * < 0 on error + */ +int lrng_set_drng_cb(const struct lrng_crypto_cb *cb) +{ + struct lrng_drng *lrng_drng_init = lrng_drng_init_instance(); + int ret; + + if (!cb) + cb = &lrng_cc20_crypto_cb; + + mutex_lock(&lrng_crypto_cb_update); + + /* + * If a callback other than the default is set, allow it only to be + * set back to the default callback. This ensures that multiple + * different callbacks can be registered at the same time. If a + * callback different from the current callback and the default + * callback shall be set, the current callback must be deregistered + * (e.g. 
the kernel module providing it must be unloaded) and the new + * implementation can be registered. + */ + if ((cb != &lrng_cc20_crypto_cb) && + (lrng_drng_init->crypto_cb != &lrng_cc20_crypto_cb)) { + pr_warn("disallow setting new cipher callbacks, unload the old callbacks first!\n"); + ret = -EINVAL; + goto out; + } + + ret = lrng_drngs_switch(cb); + +out: + mutex_unlock(&lrng_crypto_cb_update); + return ret; +} +EXPORT_SYMBOL(lrng_set_drng_cb); diff --git a/drivers/char/lrng/lrng_testing.c b/drivers/char/lrng/lrng_testing.c new file mode 100644 index 000000000000..3517319acb43 --- /dev/null +++ b/drivers/char/lrng/lrng_testing.c @@ -0,0 +1,689 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause +/* + * Linux Random Number Generator (LRNG) testing interfaces + * + * Copyright (C) 2019 - 2021, Stephan Mueller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "lrng_internal.h" + +#define LRNG_TESTING_RINGBUFFER_SIZE 1024 +#define LRNG_TESTING_RINGBUFFER_MASK (LRNG_TESTING_RINGBUFFER_SIZE - 1) + +struct lrng_testing { + u32 lrng_testing_rb[LRNG_TESTING_RINGBUFFER_SIZE]; + u32 rb_reader; + u32 rb_writer; + atomic_t lrng_testing_enabled; + spinlock_t lock; + wait_queue_head_t read_wait; +}; + +/*************************** Generic Data Handling ****************************/ + +/* + * boot variable: + * 0 ==> No boot test, gathering of runtime data allowed + * 1 ==> Boot test enabled and ready for collecting data, gathering runtime + * data is disabled + * 2 ==> Boot test completed and disabled, gathering of runtime data is + * disabled + */ + +static void lrng_testing_reset(struct lrng_testing *data) +{ + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + data->rb_reader = 0; + data->rb_writer = 0; + spin_unlock_irqrestore(&data->lock, flags); +} + +static void lrng_testing_init(struct lrng_testing *data, u32 boot) +{ + /* + * The boot time testing implies we have a running test. If the + * caller wants to clear it, he has to unset the boot_test flag + * at runtime via sysfs to enable regular runtime testing + */ + if (boot) + return; + + lrng_testing_reset(data); + atomic_set(&data->lrng_testing_enabled, 1); + pr_warn("Enabling data collection\n"); +} + +static void lrng_testing_fini(struct lrng_testing *data, u32 boot) +{ + /* If we have boot data, we do not reset yet to allow data to be read */ + if (boot) + return; + + atomic_set(&data->lrng_testing_enabled, 0); + lrng_testing_reset(data); + pr_warn("Disabling data collection\n"); +} + +static bool lrng_testing_store(struct lrng_testing *data, u32 value, + u32 *boot) +{ + unsigned long flags; + + if (!atomic_read(&data->lrng_testing_enabled) && (*boot != 1)) + return false; + + spin_lock_irqsave(&data->lock, flags); + + /* + * Disable entropy testing for boot time testing after ring buffer + * is filled. 
+ */ + if (*boot) { + if (data->rb_writer > LRNG_TESTING_RINGBUFFER_SIZE) { + *boot = 2; + pr_warn_once("One time data collection test disabled\n"); + spin_unlock_irqrestore(&data->lock, flags); + return false; + } + + if (data->rb_writer == 1) + pr_warn("One time data collection test enabled\n"); + } + + data->lrng_testing_rb[data->rb_writer & LRNG_TESTING_RINGBUFFER_MASK] = + value; + data->rb_writer++; + + spin_unlock_irqrestore(&data->lock, flags); + + if (wq_has_sleeper(&data->read_wait)) + wake_up_interruptible(&data->read_wait); + + return true; +} + +static bool lrng_testing_have_data(struct lrng_testing *data) +{ + return ((data->rb_writer & LRNG_TESTING_RINGBUFFER_MASK) != + (data->rb_reader & LRNG_TESTING_RINGBUFFER_MASK)); +} + +static int lrng_testing_reader(struct lrng_testing *data, u32 *boot, + u8 *outbuf, u32 outbuflen) +{ + unsigned long flags; + int collected_data = 0; + + lrng_testing_init(data, *boot); + + while (outbuflen) { + spin_lock_irqsave(&data->lock, flags); + + /* We have no data or reached the writer. */ + if (!data->rb_writer || + (data->rb_writer == data->rb_reader)) { + + spin_unlock_irqrestore(&data->lock, flags); + + /* + * Now we gathered all boot data, enable regular data + * collection. + */ + if (*boot) { + *boot = 0; + goto out; + } + + wait_event_interruptible(data->read_wait, + lrng_testing_have_data(data)); + if (signal_pending(current)) { + collected_data = -ERESTARTSYS; + goto out; + } + + continue; + } + + /* We copy out word-wise */ + if (outbuflen < sizeof(u32)) { + spin_unlock_irqrestore(&data->lock, flags); + goto out; + } + + memcpy(outbuf, &data->lrng_testing_rb[data->rb_reader], + sizeof(u32)); + data->rb_reader++; + + spin_unlock_irqrestore(&data->lock, flags); + + outbuf += sizeof(u32); + outbuflen -= sizeof(u32); + collected_data += sizeof(u32); + } + +out: + lrng_testing_fini(data, *boot); + return collected_data; +} + +static int lrng_testing_extract_user(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos, + int (*reader)(u8 *outbuf, u32 outbuflen)) +{ + u8 *tmp, *tmp_aligned; + int ret = 0, large_request = (nbytes > 256); + + if (!nbytes) + return 0; + + /* + * The intention of this interface is for collecting at least + * 1000 samples due to the SP800-90B requirements. So, we make no + * effort in avoiding allocating more memory that actually needed + * by the user. Hence, we allocate sufficient memory to always hold + * that amount of data. 
+ */ + tmp = kmalloc(LRNG_TESTING_RINGBUFFER_SIZE + sizeof(u32), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + tmp_aligned = PTR_ALIGN(tmp, sizeof(u32)); + + while (nbytes) { + int i; + + if (large_request && need_resched()) { + if (signal_pending(current)) { + if (ret == 0) + ret = -ERESTARTSYS; + break; + } + schedule(); + } + + i = min_t(int, nbytes, LRNG_TESTING_RINGBUFFER_SIZE); + i = reader(tmp_aligned, i); + if (i <= 0) { + if (i < 0) + ret = i; + break; + } + if (copy_to_user(buf, tmp_aligned, i)) { + ret = -EFAULT; + break; + } + + nbytes -= i; + buf += i; + ret += i; + } + + kfree_sensitive(tmp); + + if (ret > 0) + *ppos += ret; + + return ret; +} + +/************** Raw High-Resolution Timer Entropy Data Handling ***************/ + +#ifdef CONFIG_LRNG_RAW_HIRES_ENTROPY + +static u32 boot_raw_hires_test = 0; +module_param(boot_raw_hires_test, uint, 0644); +MODULE_PARM_DESC(boot_raw_hires_test, "Enable gathering boot time high resolution timer entropy of the first entropy events"); + +static struct lrng_testing lrng_raw_hires = { + .rb_reader = 0, + .rb_writer = 0, + .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_hires.lock), + .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_hires.read_wait) +}; + +bool lrng_raw_hires_entropy_store(u32 value) +{ + return lrng_testing_store(&lrng_raw_hires, value, &boot_raw_hires_test); +} + +static int lrng_raw_hires_entropy_reader(u8 *outbuf, u32 outbuflen) +{ + return lrng_testing_reader(&lrng_raw_hires, &boot_raw_hires_test, + outbuf, outbuflen); +} + +static ssize_t lrng_raw_hires_read(struct file *file, char __user *to, + size_t count, loff_t *ppos) +{ + return lrng_testing_extract_user(file, to, count, ppos, + lrng_raw_hires_entropy_reader); +} + +static const struct file_operations lrng_raw_hires_fops = { + .owner = THIS_MODULE, + .read = lrng_raw_hires_read, +}; + +#endif /* CONFIG_LRNG_RAW_HIRES_ENTROPY */ + +/********************* Raw Jiffies Entropy Data Handling **********************/ + +#ifdef CONFIG_LRNG_RAW_JIFFIES_ENTROPY + +static u32 boot_raw_jiffies_test = 0; +module_param(boot_raw_jiffies_test, uint, 0644); +MODULE_PARM_DESC(boot_raw_jiffies_test, "Enable gathering boot time high resolution timer entropy of the first entropy events"); + +static struct lrng_testing lrng_raw_jiffies = { + .rb_reader = 0, + .rb_writer = 0, + .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_jiffies.lock), + .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_jiffies.read_wait) +}; + +bool lrng_raw_jiffies_entropy_store(u32 value) +{ + return lrng_testing_store(&lrng_raw_jiffies, value, + &boot_raw_jiffies_test); +} + +static int lrng_raw_jiffies_entropy_reader(u8 *outbuf, u32 outbuflen) +{ + return lrng_testing_reader(&lrng_raw_jiffies, &boot_raw_jiffies_test, + outbuf, outbuflen); +} + +static ssize_t lrng_raw_jiffies_read(struct file *file, char __user *to, + size_t count, loff_t *ppos) +{ + return lrng_testing_extract_user(file, to, count, ppos, + lrng_raw_jiffies_entropy_reader); +} + +static const struct file_operations lrng_raw_jiffies_fops = { + .owner = THIS_MODULE, + .read = lrng_raw_jiffies_read, +}; + +#endif /* CONFIG_LRNG_RAW_JIFFIES_ENTROPY */ + +/************************** Raw IRQ Data Handling ****************************/ + +#ifdef CONFIG_LRNG_RAW_IRQ_ENTROPY + +static u32 boot_raw_irq_test = 0; +module_param(boot_raw_irq_test, uint, 0644); +MODULE_PARM_DESC(boot_raw_irq_test, "Enable gathering boot time entropy of the first IRQ entropy events"); + +static struct lrng_testing lrng_raw_irq = { + .rb_reader = 0, + .rb_writer = 0, + .lock = 
__SPIN_LOCK_UNLOCKED(lrng_raw_irq.lock), + .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_irq.read_wait) +}; + +bool lrng_raw_irq_entropy_store(u32 value) +{ + return lrng_testing_store(&lrng_raw_irq, value, &boot_raw_irq_test); +} + +static int lrng_raw_irq_entropy_reader(u8 *outbuf, u32 outbuflen) +{ + return lrng_testing_reader(&lrng_raw_irq, &boot_raw_irq_test, outbuf, + outbuflen); +} + +static ssize_t lrng_raw_irq_read(struct file *file, char __user *to, + size_t count, loff_t *ppos) +{ + return lrng_testing_extract_user(file, to, count, ppos, + lrng_raw_irq_entropy_reader); +} + +static const struct file_operations lrng_raw_irq_fops = { + .owner = THIS_MODULE, + .read = lrng_raw_irq_read, +}; + +#endif /* CONFIG_LRNG_RAW_IRQ_ENTROPY */ + +/************************ Raw IRQFLAGS Data Handling **************************/ + +#ifdef CONFIG_LRNG_RAW_IRQFLAGS_ENTROPY + +static u32 boot_raw_irqflags_test = 0; +module_param(boot_raw_irqflags_test, uint, 0644); +MODULE_PARM_DESC(boot_raw_irqflags_test, "Enable gathering boot time entropy of the first IRQ flags entropy events"); + +static struct lrng_testing lrng_raw_irqflags = { + .rb_reader = 0, + .rb_writer = 0, + .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_irqflags.lock), + .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_irqflags.read_wait) +}; + +bool lrng_raw_irqflags_entropy_store(u32 value) +{ + return lrng_testing_store(&lrng_raw_irqflags, value, + &boot_raw_irqflags_test); +} + +static int lrng_raw_irqflags_entropy_reader(u8 *outbuf, u32 outbuflen) +{ + return lrng_testing_reader(&lrng_raw_irqflags, &boot_raw_irqflags_test, + outbuf, outbuflen); +} + +static ssize_t lrng_raw_irqflags_read(struct file *file, char __user *to, + size_t count, loff_t *ppos) +{ + return lrng_testing_extract_user(file, to, count, ppos, + lrng_raw_irqflags_entropy_reader); +} + +static const struct file_operations lrng_raw_irqflags_fops = { + .owner = THIS_MODULE, + .read = lrng_raw_irqflags_read, +}; + +#endif /* CONFIG_LRNG_RAW_IRQFLAGS_ENTROPY */ + +/************************ Raw _RET_IP_ Data Handling **************************/ + +#ifdef CONFIG_LRNG_RAW_RETIP_ENTROPY + +static u32 boot_raw_retip_test = 0; +module_param(boot_raw_retip_test, uint, 0644); +MODULE_PARM_DESC(boot_raw_retip_test, "Enable gathering boot time entropy of the first return instruction pointer entropy events"); + +static struct lrng_testing lrng_raw_retip = { + .rb_reader = 0, + .rb_writer = 0, + .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_retip.lock), + .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_retip.read_wait) +}; + +bool lrng_raw_retip_entropy_store(u32 value) +{ + return lrng_testing_store(&lrng_raw_retip, value, &boot_raw_retip_test); +} + +static int lrng_raw_retip_entropy_reader(u8 *outbuf, u32 outbuflen) +{ + return lrng_testing_reader(&lrng_raw_retip, &boot_raw_retip_test, + outbuf, outbuflen); +} + +static ssize_t lrng_raw_retip_read(struct file *file, char __user *to, + size_t count, loff_t *ppos) +{ + return lrng_testing_extract_user(file, to, count, ppos, + lrng_raw_retip_entropy_reader); +} + +static const struct file_operations lrng_raw_retip_fops = { + .owner = THIS_MODULE, + .read = lrng_raw_retip_read, +}; + +#endif /* CONFIG_LRNG_RAW_RETIP_ENTROPY */ + +/********************** Raw IRQ register Data Handling ************************/ + +#ifdef CONFIG_LRNG_RAW_REGS_ENTROPY + +static u32 boot_raw_regs_test = 0; +module_param(boot_raw_regs_test, uint, 0644); +MODULE_PARM_DESC(boot_raw_regs_test, "Enable gathering boot time entropy of the first interrupt 
register entropy events"); + +static struct lrng_testing lrng_raw_regs = { + .rb_reader = 0, + .rb_writer = 0, + .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_regs.lock), + .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_regs.read_wait) +}; + +bool lrng_raw_regs_entropy_store(u32 value) +{ + return lrng_testing_store(&lrng_raw_regs, value, &boot_raw_regs_test); +} + +static int lrng_raw_regs_entropy_reader(u8 *outbuf, u32 outbuflen) +{ + return lrng_testing_reader(&lrng_raw_regs, &boot_raw_regs_test, + outbuf, outbuflen); +} + +static ssize_t lrng_raw_regs_read(struct file *file, char __user *to, + size_t count, loff_t *ppos) +{ + return lrng_testing_extract_user(file, to, count, ppos, + lrng_raw_regs_entropy_reader); +} + +static const struct file_operations lrng_raw_regs_fops = { + .owner = THIS_MODULE, + .read = lrng_raw_regs_read, +}; + +#endif /* CONFIG_LRNG_RAW_REGS_ENTROPY */ + +/********************** Raw Entropy Array Data Handling ***********************/ + +#ifdef CONFIG_LRNG_RAW_ARRAY + +static u32 boot_raw_array = 0; +module_param(boot_raw_array, uint, 0644); +MODULE_PARM_DESC(boot_raw_array, "Enable gathering boot time raw noise array data of the first entropy events"); + +static struct lrng_testing lrng_raw_array = { + .rb_reader = 0, + .rb_writer = 0, + .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_array.lock), + .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_array.read_wait) +}; + +bool lrng_raw_array_entropy_store(u32 value) +{ + return lrng_testing_store(&lrng_raw_array, value, &boot_raw_array); +} + +static int lrng_raw_array_entropy_reader(u8 *outbuf, u32 outbuflen) +{ + return lrng_testing_reader(&lrng_raw_array, &boot_raw_array, outbuf, + outbuflen); +} + +static ssize_t lrng_raw_array_read(struct file *file, char __user *to, + size_t count, loff_t *ppos) +{ + return lrng_testing_extract_user(file, to, count, ppos, + lrng_raw_array_entropy_reader); +} + +static const struct file_operations lrng_raw_array_fops = { + .owner = THIS_MODULE, + .read = lrng_raw_array_read, +}; + +#endif /* CONFIG_LRNG_RAW_ARRAY */ + +/******************** Interrupt Performance Data Handling *********************/ + +#ifdef CONFIG_LRNG_IRQ_PERF + +static u32 boot_irq_perf = 0; +module_param(boot_irq_perf, uint, 0644); +MODULE_PARM_DESC(boot_irq_perf, "Enable gathering boot time interrupt performance data of the first entropy events"); + +static struct lrng_testing lrng_irq_perf = { + .rb_reader = 0, + .rb_writer = 0, + .lock = __SPIN_LOCK_UNLOCKED(lrng_irq_perf.lock), + .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_irq_perf.read_wait) +}; + +bool lrng_perf_time(u32 start) +{ + return lrng_testing_store(&lrng_irq_perf, random_get_entropy() - start, + &boot_irq_perf); +} + +static int lrng_irq_perf_reader(u8 *outbuf, u32 outbuflen) +{ + return lrng_testing_reader(&lrng_irq_perf, &boot_irq_perf, outbuf, + outbuflen); +} + +static ssize_t lrng_irq_perf_read(struct file *file, char __user *to, + size_t count, loff_t *ppos) +{ + return lrng_testing_extract_user(file, to, count, ppos, + lrng_irq_perf_reader); +} + +static const struct file_operations lrng_irq_perf_fops = { + .owner = THIS_MODULE, + .read = lrng_irq_perf_read, +}; + +#endif /* CONFIG_LRNG_IRQ_PERF */ + +/*********************************** ACVT ************************************/ + +#ifdef CONFIG_LRNG_ACVT_HASH + +/* maximum amount of data to be hashed as defined by ACVP */ +#define LRNG_ACVT_MAX_SHA_MSG (65536 >> 3) + +/* + * As we use static variables to store the data, it is clear that the + * test interface is only able to handle 
single threaded testing. This is + * considered to be sufficient for testing. If multi-threaded use of the + * ACVT test interface would be performed, the caller would get garbage + * but the kernel operation is unaffected by this. + */ +static u8 lrng_acvt_hash_data[LRNG_ACVT_MAX_SHA_MSG] + __aligned(LRNG_KCAPI_ALIGN); +static atomic_t lrng_acvt_hash_data_size = ATOMIC_INIT(0); +static u8 lrng_acvt_hash_digest[LRNG_ATOMIC_DIGEST_SIZE]; + +static ssize_t lrng_acvt_hash_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + if (nbytes > LRNG_ACVT_MAX_SHA_MSG) + return -EINVAL; + + atomic_set(&lrng_acvt_hash_data_size, (int)nbytes); + + return simple_write_to_buffer(lrng_acvt_hash_data, + LRNG_ACVT_MAX_SHA_MSG, ppos, buf, nbytes); +} + +static ssize_t lrng_acvt_hash_read(struct file *file, char __user *to, + size_t count, loff_t *ppos) +{ + SHASH_DESC_ON_STACK(shash, NULL); + const struct lrng_crypto_cb *crypto_cb = &lrng_cc20_crypto_cb; + ssize_t ret; + + if (count > LRNG_ATOMIC_DIGEST_SIZE) + return -EINVAL; + + ret = crypto_cb->lrng_hash_init(shash, NULL) ?: + crypto_cb->lrng_hash_update(shash, lrng_acvt_hash_data, + atomic_read_u32(&lrng_acvt_hash_data_size)) ?: + crypto_cb->lrng_hash_final(shash, lrng_acvt_hash_digest); + if (ret) + return ret; + + return simple_read_from_buffer(to, count, ppos, lrng_acvt_hash_digest, + sizeof(lrng_acvt_hash_digest)); +} + +static const struct file_operations lrng_acvt_hash_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .llseek = default_llseek, + .read = lrng_acvt_hash_read, + .write = lrng_acvt_hash_write, +}; + +#endif /* CONFIG_LRNG_ACVT_DRNG */ + +/************************************************************************** + * Debugfs interface + **************************************************************************/ + +static int __init lrng_raw_init(void) +{ + struct dentry *lrng_raw_debugfs_root; + + lrng_raw_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + +#ifdef CONFIG_LRNG_RAW_HIRES_ENTROPY + debugfs_create_file_unsafe("lrng_raw_hires", 0400, + lrng_raw_debugfs_root, NULL, + &lrng_raw_hires_fops); +#endif +#ifdef CONFIG_LRNG_RAW_JIFFIES_ENTROPY + debugfs_create_file_unsafe("lrng_raw_jiffies", 0400, + lrng_raw_debugfs_root, NULL, + &lrng_raw_jiffies_fops); +#endif +#ifdef CONFIG_LRNG_RAW_IRQ_ENTROPY + debugfs_create_file_unsafe("lrng_raw_irq", 0400, lrng_raw_debugfs_root, + NULL, &lrng_raw_irq_fops); +#endif +#ifdef CONFIG_LRNG_RAW_IRQFLAGS_ENTROPY + debugfs_create_file_unsafe("lrng_raw_irqflags", 0400, + lrng_raw_debugfs_root, NULL, + &lrng_raw_irqflags_fops); +#endif +#ifdef CONFIG_LRNG_RAW_RETIP_ENTROPY + debugfs_create_file_unsafe("lrng_raw_retip", 0400, + lrng_raw_debugfs_root, NULL, + &lrng_raw_retip_fops); +#endif +#ifdef CONFIG_LRNG_RAW_REGS_ENTROPY + debugfs_create_file_unsafe("lrng_raw_regs", 0400, + lrng_raw_debugfs_root, NULL, + &lrng_raw_regs_fops); +#endif +#ifdef CONFIG_LRNG_RAW_ARRAY + debugfs_create_file_unsafe("lrng_raw_array", 0400, + lrng_raw_debugfs_root, NULL, + &lrng_raw_array_fops); +#endif +#ifdef CONFIG_LRNG_IRQ_PERF + debugfs_create_file_unsafe("lrng_irq_perf", 0400, lrng_raw_debugfs_root, + NULL, &lrng_irq_perf_fops); +#endif +#ifdef CONFIG_LRNG_ACVT_HASH + debugfs_create_file_unsafe("lrng_acvt_hash", 0600, + lrng_raw_debugfs_root, NULL, + &lrng_acvt_hash_fops); +#endif + + return 0; +} + +module_init(lrng_raw_init); diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index af5ad51d3eef..b12ae9bdebf4 100644 --- a/include/crypto/drbg.h +++ 
b/include/crypto/drbg.h @@ -283,4 +283,11 @@ enum drbg_prefixes { DRBG_PREFIX3 }; +extern int drbg_alloc_state(struct drbg_state *drbg); +extern void drbg_dealloc_state(struct drbg_state *drbg); +extern void drbg_convert_tfm_core(const char *cra_driver_name, int *coreref, + bool *pr); +extern const struct drbg_core drbg_cores[]; +extern unsigned short drbg_sec_strength(drbg_flag_t flags); + #endif /* _DRBG_H */ diff --git a/crypto/jitterentropy.h b/include/crypto/internal/jitterentropy.h similarity index 100% rename from crypto/jitterentropy.h rename to include/crypto/internal/jitterentropy.h diff --git a/include/linux/lrng.h b/include/linux/lrng.h new file mode 100644 index 000000000000..3e8f93b53c84 --- /dev/null +++ b/include/linux/lrng.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * Copyright (C) 2018 - 2021, Stephan Mueller + */ + +#ifndef _LRNG_H +#define _LRNG_H + +#include +#include +#include + +/* + * struct lrng_crypto_cb - cryptographic callback functions + * @lrng_drng_name Name of DRNG + * @lrng_hash_name Name of Hash used for reading entropy pool + * @lrng_drng_alloc: Allocate DRNG -- the provided integer should be + * used for sanity checks. + * return: allocated data structure or PTR_ERR on + * error + * @lrng_drng_dealloc: Deallocate DRNG + * @lrng_drng_seed_helper: Seed the DRNG with data of arbitrary length + * drng: is pointer to data structure allocated + * with lrng_drng_alloc + * return: >= 0 on success, < 0 on error + * @lrng_drng_generate_helper: Generate random numbers from the DRNG with + * arbitrary length + * @lrng_hash_alloc: Allocate the hash for reading the entropy pool + * return: allocated data structure (NULL is + * success too) or ERR_PTR on error + * @lrng_hash_dealloc: Deallocate Hash + * @lrng_hash_digestsize: Return the digestsize for the used hash to read + * out entropy pool + * hash: is pointer to data structure allocated + * with lrng_hash_alloc + * return: size of digest of hash in bytes + * @lrng_hash_init: Initialize hash + * hash: is pointer to data structure allocated + * with lrng_hash_alloc + * return: 0 on success, < 0 on error + * @lrng_hash_update: Update hash operation + * hash: is pointer to data structure allocated + * with lrng_hash_alloc + * return: 0 on success, < 0 on error + * @lrng_hash_final Final hash operation + * hash: is pointer to data structure allocated + * with lrng_hash_alloc + * return: 0 on success, < 0 on error + * @lrng_hash_desc_zero Zeroization of hash state buffer + * + * Assumptions: + * + * 1. Hash operation will not sleep + * 2. The hash' volatile state information is provided with *shash by caller. 
+ */ +struct lrng_crypto_cb { + const char *(*lrng_drng_name)(void); + const char *(*lrng_hash_name)(void); + void *(*lrng_drng_alloc)(u32 sec_strength); + void (*lrng_drng_dealloc)(void *drng); + int (*lrng_drng_seed_helper)(void *drng, const u8 *inbuf, u32 inbuflen); + int (*lrng_drng_generate_helper)(void *drng, u8 *outbuf, u32 outbuflen); + void *(*lrng_hash_alloc)(void); + void (*lrng_hash_dealloc)(void *hash); + u32 (*lrng_hash_digestsize)(void *hash); + int (*lrng_hash_init)(struct shash_desc *shash, void *hash); + int (*lrng_hash_update)(struct shash_desc *shash, const u8 *inbuf, + u32 inbuflen); + int (*lrng_hash_final)(struct shash_desc *shash, u8 *digest); + void (*lrng_hash_desc_zero)(struct shash_desc *shash); +}; + +/* Register cryptographic backend */ +#ifdef CONFIG_LRNG_DRNG_SWITCH +int lrng_set_drng_cb(const struct lrng_crypto_cb *cb); +#else /* CONFIG_LRNG_DRNG_SWITCH */ +static inline int +lrng_set_drng_cb(const struct lrng_crypto_cb *cb) { return -EOPNOTSUPP; } +#endif /* CONFIG_LRNG_DRNG_SWITCH */ + +#endif /* _LRNG_H */ -- 2.36.0