# An auth token for admin access to Diom # # It's useful for bootstrapping a new Diom cluster, or using it in CI pipelines or other # automated testing environments where you need a stable, well-known token for testing or # scripted setup. # # If you're using it to bootstrap a new Diom cluster, it's recommended that you only use # it during bootstrapping, and remove this configuration once done. # admin_token = # How often to run background cleanup/garbage collection jobs # # Correctness should never be affected by this, just wasted memory/disk. background_cleanup_interval_ms = 10000 # YAML bootstrap data. Takes precedence over `bootstrap_cfg_path` # bootstrap_cfg = # The path to a YAML bootstrap file # bootstrap_cfg_path = # Maximum time to wait for cluster initialization at startup # # If this is unset, we will wait indefinitely. # bootstrap_max_wait_ms = # The environment (dev, staging, or prod) that the server is running in. environment = "dev" # When fsyncing, should we use fsync(2) or fdatasync(2) fsync_mode = "sync-data" # The address to listen on listen_address = "[::]:8624" # The log format that all output will follow. Supported: default, json log_format = "default" # The log level to run the service with. Supported: info, debug, trace log_level = "info" # How to persist data to the actual underlying database # # This is similar to the `cluster.log_sync_mode` options, but applies to the actual # primary data as opposed to the log, and is applied at every batch commit from the # underlying replication system. sync_mode = "buffer" # Configuration for the cluster/replication system [cluster] # Address that other nodes should use to communicate with this one. # # If not passed, we'll attempt to discover it at boot time. # This cannot currently be changed after cluster initialization. # advertised_address = # Automatically initialize the cluster on bootup if we can't discover any # peers and we don't have any existing state. 
# # If you initialize all peers at exactly the same time, this can potentially cause errors. auto_initialize = true # Timeout for new connections. # # If you want to be tolerant of dropped packets, this should be set to at least TO + ε, # where TO is the initial TCP retransmission timer (typically either 1s or 3s, # depending on your operating system). connection_timeout_ms = 3100 # Timeout for discovery requests. # # This should be set to approximately 2X the RTT of your farthest-apart nodes. discovery_request_timeout_ms = 10000 discovery_timeout_ms = 30000 # The maximum time to let an election run for. # # This should be set to at least 5x the RTT of your farthest-apart nodes # and must not be less than `election_timeout_min_ms`. election_timeout_max_ms = 3500 # The minimum time to let an election run for. # # This should be set to at least 4x the RTT of your farthest-apart nodes, # and must not be less than `heartbeat_interval_ms`. election_timeout_min_ms = 1500 # How often to send heartbeats. # # This controls how fast lost leaders can be detected. # Must not be greater than `election_timeout_min_ms`. heartbeat_interval_ms = 500 # The address to listen on for replication. listen_address = "[::]:8625" log_index_interval_ms = 600000 # Location to store logs. For high-throughput systems, this should be a separate volume. # # Defaults to a subdirectory under the persistent DB path if not passed. # log_path = # Automatically attempt to determine the log sync interval from observed fsync timings log_sync_interval_auto = true # Interval (in transactions) between fsyncing the commit log. # # This can be used to force transactions to fsync logs more often than the # default `log_sync_interval_ms` timer. If `log_sync_mode` is set to "buffer", it's # reasonable to set this value to `1` to flush to the OS buffer on every log. 
# # If this is set to 0, only the interval timer will be used # # If this is set to a value higher than 1 and the interval timer is long, then # single-threaded clients (including bootstrap) will be extremely slow. log_sync_interval_commits = 0 # Interval (in milliseconds) between fsyncing the commit log. # # If `log_sync_interval_auto` is set to true, this is just the initial estimate # and will be auto-scaled log_sync_interval_ms = 2 # Should a log sync actually trigger an fsync? # # If this is set to "buffer" and a node suffers a catastrophic failure where OS buffers # are not written to disk, that node should be erased and re-snapshotted before being # re-added to the cluster. log_sync_mode = "sync" # Human-facing name for this cluster. # # Only used in discovery and debugging. name = "diom" # How many commits behind must the current node be to be considered "lagging" and eligible for # re-snapshotting? # # The ideal value here depends both on your data-set size and on your average write-rate. If # your data is large, then setting this value too small can mean that a snapshot can never # catch up because it'll take too long to replicate. Typically this should be around twice the # number of commits that you generate in the time it takes to replicate a full snapshot. replication_lag_threshold = 50000 # Timeout for replication requests. # # This should be set to approximately 2X the RTT of your farthest-apart nodes. replication_request_timeout_ms = 5000 # Shared secret for intra-cluster communications # # This must be the same on all nodes. # secret = # Other nodes that we should attempt to join a cluster with at boot time. seed_nodes = [] # Timeout (in milliseconds) for sending a snapshot to another node. # # NOTE(review): the original comment here was a copy of the election-timeout text; # presumably this should scale with snapshot size and network bandwidth — confirm. 
send_snapshot_ms = 30000 # Trigger a background snapshot after this many milliseconds snapshot_after_ms = 900000 # Trigger a background snapshot after this many writes # snapshot_after_writes = # Location to store snapshots. # # This volume must have at least as much space as the persistent DB path # and ephemeral DB path combined. Defaults to a subdirectory under the # persistent DB path if not passed. # snapshot_path = startup_discovery_delay_ms = 10 # Storage configuration for the ephemeral database, which is used by rate-limiting and some # other modules, and can be placed on less-durable storage, such as local instance storage [ephemeral_db] # Amount of memory to reserve for the database layer's # caches for this database type. # # Can be specified as a bare value of bytes (e.g., 1024000), a unit-ed amount # (e.g., 1024MiB), or a percentage (e.g., 20%), which will be applied against # the current cgroup limit if present and the total system memory otherwise cache_size = "20%" # Filename under the directory specified in `path`. filename = "fjall_ephemeral" # Directory in which this database is stored path = "./db" # Configuration for verifying JWT bearer tokens. # # When set, bearer tokens in JWT format are verified using this configuration. # The JWT must contain a `role` claim (string) and may contain a `context` claim # (object with string values) that is forwarded to internal diom handlers. [jwt] # JWT algorithm. # # Supported values are HS256, HS384, HS512, RS256, RS384, RS512, ES256, ES384, PS256, PS384, PS512. # algorithm = # Secret for JWT algorithm HS256, HS384 or HS512 # secret = # Public key PEM for JWT algorithm RS256, RS384, RS512, ES256, ES384, PS256, PS384 or PS512 # public_key_pem = # Expected `aud` values. When set, the token must contain one of these # values in its `aud` claim. When absent, `aud` is not validated. # audience = # Expected `iss` values. When set, the token's `iss` claim must match one # of these values. 
When absent, `iss` is not validated. # issuer = [opentelemetry] # The OpenTelemetry address to send events to if given. # # Currently only GRPC exports are supported. # address = # The OpenTelemetry address to send metrics to if given. # # If not specified, the server will attempt to fall back # to `address`. # metrics_address = metrics_period_ms = 10000 # OpenTelemetry metrics protocol # # By default, metrics are sent via GRPC. Some metrics destinations, most # notably Prometheus, only support receiving metrics via HTTP. metrics_protocol = "grpc" # The ratio at which to sample spans when sending to OpenTelemetry. # # When not given it defaults to always sending. # If the OpenTelemetry address is not set, this will do nothing. # sample_ratio = # The service name to use for OpenTelemetry. If not provided, it defaults to "diom". service_name = "diom" # Storage configuration for the persistent database, which is used by most modules and # should be placed on durable storage [persistent_db] # Amount of memory to reserve for the database layer's # caches for this database type. # # Can be specified as a bare value of bytes (e.g., 1024000), a unit-ed amount # (e.g., 1024MiB), or a percentage (e.g., 20%), which will be applied against # the current cgroup limit if present and the total system memory otherwise cache_size = "20%" # Filename under the directory specified in `path`. filename = "fjall_persistent" # Directory in which this database is stored path = "./db"