# # Copyright © 2016-2026 The Thingsboard Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Server common parameters server: # Server bind-address address: "${HTTP_BIND_ADDRESS:0.0.0.0}" # Server bind port port: "${HTTP_BIND_PORT:8080}" # Server forward headers strategy. Required for SWAGGER UI when reverse proxy is used forward_headers_strategy: "${HTTP_FORWARD_HEADERS_STRATEGY:framework}" # Server SSL configuration ssl: # Enable/disable SSL support enabled: "${SSL_ENABLED:false}" # Server SSL credentials credentials: # Server credentials type (PEM - pem certificate file; KEYSTORE - java keystore) type: "${SSL_CREDENTIALS_TYPE:PEM}" # PEM server credentials pem: # Path to the server certificate file (holds server certificate or certificate chain, may include server private key) cert_file: "${SSL_PEM_CERT:server.pem}" # Path to the server certificate private key file (optional). 
Required if the private key is not present in the server certificate file key_file: "${SSL_PEM_KEY:server_key.pem}" # Server certificate private key password (optional) key_password: "${SSL_PEM_KEY_PASSWORD:server_key_password}" # Keystore server credentials keystore: # Type of the key store (JKS or PKCS12) type: "${SSL_KEY_STORE_TYPE:PKCS12}" # Path to the key store that holds the SSL certificate store_file: "${SSL_KEY_STORE:classpath:keystore/keystore.p12}" # Password used to access the key store store_password: "${SSL_KEY_STORE_PASSWORD:thingsboard}" # Key alias key_alias: "${SSL_KEY_ALIAS:tomcat}" # Password used to access the key key_password: "${SSL_KEY_PASSWORD:thingsboard}" # HTTP settings http: # Semi-colon-separated list of urlPattern=maxPayloadSize pairs that define max http request size for specified url pattern. After first match all other will be skipped max_payload_size: "${HTTP_MAX_PAYLOAD_SIZE_LIMIT_CONFIGURATION:/api/image*/**=52428800;/api/resource/**=52428800;/api/**=16777216}" # HTTP/2 support (takes effect only if server SSL is enabled) http2: # Enable/disable HTTP/2 support enabled: "${HTTP2_ENABLED:true}" # Log errors with stacktrace when REST API throws an exception with the message "Please contact sysadmin" log_controller_error_stack_trace: "${HTTP_LOG_CONTROLLER_ERROR_STACK_TRACE:false}" ws: # Timeout for sending data to client WebSocket session in milliseconds send_timeout: "${TB_SERVER_WS_SEND_TIMEOUT:5000}" # recommended timeout >= 30 seconds. The platform will attempt to send a 'ping' request 3 times within the timeout ping_timeout: "${TB_SERVER_WS_PING_TIMEOUT:30000}" dynamic_page_link: # Refresh rate of the dynamic alarm end entity data queries refresh_interval: "${TB_SERVER_WS_DYNAMIC_PAGE_LINK_REFRESH_INTERVAL_SEC:60}" # Thread pool size to execute dynamic queries refresh_pool_size: "${TB_SERVER_WS_DYNAMIC_PAGE_LINK_REFRESH_POOL_SIZE:1}" # Maximum number of dynamic queries per refresh interval. 
For example, no more than 10 alarm queries are executed by the user simultaneously in all browsers. max_alarm_queries_per_refresh_interval: "${TB_SERVER_WS_MAX_ALARM_QUERIES_PER_REFRESH_INTERVAL:10}" # Maximum number of dynamic queries per user. For example, no more than 10 alarm widgets opened by the user simultaneously in all browsers max_per_user: "${TB_SERVER_WS_DYNAMIC_PAGE_LINK_MAX_PER_USER:10}" # Maximum number of entities returned for single entity subscription. For example, no more than 10,000 entities on the map widget max_entities_per_data_subscription: "${TB_SERVER_WS_MAX_ENTITIES_PER_DATA_SUBSCRIPTION:10000}" # Maximum number of alarms returned for single alarm subscription. For example, no more than 10,000 alarms on the alarm widget max_entities_per_alarm_subscription: "${TB_SERVER_WS_MAX_ENTITIES_PER_ALARM_SUBSCRIPTION:10000}" # Maximum queue size of the websocket updates per session. This restriction prevents infinite updates of WS max_queue_messages_per_session: "${TB_SERVER_WS_DEFAULT_QUEUE_MESSAGES_PER_SESSION:1000}" # Maximum time between WS session opening and sending auth command auth_timeout_ms: "${TB_SERVER_WS_AUTH_TIMEOUT_MS:10000}" rate_limits: # Per-tenant rate limit for WS subscriptions subscriptions_per_tenant: "${TB_SERVER_WS_SUBSCRIPTIONS_PER_TENANT_RATE_LIMIT:}" # Per-user rate limit for WS subscriptions subscriptions_per_user: "${TB_SERVER_WS_SUBSCRIPTIONS_PER_USER_RATE_LIMIT:}" # Maximum number of active originator alarm ids being saved in cache for single alarm status subscription. For example, no more than 10 alarm ids on the alarm widget alarms_per_alarm_status_subscription_cache_size: "${TB_ALARMS_PER_ALARM_STATUS_SUBSCRIPTION_CACHE_SIZE:10}" rest: server_side_rpc: # Minimum value of the server-side RPC timeout. May override value provided in the REST API call. # Since 2.5 migration to queues, the RPC delay depends on the size of the pending messages in the queue. 
# So default UI parameter of 500ms may not be sufficient for loaded environments. min_timeout: "${MIN_SERVER_SIDE_RPC_TIMEOUT:5000}" # Default value of the server-side RPC timeout. default_timeout: "${DEFAULT_SERVER_SIDE_RPC_TIMEOUT:10000}" rate_limits: # Limit that prohibits resetting the password for the user too often. The value of the rate limit. By default, no more than 5 requests per hour reset_password_per_user: "${RESET_PASSWORD_PER_USER_RATE_LIMIT_CONFIGURATION:5:3600}" rule_engine: # Default timeout for waiting response of REST API request to Rule Engine in milliseconds response_timeout: "${DEFAULT_RULE_ENGINE_RESPONSE_TIMEOUT:10000}" # Application info parameters app: # Application version version: "@project.version@" # Zookeeper connection parameters zk: # Enable/disable zookeeper discovery service. enabled: "${ZOOKEEPER_ENABLED:false}" # Zookeeper connect string url: "${ZOOKEEPER_URL:localhost:2181}" # Zookeeper retry interval in milliseconds retry_interval_ms: "${ZOOKEEPER_RETRY_INTERVAL_MS:3000}" # Zookeeper connection timeout in milliseconds connection_timeout_ms: "${ZOOKEEPER_CONNECTION_TIMEOUT_MS:3000}" # Zookeeper session timeout in milliseconds session_timeout_ms: "${ZOOKEEPER_SESSION_TIMEOUT_MS:3000}" # Name of the directory in zookeeper 'filesystem' zk_dir: "${ZOOKEEPER_NODES_DIR:/thingsboard}" # The recalculate_delay property is recommended in a microservices architecture setup for rule-engine services. # This property provides a pause to ensure that when a rule-engine service is restarted, other nodes don't immediately attempt to recalculate their partitions. # The delay is recommended because the initialization of rule chain actors is time-consuming. Avoiding unnecessary recalculations during a restart can enhance system performance and stability. recalculate_delay: "${ZOOKEEPER_RECALCULATE_DELAY_MS:0}" # Cluster parameters cluster: stats: # Enable/Disable the cluster statistics. 
Calculates the number of messages sent between cluster nodes based on each type enabled: "${TB_CLUSTER_STATS_ENABLED:false}" # Interval of printing the cluster stats to the log file print_interval_ms: "${TB_CLUSTER_STATS_PRINT_INTERVAL_MS:10000}" # Plugins configuration parameters plugins: # Comma-separated package list used during classpath scanning for plugins scan_packages: "${PLUGINS_SCAN_PACKAGES:org.thingsboard.server.extensions,org.thingsboard.rule.engine}" # Security parameters security: # JWT Token parameters jwt: # Since 3.4.2 values are persisted in the database during installation or upgrade. On Install, the key will be generated randomly if no custom value set. You can change it later from Web UI under SYS_ADMIN tokenExpirationTime: "${JWT_TOKEN_EXPIRATION_TIME:9000}" # Number of seconds (2.5 hours) refreshTokenExpTime: "${JWT_REFRESH_TOKEN_EXPIRATION_TIME:604800}" # Number of seconds (1 week). tokenIssuer: "${JWT_TOKEN_ISSUER:thingsboard.io}" # User JWT Token issuer tokenSigningKey: "${JWT_TOKEN_SIGNING_KEY:thingsboardDefaultSigningKey}" # Base64 encoded # Enable/disable access to Tenant Administrators JWT token by System Administrator or Customer Users JWT token by Tenant Administrator user_token_access_enabled: "${SECURITY_USER_TOKEN_ACCESS_ENABLED:true}" # API key parameters api_key: # Prefix for the auto-generated API key. For example, tb_Ood4dQMxWvMH-76z3E_Cv0mZaBWT0Clk3hRSO0P_jNQ value_prefix: "${SECURITY_API_KEY_VALUE_PREFIX:tb_}" # Length of the auto-generated API key. 
Max is 255 value_bytes_size: "${SECURITY_API_KEY_VALUE_PREFIX:64}" # Enable/disable case-sensitive username login user_login_case_sensitive: "${SECURITY_USER_LOGIN_CASE_SENSITIVE:true}" claim: # Enable/disable claiming devices; if false -> the device's [claimingAllowed] SERVER_SCOPE attribute must be set to [true] to allow claiming the specific device allowClaimingByDefault: "${SECURITY_CLAIM_ALLOW_CLAIMING_BY_DEFAULT:true}" # Time allowed to claim the device in milliseconds duration: "${SECURITY_CLAIM_DURATION:86400000}" # 1 minute, note this value must equal claimDevices.timeToLiveInMinutes value basic: # Enable/Disable basic security options enabled: "${SECURITY_BASIC_ENABLED:false}" oauth2: # Redirect URL where access code from external user management system will be processed loginProcessingUrl: "${SECURITY_OAUTH2_LOGIN_PROCESSING_URL:/login/oauth2/code/}" githubMapper: # The email addresses that will be mapped from the URL emailUrl: "${SECURITY_OAUTH2_GITHUB_MAPPER_EMAIL_URL_KEY:https://api.github.com/user/emails}" java_cacerts: # CA certificates keystore default path. Typically this keystore is at JAVA_HOME/lib/security/cacerts path: "${SECURITY_JAVA_CACERTS_PATH:${java.home}/lib/security/cacerts}" # The password of the cacerts keystore file password: "${SECURITY_JAVA_CACERTS_PASSWORD:changeit}" # Mail settings parameters mail: oauth2: # Interval for checking refresh token expiration in seconds(by default, 1 day). refreshTokenCheckingInterval: "${REFRESH_TOKEN_EXPIRATION_CHECKING_INTERVAL:86400}" # Rate limits for sending mails per tenant. As example for 1000 per minute and 10000 per hour is "1000:60,10000:3600" per_tenant_rate_limits: "${MAIL_PER_TENANT_RATE_LIMITS:}" # Usage statistics parameters usage: stats: report: # Enable/Disable the collection of API usage statistics. 
Collected on a system and tenant level by default enabled: "${USAGE_STATS_REPORT_ENABLED:true}" # Enable/Disable the collection of API usage statistics on a customer level enabled_per_customer: "${USAGE_STATS_REPORT_PER_CUSTOMER_ENABLED:false}" # Statistics reporting interval, set to send summarized data every 10 seconds by default interval: "${USAGE_STATS_REPORT_INTERVAL:60}" # Amount of statistic messages in pack pack_size: "${USAGE_STATS_REPORT_PACK_SIZE:1024}" check: # Interval of checking the start of the next cycle and re-enabling the blocked tenants/customers cycle: "${USAGE_STATS_CHECK_CYCLE:60000}" # In milliseconds. The default value is 3 minutes gauge_report_interval: "${USAGE_STATS_GAUGE_REPORT_INTERVAL:180000}" devices: # In seconds, the default value is 1 minute. When changing, in cluster mode, make sure usage.stats.gauge_report_interval is set to x2-x3 of this value report_interval: "${DEVICES_STATS_REPORT_INTERVAL:60}" # UI settings parameters ui: # Dashboard parameters dashboard: # Maximum allowed datapoints fetched by widgets max_datapoints_limit: "${DASHBOARD_MAX_DATAPOINTS_LIMIT:50000}" # Help parameters help: # Base URL for UI help assets base-url: "${UI_HELP_BASE_URL:https://raw.githubusercontent.com/thingsboard/thingsboard-ui-help/release-4.4}" # Database telemetry parameters database: ts_max_intervals: "${DATABASE_TS_MAX_INTERVALS:700}" # Max number of DB queries generated by a single API call to fetch telemetry records ts: type: "${DATABASE_TS_TYPE:sql}" # cassandra, sql, or timescale (for hybrid mode, DATABASE_TS_TYPE value should be cassandra, or timescale) ts_latest: type: "${DATABASE_TS_LATEST_TYPE:sql}" # cassandra, sql, or timescale (for hybrid mode, DATABASE_TS_TYPE value should be cassandra, or timescale) # Cassandra driver configuration parameters cassandra: # Thingsboard cluster name cluster_name: "${CASSANDRA_CLUSTER_NAME:Thingsboard Cluster}" # Thingsboard keyspace name keyspace_name: "${CASSANDRA_KEYSPACE_NAME:thingsboard}" # 
Specify node list url: "${CASSANDRA_URL:127.0.0.1:9042}" # Specify the local data center name local_datacenter: "${CASSANDRA_LOCAL_DATACENTER:datacenter1}" ssl: # Enable/disable secure connection enabled: "${CASSANDRA_USE_SSL:false}" # Enable/disable validation of Cassandra server hostname # If enabled, the hostname of the Cassandra server must match the CN of the server certificate hostname_validation: "${CASSANDRA_SSL_HOSTNAME_VALIDATION:true}" # Set trust store for client authentication of the server (optional, uses trust store from default SSLContext if not set) trust_store: "${CASSANDRA_SSL_TRUST_STORE:}" # The password for Cassandra trust store key trust_store_password: "${CASSANDRA_SSL_TRUST_STORE_PASSWORD:}" # Set key store for server authentication of the client (optional, uses key store from default SSLContext if not set) # A key store is only needed if the Cassandra server requires client authentication key_store: "${CASSANDRA_SSL_KEY_STORE:}" # The password for the Cassandra key store key_store_password: "${CASSANDRA_SSL_KEY_STORE_PASSWORD:}" # Comma-separated list of cipher suites (optional, uses Java default cipher suites if not set) cipher_suites: "${CASSANDRA_SSL_CIPHER_SUITES:}" # Enable/disable JMX jmx: "${CASSANDRA_USE_JMX:false}" # Enable/disable metrics collection. 
metrics: "${CASSANDRA_USE_METRICS:false}" # NONE SNAPPY LZ4 compression: "${CASSANDRA_COMPRESSION:none}" # Specify cassandra cluster initialization timeout in milliseconds (if no hosts are available during startup) init_timeout_ms: "${CASSANDRA_CLUSTER_INIT_TIMEOUT_MS:300000}" # Specify cassandra cluster initialization retry interval (if no hosts available during startup) init_retry_interval_ms: "${CASSANDRA_CLUSTER_INIT_RETRY_INTERVAL_MS:3000}" # Cassandra max local requests per connection max_requests_per_connection_local: "${CASSANDRA_MAX_REQUESTS_PER_CONNECTION_LOCAL:32768}" # Cassandra max remote requests per connection max_requests_per_connection_remote: "${CASSANDRA_MAX_REQUESTS_PER_CONNECTION_REMOTE:32768}" # Credential parameters credentials: "${CASSANDRA_USE_CREDENTIALS:false}" # Specify your username username: "${CASSANDRA_USERNAME:}" # Specify your password password: "${CASSANDRA_PASSWORD:}" # Astra DB connect https://astra.datastax.com/ cloud: # /etc/thingsboard/astra/secure-connect-thingsboard.zip secure_connect_bundle_path: "${CASSANDRA_CLOUD_SECURE_BUNDLE_PATH:}" # DucitQPHMzPCBOZqFYexAfKk client_id: "${CASSANDRA_CLOUD_CLIENT_ID:}" # ZnF7FpuHp43FP5BzM+KY8wGmSb4Ql6BhT4Z7sOU13ze+gXQ-n7OkFpNuB,oACUIQObQnK0g4bSPoZhK5ejkcF9F.j6f64j71Sr.tiRe0Fsq2hPS1ZCGSfAaIgg63IydG client_secret: "${CASSANDRA_CLOUD_CLIENT_SECRET:}" # Cassandra cluster connection socket parameters # socket: # Sets the timeout, in milliseconds, of a native connection from ThingsBoard to Cassandra. The default value is 5000 connect_timeout: "${CASSANDRA_SOCKET_TIMEOUT:5000}" # Timeout before closing the connection. Value set in milliseconds read_timeout: "${CASSANDRA_SOCKET_READ_TIMEOUT:20000}" # Gets if TCP keep-alive must be used keep_alive: "${CASSANDRA_SOCKET_KEEP_ALIVE:true}" # Enable/Disable reuse-address. The socket option allows for the reuse of local addresses and ports reuse_address: "${CASSANDRA_SOCKET_REUSE_ADDRESS:true}" # Sets the linger-on-close timeout. 
By default, this option is not set by the driver. The actual value will be the default from the underlying Netty transport so_linger: "${CASSANDRA_SOCKET_SO_LINGER:}" # Enable/Disable Nagle's algorithm tcp_no_delay: "${CASSANDRA_SOCKET_TCP_NO_DELAY:false}" # Sets a hint to the size of the underlying buffers for incoming network I/O. By default, this option is not set by the driver. The actual value will be the default from the underlying Netty transport receive_buffer_size: "${CASSANDRA_SOCKET_RECEIVE_BUFFER_SIZE:}" # Returns the hint to the size of the underlying buffers for outgoing network I/O. By default, this option is not set by the driver. The actual value will be the default from the underlying Netty transport send_buffer_size: "${CASSANDRA_SOCKET_SEND_BUFFER_SIZE:}" # Cassandra cluster connection query parameters query: # Consistency levels in Cassandra can be configured to manage availability versus data accuracy. The consistency level defaults to ONE for all write and read operations read_consistency_level: "${CASSANDRA_READ_CONSISTENCY_LEVEL:ONE}" # Consistency levels in Cassandra can be configured to manage availability versus data accuracy. The consistency level defaults to ONE for all write and read operations write_consistency_level: "${CASSANDRA_WRITE_CONSISTENCY_LEVEL:ONE}" # The fetch size specifies how many rows will be returned at once by Cassandra (in other words, it’s the size of each page) default_fetch_size: "${CASSANDRA_DEFAULT_FETCH_SIZE:2000}" # Specify partitioning size for timestamp key-value storage. Example: MINUTES, HOURS, DAYS, MONTHS, INDEFINITE ts_key_value_partitioning: "${TS_KV_PARTITIONING:MONTHS}" # Enable/Disable timestamp key-value partitioning on read queries use_ts_key_value_partitioning_on_read: "${USE_TS_KV_PARTITIONING_ON_READ:true}" # Safety trigger to fall back to use_ts_key_value_partitioning_on_read as true if estimated partitions count is greater than safety trigger value. 
# It helps to prevent building huge partition list (OOM) for corner cases (like from 0 to infinity) and prefer fewer reads strategy from NoSQL database use_ts_key_value_partitioning_on_read_max_estimated_partition_count: "${USE_TS_KV_PARTITIONING_ON_READ_MAX_ESTIMATED_PARTITION_COUNT:40}" # The number of partitions that are cached in memory of each service. It is useful to decrease the load of re-inserting the same partitions again ts_key_value_partitions_max_cache_size: "${TS_KV_PARTITIONS_MAX_CACHE_SIZE:100000}" # Timeseries Time To Live (in seconds) for Cassandra Record. 0 - record has never expired ts_key_value_ttl: "${TS_KV_TTL:0}" # Maximum number of Cassandra queries that are waiting for execution buffer_size: "${CASSANDRA_QUERY_BUFFER_SIZE:200000}" # Maximum number of concurrent Cassandra queries concurrent_limit: "${CASSANDRA_QUERY_CONCURRENT_LIMIT:1000}" # Max time in milliseconds query waits for execution permit_max_wait_time: "${PERMIT_MAX_WAIT_TIME:120000}" # Amount of threads to dispatch cassandra queries dispatcher_threads: "${CASSANDRA_QUERY_DISPATCHER_THREADS:2}" callback_threads: "${CASSANDRA_QUERY_CALLBACK_THREADS:4}" # Buffered rate executor (read, write) for managing I/O rate. See "nosql-*-callback" threads in JMX result_processing_threads: "${CASSANDRA_QUERY_RESULT_PROCESSING_THREADS:50}" # Result set transformer and processing. See "cassandra-callback" threads in JMX # Cassandra query queue polling interval in milliseconds poll_ms: "${CASSANDRA_QUERY_POLL_MS:50}" # Interval in milliseconds for printing Cassandra query queue statistic rate_limit_print_interval_ms: "${CASSANDRA_QUERY_RATE_LIMIT_PRINT_MS:10000}" # When saving a value, set other data types to null (to avoid having multiple telemetry values with the same timestamp). 
set_null_values_enabled: "${CASSANDRA_QUERY_SET_NULL_VALUES_ENABLED:true}" # log one of cassandra queries with specified frequency (0 - logging is disabled) print_queries_freq: "${CASSANDRA_QUERY_PRINT_FREQ:0}" tenant_rate_limits: # Whether to print rate-limited tenant names when printing Cassandra query queue statistic print_tenant_names: "${CASSANDRA_QUERY_TENANT_RATE_LIMITS_PRINT_TENANT_NAMES:false}" # SQL configuration parameters sql: # Specify batch size for persisting attribute updates attributes: batch_size: "${SQL_ATTRIBUTES_BATCH_SIZE:1000}" # Batch size for persisting attribute updates batch_max_delay: "${SQL_ATTRIBUTES_BATCH_MAX_DELAY_MS:50}" # Max timeout for attributes entries queue polling. The value is set in milliseconds stats_print_interval_ms: "${SQL_ATTRIBUTES_BATCH_STATS_PRINT_MS:10000}" # Interval in milliseconds for printing attributes updates statistic batch_threads: "${SQL_ATTRIBUTES_BATCH_THREADS:3}" # batch thread count has to be a prime number like 3 or 5 to gain perfect hash distribution value_no_xss_validation: "${SQL_ATTRIBUTES_VALUE_NO_XSS_VALIDATION:false}" # If true attribute values will be checked for XSS vulnerability ts: batch_size: "${SQL_TS_BATCH_SIZE:10000}" # Batch size for persisting timeseries inserts batch_max_delay: "${SQL_TS_BATCH_MAX_DELAY_MS:100}" # Max timeout for time-series entries queue polling. 
The value set in milliseconds stats_print_interval_ms: "${SQL_TS_BATCH_STATS_PRINT_MS:10000}" # Interval in milliseconds for printing timeseries insert statistic batch_threads: "${SQL_TS_BATCH_THREADS:3}" # batch thread count has to be a prime number like 3 or 5 to gain perfect hash distribution value_no_xss_validation: "${SQL_TS_VALUE_NO_XSS_VALIDATION:false}" # If true telemetry values will be checked for XSS vulnerability callback_thread_pool_size: "${SQL_TS_CALLBACK_THREAD_POOL_SIZE:12}" # Thread pool size for telemetry callback executor ts_latest: batch_size: "${SQL_TS_LATEST_BATCH_SIZE:1000}" # Batch size for persisting latest telemetry updates batch_max_delay: "${SQL_TS_LATEST_BATCH_MAX_DELAY_MS:50}" # Maximum timeout for latest telemetry entries queue polling. The value set in milliseconds stats_print_interval_ms: "${SQL_TS_LATEST_BATCH_STATS_PRINT_MS:10000}" # Interval in milliseconds for printing latest telemetry updates statistic batch_threads: "${SQL_TS_LATEST_BATCH_THREADS:3}" # batch thread count has to be a prime number like 3 or 5 to gain perfect hash distribution update_by_latest_ts: "${SQL_TS_UPDATE_BY_LATEST_TIMESTAMP:true}" # Update latest values only if the timestamp of the new record is greater or equals the timestamp of the previously saved latest value. The latest values are stored separately from historical values for fast lookup from DB. Insert of historical value happens in any case events: batch_size: "${SQL_EVENTS_BATCH_SIZE:10000}" # Batch size for persisting latest telemetry updates batch_max_delay: "${SQL_EVENTS_BATCH_MAX_DELAY_MS:100}" # Max timeout for latest telemetry entries queue polling. 
The value set in milliseconds stats_print_interval_ms: "${SQL_EVENTS_BATCH_STATS_PRINT_MS:10000}" # Interval in milliseconds for printing latest telemetry updates statistic batch_threads: "${SQL_EVENTS_BATCH_THREADS:3}" # batch thread count has to be a prime number like 3 or 5 to gain perfect hash distribution partition_size: "${SQL_EVENTS_REGULAR_PARTITION_SIZE_HOURS:168}" # Number of hours to partition the events. The current value corresponds to one week. debug_partition_size: "${SQL_EVENTS_DEBUG_PARTITION_SIZE_HOURS:1}" # Number of hours to partition the debug events. The current value corresponds to one hour. edge_events: batch_size: "${SQL_EDGE_EVENTS_BATCH_SIZE:1000}" # Batch size for persisting latest telemetry updates batch_max_delay: "${SQL_EDGE_EVENTS_BATCH_MAX_DELAY_MS:100}" # Max timeout for latest telemetry entries queue polling. The value set in milliseconds stats_print_interval_ms: "${SQL_EDGE_EVENTS_BATCH_STATS_PRINT_MS:10000}" # Interval in milliseconds for printing latest telemetry updates statistic partition_size: "${SQL_EDGE_EVENTS_PARTITION_SIZE_HOURS:168}" # Number of hours to partition the events. The current value corresponds to one week. audit_logs: partition_size: "${SQL_AUDIT_LOGS_PARTITION_SIZE_HOURS:168}" # Default value - 1 week alarm_comments: partition_size: "${SQL_ALARM_COMMENTS_PARTITION_SIZE_HOURS:168}" # Default value - 1 week notifications: partition_size: "${SQL_NOTIFICATIONS_PARTITION_SIZE_HOURS:168}" # Default value - 1 week # Specify whether to sort entities before batch update. Should be enabled for cluster mode to avoid deadlocks batch_sort: "${SQL_BATCH_SORT:true}" # Specify whether to remove null characters from strValue of attributes and timeseries before insert remove_null_chars: "${SQL_REMOVE_NULL_CHARS:true}" # Specify whether to log database queries and their parameters generated by the entity query repository log_queries: "${SQL_LOG_QUERIES:false}" # Threshold of slow SQL queries to log. 
The value set in milliseconds log_queries_threshold: "${SQL_LOG_QUERIES_THRESHOLD:5000}" # Enable/Disable logging statistic information about tenants log_tenant_stats: "${SQL_LOG_TENANT_STATS:true}" # Interval in milliseconds for printing the latest statistic information about the tenant log_tenant_stats_interval_ms: "${SQL_LOG_TENANT_STATS_INTERVAL_MS:60000}" postgres: # Specify partitioning size for timestamp key-value storage. Example: DAYS, MONTHS, YEARS, INDEFINITE. ts_key_value_partitioning: "${SQL_POSTGRES_TS_KV_PARTITIONING:MONTHS}" timescale: # Specify Interval size for new data chunks storage. chunk_time_interval: "${SQL_TIMESCALE_CHUNK_TIME_INTERVAL:604800000}" batch_threads: "${SQL_TIMESCALE_BATCH_THREADS:3}" # batch thread count has to be a prime number like 3 or 5 to gain perfect hash distribution ttl: ts: # Enable/disable TTL (Time To Live) for timeseries records enabled: "${SQL_TTL_TS_ENABLED:true}" execution_interval_ms: "${SQL_TTL_TS_EXECUTION_INTERVAL:86400000}" # Number of milliseconds. The current value corresponds to one day # The parameter to specify system TTL(Time To Live) value for timeseries records. Value set in seconds. # 0 - records are never expired. ts_key_value_ttl: "${SQL_TTL_TS_TS_KEY_VALUE_TTL:0}" events: # Enable/disable TTL (Time To Live) for event records enabled: "${SQL_TTL_EVENTS_ENABLED:true}" execution_interval_ms: "${SQL_TTL_EVENTS_EXECUTION_INTERVAL:3600000}" # Number of milliseconds (max random initial delay and fixed period). # Number of seconds. TTL is disabled by default. The accuracy of the cleanup depends on the sql.events.partition_size parameter. events_ttl: "${SQL_TTL_EVENTS_EVENTS_TTL:0}" # Number of seconds. The current value corresponds to one week. The accuracy of the cleanup depends on the sql.events.debug_partition_size parameter. 
debug_events_ttl: "${SQL_TTL_EVENTS_DEBUG_EVENTS_TTL:604800}" edge_events: enabled: "${SQL_TTL_EDGE_EVENTS_ENABLED:true}" # Enable/disable TTL (Time To Live) for edge event records execution_interval_ms: "${SQL_TTL_EDGE_EVENTS_EXECUTION_INTERVAL:86400000}" # Number of milliseconds. The current value corresponds to one day edge_events_ttl: "${SQL_TTL_EDGE_EVENTS_TTL:2628000}" # Number of seconds. The current value corresponds to one month alarms: checking_interval: "${SQL_ALARMS_TTL_CHECKING_INTERVAL:7200000}" # Number of milliseconds. The current value corresponds to two hours removal_batch_size: "${SQL_ALARMS_TTL_REMOVAL_BATCH_SIZE:3000}" # To delete outdated alarms not all at once but in batches rpc: enabled: "${SQL_TTL_RPC_ENABLED:true}" # Enable/disable TTL (Time To Live) for rpc call records checking_interval: "${SQL_RPC_TTL_CHECKING_INTERVAL:7200000}" # Number of milliseconds. The current value corresponds to two hours audit_logs: enabled: "${SQL_TTL_AUDIT_LOGS_ENABLED:true}" # Enable/disable TTL (Time To Live) for audit log records ttl: "${SQL_TTL_AUDIT_LOGS_SECS:0}" # Disabled by default. 
The accuracy of the cleanup depends on the sql.audit_logs.partition_size checking_interval_ms: "${SQL_TTL_AUDIT_LOGS_CHECKING_INTERVAL_MS:86400000}" # Default value - 1 day notifications: enabled: "${SQL_TTL_NOTIFICATIONS_ENABLED:true}" # Enable/disable TTL (Time To Live) for notification center records ttl: "${SQL_TTL_NOTIFICATIONS_SECS:2592000}" # Default value - 30 days checking_interval_ms: "${SQL_TTL_NOTIFICATIONS_CHECKING_INTERVAL_MS:86400000}" # Default value - 1 day api_keys: enabled: "${SQL_TTL_API_KEYS_ENABLED:true}" # Enable/disable TTL (Time To Live) for expired api keys records checking_interval_ms: "${SQL_TTL_API_KEYS_CHECKING_INTERVAL_MS:86400000}" # Default value - 1 day relations: max_level: "${SQL_RELATIONS_MAX_LEVEL:50}" # This value has to be reasonably small to prevent infinite recursion as early as possible pool_size: "${SQL_RELATIONS_POOL_SIZE:4}" # This value has to be reasonably small to prevent the relation query from blocking all other DB calls query_timeout: "${SQL_RELATIONS_QUERY_TIMEOUT_SEC:20}" # This value has to be reasonably small to prevent the relation query from blocking all other DB calls # Actor system parameters actors: system: throughput: "${ACTORS_SYSTEM_THROUGHPUT:5}" # Number of messages the actor system will process per actor before switching to processing of messages for the next actor scheduler_pool_size: "${ACTORS_SYSTEM_SCHEDULER_POOL_SIZE:1}" # Thread pool size for actor system scheduler max_actor_init_attempts: "${ACTORS_SYSTEM_MAX_ACTOR_INIT_ATTEMPTS:10}" # Maximum number of attempts to init the actor before disabling the actor app_dispatcher_pool_size: "${ACTORS_SYSTEM_APP_DISPATCHER_POOL_SIZE:1}" # Thread pool size for main actor system dispatcher tenant_dispatcher_pool_size: "${ACTORS_SYSTEM_TENANT_DISPATCHER_POOL_SIZE:2}" # Thread pool size for actor system dispatcher that process messages for tenant actors device_dispatcher_pool_size: "${ACTORS_SYSTEM_DEVICE_DISPATCHER_POOL_SIZE:4}" # Thread pool size for 
actor system dispatcher that process messages for device actors rule_dispatcher_pool_size: "${ACTORS_SYSTEM_RULE_DISPATCHER_POOL_SIZE:8}" # Thread pool size for actor system dispatcher that process messages for rule engine (chain/node) actors edge_dispatcher_pool_size: "${ACTORS_SYSTEM_EDGE_DISPATCHER_POOL_SIZE:4}" # Thread pool size for actor system dispatcher that process messages for edge actors cfm_dispatcher_pool_size: "${ACTORS_SYSTEM_CFM_DISPATCHER_POOL_SIZE:2}" # Thread pool size for actor system dispatcher that process messages for CalculatedField manager actors cfe_dispatcher_pool_size: "${ACTORS_SYSTEM_CFE_DISPATCHER_POOL_SIZE:8}" # Thread pool size for actor system dispatcher that process messages for CalculatedField entity actors tenant: create_components_on_init: "${ACTORS_TENANT_CREATE_COMPONENTS_ON_INIT:true}" # Create components in initialization session: max_concurrent_sessions_per_device: "${ACTORS_MAX_CONCURRENT_SESSION_PER_DEVICE:1}" # Max number of concurrent sessions per device sync: # Default timeout for processing requests using synchronous session (HTTP, CoAP) in milliseconds timeout: "${ACTORS_SESSION_SYNC_TIMEOUT:10000}" rule: # Specify thread pool size for database request callbacks executor service db_callback_thread_pool_size: "${ACTORS_RULE_DB_CALLBACK_THREAD_POOL_SIZE:50}" # Specify thread pool size for mail sender executor service mail_thread_pool_size: "${ACTORS_RULE_MAIL_THREAD_POOL_SIZE:40}" # Specify thread pool size for password reset emails mail_password_reset_thread_pool_size: "${ACTORS_RULE_MAIL_PASSWORD_RESET_THREAD_POOL_SIZE:10}" # Specify thread pool size for sms sender executor service sms_thread_pool_size: "${ACTORS_RULE_SMS_THREAD_POOL_SIZE:50}" # Whether to allow usage of system mail service for rules allow_system_mail_service: "${ACTORS_RULE_ALLOW_SYSTEM_MAIL_SERVICE:true}" # Whether to allow usage of system sms service for rules allow_system_sms_service: "${ACTORS_RULE_ALLOW_SYSTEM_SMS_SERVICE:true}" # Specify 
thread pool size for external call service external_call_thread_pool_size: "${ACTORS_RULE_EXTERNAL_CALL_THREAD_POOL_SIZE:50}" # Configuration for the thread pool that executes HTTP calls to AI provider APIs ai-requests-thread-pool: # The base name for threads pool-name: "${ACTORS_RULE_AI_REQUESTS_THREAD_POOL_NAME:ai-requests}" # The maximum number of concurrent HTTP requests pool-size: "${ACTORS_RULE_AI_REQUESTS_THREAD_POOL_SIZE:50}" # The maximum time in seconds to wait for active tasks to complete during graceful shutdown termination-timeout-seconds: "${ACTORS_RULE_AI_REQUESTS_THREAD_POOL_TERMINATION_TIMEOUT_SECONDS:60}" chain: # Errors for particular actors are persisted once per specified amount of milliseconds error_persist_frequency: "${ACTORS_RULE_CHAIN_ERROR_FREQUENCY:3000}" debug_mode_rate_limits_per_tenant: # Enable/Disable the rate limit of persisted debug events for all rule nodes per tenant enabled: "${ACTORS_RULE_CHAIN_DEBUG_MODE_RATE_LIMITS_PER_TENANT_ENABLED:true}" # The value of DEBUG mode rate limit. By default, no more than 50 thousand events per hour configuration: "${ACTORS_RULE_CHAIN_DEBUG_MODE_RATE_LIMITS_PER_TENANT_CONFIGURATION:50000:3600}" node: # Errors for particular actor are persisted once per specified amount of milliseconds error_persist_frequency: "${ACTORS_RULE_NODE_ERROR_FREQUENCY:3000}" transaction: # Size of queues that store messages for transaction rule nodes queue_size: "${ACTORS_RULE_TRANSACTION_QUEUE_SIZE:15000}" # Time in milliseconds for transaction to complete duration: "${ACTORS_RULE_TRANSACTION_DURATION:60000}" external: # Force acknowledgment of the incoming message for external rule nodes to decrease processing latency. # Enqueue the result of external node processing as a separate message to the rule engine. force_ack: "${ACTORS_RULE_EXTERNAL_NODE_FORCE_ACK:false}" rpc: # Maximum number of persistent RPC call retries in case of failed request delivery. 
max_retries: "${ACTORS_RPC_MAX_RETRIES:5}" # RPC submit strategies. Allowed values: BURST, SEQUENTIAL_ON_ACK_FROM_DEVICE, SEQUENTIAL_ON_RESPONSE_FROM_DEVICE. submit_strategy: "${ACTORS_RPC_SUBMIT_STRATEGY_TYPE:BURST}" # Time in milliseconds for RPC to receive a response after delivery. Used only for SEQUENTIAL_ON_RESPONSE_FROM_DEVICE submit strategy. response_timeout_ms: "${ACTORS_RPC_RESPONSE_TIMEOUT_MS:30000}" # Close transport session if RPC delivery timed out. If enabled, RPC will be reverted to the queued state. # Note: #
# DTLS maximum fragment length negotiation (RFC 6066 MaxFragmentLength codes):
#   enum {
#     2^9(1)  == 512,
#     2^10(2) == 1024,
#     2^11(3) == 2048,
#     2^12(4) == 4096,
#     (255)
#   } MaxFragmentLength;
# TLS already requires clients and servers to support fragmentation of handshake messages.
max_fragment_length: "${COAP_DTLS_MAX_FRAGMENT_LENGTH:1024}"
# Server DTLS credentials
credentials:
  # Credentials type: PEM (pem certificate file) or KEYSTORE (Java key store)
  type: "${COAP_DTLS_CREDENTIALS_TYPE:PEM}"
  # PEM server credentials
  pem:
    # Server certificate file (certificate or certificate chain; may also contain the private key)
    cert_file: "${COAP_DTLS_PEM_CERT:coapserver.pem}"
    # Server private key file. Optional; required only when the key is absent from the certificate file
    key_file: "${COAP_DTLS_PEM_KEY:coapserver_key.pem}"
    # Password for the server certificate private key (optional)
    key_password: "${COAP_DTLS_PEM_KEY_PASSWORD:server_key_password}"
  # Java keystore server credentials
  keystore:
    # Key store type: JKS or PKCS12
    type: "${COAP_DTLS_KEY_STORE_TYPE:JKS}"
    # Key store file that holds the SSL certificate
    store_file: "${COAP_DTLS_KEY_STORE:coapserver.jks}"
    # Password used to open the key store
    store_password: "${COAP_DTLS_KEY_STORE_PASSWORD:server_ks_password}"
    # Alias of the key inside the store
    key_alias: "${COAP_DTLS_KEY_ALIAS:serveralias}"
    # Password used to access the key itself
    key_password: "${COAP_DTLS_KEY_PASSWORD:server_key_password}"
x509:
  # When true, certificate validity checks for client certificates are skipped
  skip_validity_check_for_client_cert: "${TB_COAP_X509_DTLS_SKIP_VALIDITY_CHECK_FOR_CLIENT_CERT:false}"
  # Inactivity timeout (ms) of a DTLS session; used to clean up the session cache
  dtls_session_inactivity_timeout: "${TB_COAP_X509_DTLS_SESSION_INACTIVITY_TIMEOUT:86400000}"
  # Interval (ms) of periodic eviction of timed-out DTLS sessions
  dtls_session_report_timeout: "${TB_COAP_X509_DTLS_SESSION_REPORT_TIMEOUT:1800000}"
# Device connectivity parameters
device:
  connectivity:
    http:
      # If true, the check-connectivity service includes a curl command in the list of test commands,
      # built from the DEVICE_CONNECTIVITY_HTTP_HOST and DEVICE_CONNECTIVITY_HTTP_PORT variables
      enabled: "${DEVICE_CONNECTIVITY_HTTP_ENABLED:true}"
      # Host of the HTTP transport service. If empty, the base URL is used
      host: "${DEVICE_CONNECTIVITY_HTTP_HOST:}"
      # Port of the HTTP transport service. If empty, the default HTTP port is used
      port: "${DEVICE_CONNECTIVITY_HTTP_PORT:8080}"
    https:
      # If true, the check-connectivity service includes a curl command in the list of test commands,
      # built from the DEVICE_CONNECTIVITY_HTTPS_HOST and DEVICE_CONNECTIVITY_HTTPS_PORT variables
      enabled: "${DEVICE_CONNECTIVITY_HTTPS_ENABLED:false}"
      # Host of the HTTPS transport service. If empty, the base URL is used
      host: "${DEVICE_CONNECTIVITY_HTTPS_HOST:}"
      # Port of the HTTPS transport service. If empty, the default HTTPS port is used
      port: "${DEVICE_CONNECTIVITY_HTTPS_PORT:443}"
    mqtt:
      # If true, a mosquitto command is included in the list of test commands,
      # built from DEVICE_CONNECTIVITY_MQTT_HOST and DEVICE_CONNECTIVITY_MQTT_PORT
      enabled: "${DEVICE_CONNECTIVITY_MQTT_ENABLED:true}"
      # Host of the MQTT transport service. If empty, the base URL host is used
      host: "${DEVICE_CONNECTIVITY_MQTT_HOST:}"
      # Port of the MQTT transport service
      port: "${DEVICE_CONNECTIVITY_MQTT_PORT:1883}"
    mqtts:
      # If true, a mosquitto command is included in the list of test commands,
      # built from DEVICE_CONNECTIVITY_MQTTS_HOST and DEVICE_CONNECTIVITY_MQTTS_PORT
      enabled: "${DEVICE_CONNECTIVITY_MQTTS_ENABLED:false}"
      # Host of the MQTTS transport service. If empty, the base URL host is used
      host: "${DEVICE_CONNECTIVITY_MQTTS_HOST:}"
      # Port of the MQTTS transport service. If empty, the default mqtts port is used
      port: "${DEVICE_CONNECTIVITY_MQTTS_PORT:8883}"
      # Path to the MQTT CA root certificate file
      pem_cert_file: "${DEVICE_CONNECTIVITY_MQTTS_CA_ROOT_CERT:cafile.pem}"
    coap:
      # If true, a coap command is included in the list of test commands,
      # built from DEVICE_CONNECTIVITY_COAP_HOST and DEVICE_CONNECTIVITY_COAP_PORT
      enabled: "${DEVICE_CONNECTIVITY_COAP_ENABLED:true}"
      # Host of the CoAP transport service. If empty, the base URL host is used
      host: "${DEVICE_CONNECTIVITY_COAP_HOST:}"
      # Port of the CoAP transport service. If empty, the default coap port is used
      port: "${DEVICE_CONNECTIVITY_COAP_PORT:5683}"
    coaps:
      # If true, a coap command is included in the list of test commands,
      # built from DEVICE_CONNECTIVITY_COAPS_HOST and DEVICE_CONNECTIVITY_COAPS_PORT
      enabled: "${DEVICE_CONNECTIVITY_COAPS_ENABLED:false}"
      # Host of the CoAPs transport service. If empty, the base URL host is used
      host: "${DEVICE_CONNECTIVITY_COAPS_HOST:}"
      # Port of the CoAPs transport service. If empty, the default coaps port is used
      port: "${DEVICE_CONNECTIVITY_COAPS_PORT:5684}"
      # Path to the CoAP CA root certificate file
      pem_cert_file: "${DEVICE_CONNECTIVITY_COAPS_CA_ROOT_CERT:cafile.pem}"
    gateway:
      # Docker tag of the thingsboard/tb-gateway image used in the docker-compose file for gateway launch
      image_version: "${DEVICE_CONNECTIVITY_GATEWAY_IMAGE_VERSION:3.8-stable}"
# Edges parameters
edges:
  # Enable/disable Edge functionality
  enabled: "${EDGES_ENABLED:true}"
  rpc:
    # RPC bind port
    port: "${EDGES_RPC_PORT:7070}"
    # Minimum time that must elapse between keepalive pings sent by the client.
    # Prevents clients from pinging too frequently (a nuisance and a potential
    # denial-of-service vector); clients that ping more often than this interval
    # may have their connection terminated by the server
    client_max_keep_alive_time_sec: "${EDGES_RPC_CLIENT_MAX_KEEP_ALIVE_TIME_SEC:1}"
    # Inactivity time (no read operations on the connection) after which the server sends
    # a keepalive ping, to proactively verify the client is still responsive and to keep
    # network intermediaries from dropping the connection due to inactivity
    keep_alive_time_sec: "${EDGES_RPC_KEEP_ALIVE_TIME_SEC:10}"
    # Maximum time the server waits for a keepalive ping to be acknowledged before it
    # considers the connection dead and may close it; helps detect unresponsive clients
    keep_alive_timeout_sec: "${EDGES_RPC_KEEP_ALIVE_TIMEOUT_SEC:5}"
    ssl:
      # Enable/disable SSL support
      enabled: "${EDGES_RPC_SSL_ENABLED:false}"
      # Certificate file used during TLS connectivity to the cloud
      cert: "${EDGES_RPC_SSL_CERT:certChainFile.pem}"
      # Private key file associated with the certificate above; used during the secure handshake
      private_key: "${EDGES_RPC_SSL_PRIVATE_KEY:privateKeyFile.pem}"
    # Maximum size (bytes) of inbound messages the cloud accepts from the edge (default 4 MB)
    max_inbound_message_size: "${EDGES_RPC_MAX_INBOUND_MESSAGE_SIZE:4194304}"
    # Maximum length of telemetry (time-series and attributes) messages sent to the edge; 0 means no limit
    max_telemetry_message_size: "${EDGES_RPC_MAX_TELEMETRY_MESSAGE_SIZE:0}"
  storage:
    # Maximum number of edge events read from the DB and sent to the edge per batch
    max_read_records_count: "${EDGES_STORAGE_MAX_READ_RECORDS_COUNT:50}"
    # Milliseconds to wait before the next check for edge events in the DB
    no_read_records_sleep: "${EDGES_NO_READ_RECORDS_SLEEP:1000}"
    # Milliseconds to wait before resending a failed batch of edge events
    sleep_between_batches: "${EDGES_SLEEP_BETWEEN_BATCHES:60000}"
    # Time (ms) subtracted from the start timestamp when fetching edge events. Compensates
    # for possible misordering between 'created_time' (used for partitioning) and 'seqId'
    # (used for sorting); without it, events with a smaller seqId but larger created_time
    # could be skipped, especially across partition boundaries
    misordering_compensation_millis: "${EDGES_MISORDERING_COMPENSATION_MILLIS:60000}"
    # Maximum number of high-priority edge events per edge session (kept in memory, not persisted)
    max_high_priority_queue_size_per_session: "${EDGES_MAX_HIGH_PRIORITY_QUEUE_SIZE_PER_SESSION:10000}"
    # Number of threads used to check the DB for edge events
    scheduler_pool_size: "${EDGES_SCHEDULER_POOL_SIZE:4}"
    # Number of threads used to send downlink messages to edges over gRPC
    send_scheduler_pool_size: "${EDGES_SEND_SCHEDULER_POOL_SIZE:4}"
    # Number of threads converting edge events from the DB into downlink messages for delivery
    grpc_callback_thread_pool_size: "${EDGES_GRPC_CALLBACK_POOL_SIZE:4}"
  state:
    # Persist edge state (active, last connect, last disconnect) into the time-series table;
    # 'false' stores the edge state in the attributes table instead
    persistToTelemetry: "${EDGES_PERSIST_STATE_TO_TELEMETRY:false}"
  stats:
    # Enable/disable reporting of edge communication stats
    enabled: "${EDGES_STATS_ENABLED:true}"
    # Time-to-live in days for edge communication stats stored as time-series
    ttl: "${EDGES_STATS_TTL:30}"
    # How often edge communication stats are reported, in milliseconds
    report-interval-millis: "${EDGES_STATS_REPORT_INTERVAL_MS:600000}"
# Spring doc common parameters
springdoc:
  # If false, the swagger API docs are unavailable
  api-docs.enabled: "${SWAGGER_ENABLED:true}"
  # Default media type produced by the Swagger endpoints
  default-produces-media-type: "${SWAGGER_DEFAULT_PRODUCES_MEDIA_TYPE:application/json}"
# Swagger common parameters
swagger:
  # General match pattern of swagger UI links
  api_path: "${SWAGGER_API_PATH:/api/**}"
  # Regex matching secured API paths in swagger UI links
  security_path_regex: "${SWAGGER_SECURITY_PATH_REGEX:/api/.*}"
  # Regex matching non-secured API paths in swagger UI links
  non_security_path_regex: "${SWAGGER_NON_SECURITY_PATH_REGEX:/api/(?:noauth|v1)/.*}"
  # Title shown on the API doc UI page
  title: "${SWAGGER_TITLE:ThingsBoard REST API}"
  # Description shown on the API doc UI page
  description: "${SWAGGER_DESCRIPTION: ThingsBoard open-source IoT platform REST API documentation.}"
  contact:
    # Contact name on the API doc UI page
    name: "${SWAGGER_CONTACT_NAME:ThingsBoard team}"
    # Contact URL on the API doc UI page
    url: "${SWAGGER_CONTACT_URL:https://thingsboard.io}"
    # Contact email on the API doc UI page
    email: "${SWAGGER_CONTACT_EMAIL:info@thingsboard.io}"
  license:
    # License title on the API doc UI page
    title: "${SWAGGER_LICENSE_TITLE:Apache License Version 2.0}"
    # Link to the license body on the API doc UI page
    url: "${SWAGGER_LICENSE_URL:https://github.com/thingsboard/thingsboard/blob/master/LICENSE}"
  # API doc version to display; defaults to the package version when empty
  version: "${SWAGGER_VERSION:}"
  # Group name (definition) on the API doc UI page
  group_name: "${SWAGGER_GROUP_NAME:thingsboard}"
  # Initial display state of API operations and tags (none or list)
  doc_expansion: "${SWAGGER_DOC_EXPANSION:list}"
# Queue configuration parameters
queue:
  # Queue implementation: in-memory or kafka (Apache Kafka)
  type: "${TB_QUEUE_TYPE:in-memory}"
  # Global queue prefix. When set, it is prepended to default topic names
  # ('prefix.default_topic_name') and applied to all topics (and Kafka consumer groups)
  prefix: "${TB_QUEUE_PREFIX:}"
  in_memory:
    stats:
      # Statistics print interval (ms) for the in-memory queue (debug level)
      print-interval-ms: "${TB_QUEUE_IN_MEMORY_STATS_PRINT_INTERVAL_MS:60000}"
kafka:
  # Kafka bootstrap nodes in "host:port" format
  bootstrap.servers: "${TB_KAFKA_SERVERS:localhost:9092}"
  ssl:
    # Enable/disable SSL for Kafka communication
    enabled: "${TB_KAFKA_SSL_ENABLED:false}"
    # Location of the trust store file
    truststore.location: "${TB_KAFKA_SSL_TRUSTSTORE_LOCATION:}"
    # Password of the trust store file, if specified
    truststore.password: "${TB_KAFKA_SSL_TRUSTSTORE_PASSWORD:}"
    # Location of the key store file; optional for the client, enables two-way authentication
    keystore.location: "${TB_KAFKA_SSL_KEYSTORE_LOCATION:}"
    # Store password for the key store file; only needed when 'ssl.keystore.location' is set.
    # Key store passwords are not supported for the PEM format
    keystore.password: "${TB_KAFKA_SSL_KEYSTORE_PASSWORD:}"
    # Password of the private key in the key store file or the PEM key set in 'keystore.key'
    key.password: "${TB_KAFKA_SSL_KEY_PASSWORD:}"
  # Number of acknowledgments the producer requires from the leader before a request is
  # considered complete; controls durability of sent records. Allowed values: 0, 1, all
  acks: "${TB_KAFKA_ACKS:all}"
  # Number of retries: resend any record whose send fails with a potentially transient error
  retries: "${TB_KAFKA_RETRIES:1}"
  # Compression type for all data generated by the producer: none (default, no compression) or gzip
  compression.type: "${TB_KAFKA_COMPRESSION_TYPE:none}"
  # Default batch size; upper bound of the producer batch size to be sent
  batch.size: "${TB_KAFKA_BATCH_SIZE:16384}"
  # Small artificial delay (ms): rather than sending each record immediately, the producer may batch them
  linger.ms: "${TB_KAFKA_LINGER_MS:1}"
  # Maximum size of a request in bytes; limits record batches per request to avoid huge requests
  max.request.size: "${TB_KAFKA_MAX_REQUEST_SIZE:1048576}"
  # Maximum number of unacknowledged requests the client sends on a single connection before blocking
  max.in.flight.requests.per.connection: "${TB_KAFKA_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION:5}"
  # Total memory (bytes) the producer can use to buffer records waiting to be sent
  buffer.memory: "${TB_BUFFER_MEMORY:33554432}"
  # Replication factor: number of Kafka brokers holding copies of the data
  replication_factor: "${TB_QUEUE_KAFKA_REPLICATION_FACTOR:1}"
  # Maximum delay (ms) between invocations of poll() under consumer group management;
  # upper bound on how long a consumer may stay idle before fetching more records
  max_poll_interval_ms: "${TB_QUEUE_KAFKA_MAX_POLL_INTERVAL_MS:300000}"
  # Maximum number of records returned by a single poll() call
  max_poll_records: "${TB_QUEUE_KAFKA_MAX_POLL_RECORDS:8192}"
  # Maximum amount of data per partition the server returns; records are fetched in batches
  max_partition_fetch_bytes: "${TB_QUEUE_KAFKA_MAX_PARTITION_FETCH_BYTES:16777216}"
  # Maximum amount of data the server returns per fetch; records are fetched in batches
  fetch_max_bytes: "${TB_QUEUE_KAFKA_FETCH_MAX_BYTES:134217728}"
  # (30 seconds) see https://docs.confluent.io/platform/current/installation/configuration/producer-configs.html#producerconfigs_request.timeout.ms
  request.timeout.ms: "${TB_QUEUE_KAFKA_REQUEST_TIMEOUT_MS:30000}"
  # (10 seconds) see https://docs.confluent.io/platform/current/installation/configuration/consumer-configs.html#consumerconfigs_session.timeout.ms
  session.timeout.ms: "${TB_QUEUE_KAFKA_SESSION_TIMEOUT_MS:10000}"
  # Where to start consuming when no committed offset exists: earliest, latest or none
  auto_offset_reset: "${TB_QUEUE_KAFKA_AUTO_OFFSET_RESET:earliest}"
  # Enable/disable use of Confluent Cloud
  use_confluent_cloud: "${TB_QUEUE_KAFKA_USE_CONFLUENT_CLOUD:false}"
  confluent:
    # Endpoint identification algorithm clients use to validate the server hostname (default https)
    ssl.algorithm: "${TB_QUEUE_KAFKA_CONFLUENT_SSL_ALGORITHM:https}"
    # SASL mechanism used to authenticate Schema Registry requests. SASL/PLAIN should only be
    # used over TLS/SSL so that clear-text passwords are never transmitted unencrypted
    sasl.mechanism: "${TB_QUEUE_KAFKA_CONFLUENT_SASL_MECHANISM:PLAIN}"
    # JAAS configuration, allowing multiple SASL mechanisms on a broker
    sasl.config: "${TB_QUEUE_KAFKA_CONFLUENT_SASL_JAAS_CONFIG:org.apache.kafka.common.security.plain.PlainLoginModule required username=\"CLUSTER_API_KEY\" password=\"CLUSTER_API_SECRET\";}"
    # Protocol used to communicate with brokers: PLAINTEXT, SSL, SASL_PLAINTEXT or SASL_SSL
    security.protocol: "${TB_QUEUE_KAFKA_CONFLUENT_SECURITY_PROTOCOL:SASL_SSL}"
# Key-value properties for the Kafka consumer per specific topic, e.g. tb_ota_package is the
# topic name for OTA and tb_rule_engine.sq the topic name for the default SequentialByOriginator
# queue. Check the TB_QUEUE_CORE_OTA_TOPIC and TB_QUEUE_RE_SQ_TOPIC params
consumer-properties-per-topic:
  tb_ota_package:
    # Example of a topic-specific consumer property value
    - key: max.poll.records
      value: "${TB_QUEUE_KAFKA_OTA_MAX_POLL_RECORDS:10}"
  tb_version_control:
    # Example of a topic-specific consumer property value for Version Control
    - key: max.poll.interval.ms
      value: "${TB_QUEUE_KAFKA_VC_MAX_POLL_INTERVAL_MS:600000}"
  # tb_rule_engine.sq:
  #   - key: max.poll.records
  #     value: "${TB_QUEUE_KAFKA_SQ_MAX_POLL_RECORDS:1024}"
  tb_edge:
    # Consumers targeting edge service update topics: maximum number of records
    # polled from tb_edge topics per request
    - key: max.poll.records
      value: "${TB_QUEUE_KAFKA_EDGE_EVENTS_MAX_POLL_RECORDS:10}"
  tb_edge.notifications:
    # Consumers targeting high-priority edge notifications (RPC calls, lifecycle events and new
    # queue messages) that require minimal latency and swift processing: maximum number of
    # records polled from tb_edge.notifications.SERVICE_ID topics per request
    - key: max.poll.records
      value: "${TB_QUEUE_KAFKA_EDGE_HP_EVENTS_MAX_POLL_RECORDS:10}"
  tb_edge_event.notifications:
    # Consumers targeting downlinks meant for specific edges; topic names are dynamically
    # constructed from tenant and edge identifiers: maximum number of records polled from
    # tb_edge_event.notifications.TENANT_ID.EDGE_ID topics per request
    - key: max.poll.records
      value: "${TB_QUEUE_KAFKA_EDGE_NOTIFICATIONS_MAX_POLL_RECORDS:10}"
  tb_housekeeper:
    # Housekeeper tasks topic: messages (tasks) should be consumed one by one
    - key: max.poll.records
      value: "${TB_QUEUE_KAFKA_HOUSEKEEPER_MAX_POLL_RECORDS:1}"
  tb_housekeeper.reprocessing:
    # Housekeeper reprocessing topic: messages (tasks) should be consumed one by one
    - key: max.poll.records
      value: "${TB_QUEUE_KAFKA_HOUSEKEEPER_REPROCESSING_MAX_POLL_RECORDS:1}"
  edqs.events:
    # Max poll records for the edqs.events topic
    - key: max.poll.records
      value: "${TB_QUEUE_KAFKA_EDQS_EVENTS_MAX_POLL_RECORDS:512}"
  edqs.state:
    # Max poll records for the edqs.state topic
    - key: max.poll.records
      value: "${TB_QUEUE_KAFKA_EDQS_STATE_MAX_POLL_RECORDS:512}"
  tasks:
    # Max poll records for tasks topics
    - key: max.poll.records
      value: "${TB_QUEUE_KAFKA_TASKS_MAX_POLL_RECORDS:1}"
# If you override any default Kafka topic name via environment variables, you must also specify
# the related consumer properties for the new topic in 'consumer-properties-per-topic-inline';
# otherwise the topic will not inherit its expected configuration (e.g. max.poll.records, timeouts).
# Each entry sets a single property for a specific topic; repeat the topic key for multiple properties.
# Format: "topic1:key=value;topic1:key=value;topic2:key=value"
# Example: tb_core_updated:max.poll.records=10;tb_core_updated:bootstrap.servers=kafka1:9092,kafka2:9092;tb_edge_updated:auto.offset.reset=latest
consumer-properties-per-topic-inline: "${TB_QUEUE_KAFKA_CONSUMER_PROPERTIES_PER_TOPIC_INLINE:}"
# Custom semicolon-separated parameters for the Kafka consumer/producer/admin,
# e.g. "metrics.recording.level:INFO;metrics.sample.window.ms:30000"
other-inline: "${TB_QUEUE_KAFKA_OTHER_PROPERTIES:}"
# DEPRECATED. Custom key-value parameters for the Kafka consumer/producer, with env variables exposed for outside configuration
other:
#  - key: "request.timeout.ms" # see https://docs.confluent.io/platform/current/installation/configuration/producer-configs.html#producerconfigs_request.timeout.ms
#    value: "${TB_QUEUE_KAFKA_REQUEST_TIMEOUT_MS:30000}" # (30 seconds)
#  - key: "session.timeout.ms" # see https://docs.confluent.io/platform/current/installation/configuration/consumer-configs.html#consumerconfigs_session.timeout.ms
#    value: "${TB_QUEUE_KAFKA_SESSION_TIMEOUT_MS:10000}" # (10 seconds)
topic-properties:
  # Kafka properties for Rule Engine topics
  rule-engine: "${TB_QUEUE_KAFKA_RE_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
  # Kafka properties for Core topics
  core: "${TB_QUEUE_KAFKA_CORE_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
  # Kafka properties for Transport API topics
  transport-api: "${TB_QUEUE_KAFKA_TA_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:10;min.insync.replicas:1}"
  # Kafka properties for Notifications topics
  notifications: "${TB_QUEUE_KAFKA_NOTIFICATIONS_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
  # Kafka properties for JS Executor topics
  js-executor: "${TB_QUEUE_KAFKA_JE_TOPIC_PROPERTIES:retention.ms:86400000;segment.bytes:52428800;retention.bytes:104857600;partitions:30;min.insync.replicas:1}"
  # Kafka properties for the OTA updates topic
  ota-updates: "${TB_QUEUE_KAFKA_OTA_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:10;min.insync.replicas:1}"
  # Kafka properties for the Version Control topic
  version-control: "${TB_QUEUE_KAFKA_VC_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
  # Kafka properties for the Housekeeper tasks topic
  housekeeper: "${TB_QUEUE_KAFKA_HOUSEKEEPER_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:10;min.insync.replicas:1}"
  # Kafka properties for the Housekeeper reprocessing topic; retention.ms is 90 days;
  # partitions is 1 since only one reprocessing service runs at a time
  housekeeper-reprocessing: "${TB_QUEUE_KAFKA_HOUSEKEEPER_REPROCESSING_TOPIC_PROPERTIES:retention.ms:7776000000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
  # Kafka properties for the Edge topic
  edge: "${TB_QUEUE_KAFKA_EDGE_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
  # Kafka properties for the Edge event topic
  edge-event: "${TB_QUEUE_KAFKA_EDGE_EVENT_TOPIC_PROPERTIES:retention.ms:2592000000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
  # Kafka properties for Calculated Field topics
  calculated-field: "${TB_QUEUE_KAFKA_CF_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
  # Kafka properties for Calculated Field State topics
  calculated-field-state: "${TB_QUEUE_KAFKA_CF_STATE_TOPIC_PROPERTIES:retention.ms:-1;segment.bytes:52428800;retention.bytes:104857600000;partitions:1;min.insync.replicas:1;cleanup.policy:compact}"
  # Kafka properties for EDQS events topics
  edqs-events: "${TB_QUEUE_KAFKA_EDQS_EVENTS_TOPIC_PROPERTIES:retention.ms:86400000;segment.bytes:52428800;retention.bytes:-1;partitions:1;min.insync.replicas:1}"
  # Kafka properties for the EDQS requests topic (default: 3 minutes retention)
  edqs-requests: "${TB_QUEUE_KAFKA_EDQS_REQUESTS_TOPIC_PROPERTIES:retention.ms:180000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
  # Kafka properties for the EDQS state topic (infinite retention, compaction)
  edqs-state: "${TB_QUEUE_KAFKA_EDQS_STATE_TOPIC_PROPERTIES:retention.ms:-1;segment.bytes:52428800;retention.bytes:-1;partitions:1;min.insync.replicas:1;cleanup.policy:compact}"
  # Kafka properties for tasks topics
  tasks: "${TB_QUEUE_KAFKA_TASKS_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:104857600;partitions:1;min.insync.replicas:1}"
consumer-stats:
  # Print the lag between consumer group offsets and the latest message offsets in Kafka topics
  enabled: "${TB_QUEUE_KAFKA_CONSUMER_STATS_ENABLED:true}"
  # Printing interval for Kafka consumer-group stats
  print-interval-ms: "${TB_QUEUE_KAFKA_CONSUMER_STATS_MIN_PRINT_INTERVAL_MS:60000}"
  # Time to wait for stats-loading requests to Kafka to finish
  kafka-response-timeout-ms: "${TB_QUEUE_KAFKA_CONSUMER_STATS_RESPONSE_TIMEOUT_MS:1000}"
# Topics cache TTL in milliseconds (5 minutes by default)
topics_cache_ttl_ms: "${TB_QUEUE_KAFKA_TOPICS_CACHE_TTL_MS:300000}"
partitions:
  # Hash function used to map messages to partitions: murmur3_32, murmur3_128 or sha256
  hash_function_name: "${TB_QUEUE_PARTITIONS_HASH_FUNCTION_NAME:murmur3_128}"
transport_api:
  # Topic used to consume API requests from transport microservices
  requests_topic: "${TB_QUEUE_TRANSPORT_API_REQUEST_TOPIC:tb_transport.api.requests}"
  # Topic used to produce API responses to transport microservices
  responses_topic: "${TB_QUEUE_TRANSPORT_API_RESPONSE_TOPIC:tb_transport.api.responses}"
  # Maximum number of pending API requests from transport microservices handled by the server
  max_pending_requests: "${TB_QUEUE_TRANSPORT_MAX_PENDING_REQUESTS:10000}"
  # Maximum timeout (ms) for the server to handle an API request from a transport microservice
  max_requests_timeout: "${TB_QUEUE_TRANSPORT_MAX_REQUEST_TIMEOUT:10000}"
  # Number of threads used to invoke callbacks
  max_callback_threads: "${TB_QUEUE_TRANSPORT_MAX_CALLBACK_THREADS:100}"
  # Number of threads used for transport API requests
  max_core_handler_threads: "${TB_QUEUE_TRANSPORT_MAX_CORE_HANDLER_THREADS:16}"
  # Interval (ms) to poll API requests from transport microservices
  request_poll_interval: "${TB_QUEUE_TRANSPORT_REQUEST_POLL_INTERVAL_MS:25}"
  # Interval (ms) to poll API responses from transport microservices
  response_poll_interval: "${TB_QUEUE_TRANSPORT_RESPONSE_POLL_INTERVAL_MS:25}"
core:
  # Default topic name
  topic: "${TB_QUEUE_CORE_TOPIC:tb_core}"
  # High-priority notifications that require minimum latency and processing time
  notifications_topic: "${TB_QUEUE_CORE_NOTIFICATIONS_TOPIC:tb_core.notifications}"
  # Interval (ms) to poll messages by Core microservices
  poll-interval: "${TB_QUEUE_CORE_POLL_INTERVAL_MS:25}"
  # Number of partitions used by Core microservices
  partitions: "${TB_QUEUE_CORE_PARTITIONS:10}"
  # Timeout for processing a message pack by Core microservices
  pack-processing-timeout: "${TB_QUEUE_CORE_PACK_PROCESSING_TIMEOUT_MS:2000}"
  # Enable/disable a separate consumer per partition for the Core queue
  consumer-per-partition: "${TB_QUEUE_CORE_CONSUMER_PER_PARTITION:true}"
  ota:
    # Default topic name for OTA updates
    topic: "${TB_QUEUE_CORE_OTA_TOPIC:tb_ota_package}"
    # Processing interval for device OTA updates; throttles parallel updates to avoid harming the network
    pack-interval-ms: "${TB_QUEUE_CORE_OTA_PACK_INTERVAL_MS:60000}"
    # Size of OTA update notifications fetched from the queue (pairs of firmware and device ids)
    pack-size: "${TB_QUEUE_CORE_OTA_PACK_SIZE:100}"
  # Stats topic name
  usage-stats-topic: "${TB_QUEUE_US_TOPIC:tb_usage_stats}"
  stats:
    # Enable/disable statistics for Core microservices
    enabled: "${TB_QUEUE_CORE_STATS_ENABLED:true}"
    # Statistics printing interval for Core microservices
    print-interval-ms: "${TB_QUEUE_CORE_STATS_PRINT_INTERVAL_MS:60000}"
housekeeper:
  # Topic name for Housekeeper tasks
  topic: "${TB_HOUSEKEEPER_TOPIC:tb_housekeeper}"
  # Topic name for Housekeeper tasks to be reprocessed
  reprocessing-topic: "${TB_HOUSEKEEPER_REPROCESSING_TOPIC:tb_housekeeper.reprocessing}"
  # Poll interval for Housekeeper-related topics
  poll-interval-ms: "${TB_HOUSEKEEPER_POLL_INTERVAL_MS:500}"
  # Timeout (ms) for task processing; tasks that fail to finish on time are submitted for reprocessing
  task-processing-timeout-ms: "${TB_HOUSEKEEPER_TASK_PROCESSING_TIMEOUT_MS:120000}"
  # Comma-separated list of task types that should not be processed. Available task types:
  # DELETE_ATTRIBUTES, DELETE_TELEMETRY (disables both DELETE_LATEST_TS and DELETE_TS_HISTORY),
  # DELETE_LATEST_TS, DELETE_TS_HISTORY, DELETE_EVENTS, DELETE_ALARMS, UNASSIGN_ALARMS
  disabled-task-types: "${TB_HOUSEKEEPER_DISABLED_TASK_TYPES:}"
  # Delay (ms) between task reprocessing attempts
  task-reprocessing-delay-ms: "${TB_HOUSEKEEPER_TASK_REPROCESSING_DELAY_MS:3000}"
  # Maximum number of reprocessing attempts; after exceeding it, the task is dropped
  max-reprocessing-attempts: "${TB_HOUSEKEEPER_MAX_REPROCESSING_ATTEMPTS:10}"
  stats:
    # Enable/disable statistics for Housekeeper
    enabled: "${TB_HOUSEKEEPER_STATS_ENABLED:true}"
    # Statistics printing interval for Housekeeper
    print-interval-ms: "${TB_HOUSEKEEPER_STATS_PRINT_INTERVAL_MS:60000}"
edqs:
  sync:
    # Enable/disable EDQS synchronization
    enabled: "${TB_EDQS_SYNC_ENABLED:false}"
    # Batch size of entities being synced with EDQS
    entity_batch_size: "${TB_EDQS_SYNC_ENTITY_BATCH_SIZE:10000}"
    # Batch size of time-series data being synced with EDQS
    ts_batch_size: "${TB_EDQS_SYNC_TS_BATCH_SIZE:10000}"
  api:
    # Forward entity data query requests to EDQS (otherwise the PostgreSQL implementation is used)
    supported: "${TB_EDQS_API_SUPPORTED:false}"
    # Auto-enable the EDQS API (if queue.edqs.api.supported is true) once sync of data to Kafka finishes
    auto_enable: "${TB_EDQS_API_AUTO_ENABLE:true}"
  # Interval (ms) to check for ready EDQS servers
  readiness_check_interval: "${TB_EDQS_READINESS_CHECK_INTERVAL_MS:60000}"
  # EDQS mode: local (for monolith) or remote (with separate EDQS microservices)
  mode: "${TB_EDQS_MODE:local}"
  local:
    # Path to RocksDB used for EDQS backup in local mode
    rocksdb_path: "${TB_EDQS_ROCKSDB_PATH:${user.home}/.rocksdb/edqs}"
  # Number of partitions for EDQS topics
  partitions: "${TB_EDQS_PARTITIONS:12}"
  # EDQS partitioning strategy: tenant (partition resolved by tenant id) or none (resolved by message key)
  partitioning_strategy: "${TB_EDQS_PARTITIONING_STRATEGY:tenant}"
  # EDQS events topic
  events_topic: "${TB_EDQS_EVENTS_TOPIC:edqs.events}"
  # EDQS state topic
  state_topic: "${TB_EDQS_STATE_TOPIC:edqs.state}"
  # EDQS requests topic
  requests_topic: "${TB_EDQS_REQUESTS_TOPIC:edqs.requests}"
  # EDQS responses topic
  responses_topic: "${TB_EDQS_RESPONSES_TOPIC:edqs.responses}"
  # Poll interval for EDQS topics
  poll_interval: "${TB_EDQS_POLL_INTERVAL_MS:25}"
  # Maximum number of pending requests to EDQS
  max_pending_requests: "${TB_EDQS_MAX_PENDING_REQUESTS:10000}"
  # Maximum timeout for requests to EDQS
  max_request_timeout: "${TB_EDQS_MAX_REQUEST_TIMEOUT:20000}"
  # Thread pool size for the EDQS requests executor
  request_executor_size: "${TB_EDQS_REQUEST_EXECUTOR_SIZE:50}"
  # TTL (minutes) for the EDQS versions cache; must exceed the time the sync process takes
  versions_cache_ttl: "${TB_EDQS_VERSIONS_CACHE_TTL_MINUTES:60}"
  # Strings longer than this threshold are compressed
  string_compression_length_threshold: "${TB_EDQS_STRING_COMPRESSION_LENGTH_THRESHOLD:512}"
  stats:
    # Enable/disable statistics for EDQS
    enabled: "${TB_EDQS_STATS_ENABLED:true}"
    # Threshold (ms) above which slow queries are logged
    slow_query_threshold: "${TB_EDQS_SLOW_QUERY_THRESHOLD_MS:3000}"
vc:
# Default topic name
topic: "${TB_QUEUE_VC_TOPIC:tb_version_control}"
# Number of partitions to associate with this queue. Used for scaling the number of messages that can be processed in parallel
partitions: "${TB_QUEUE_VC_PARTITIONS:10}"
# Interval in milliseconds between polling of the messages if no new messages arrive
poll-interval: "${TB_QUEUE_VC_INTERVAL_MS:25}"
# Timeout before retrying all failed and timed-out messages from the processing pack
pack-processing-timeout: "${TB_QUEUE_VC_PACK_PROCESSING_TIMEOUT_MS:180000}"
# Timeout for a request to the VC-executor (e.g. requesting the version of an entity, committing a change, etc.)
request-timeout: "${TB_QUEUE_VC_REQUEST_TIMEOUT:180000}"
# Limit for single queue message size
msg-chunk-size: "${TB_QUEUE_VC_MSG_CHUNK_SIZE:250000}"
js:
# JS Eval request topic
request_topic: "${REMOTE_JS_EVAL_REQUEST_TOPIC:js_eval.requests}"
# JS Eval responses topic prefix that is combined with node id
response_topic_prefix: "${REMOTE_JS_EVAL_RESPONSE_TOPIC:js_eval.responses}"
# JS Eval max pending requests
max_pending_requests: "${REMOTE_JS_MAX_PENDING_REQUESTS:10000}"
# JS Eval max request timeout
max_eval_requests_timeout: "${REMOTE_JS_MAX_EVAL_REQUEST_TIMEOUT:60000}"
# JS max request timeout
max_requests_timeout: "${REMOTE_JS_MAX_REQUEST_TIMEOUT:10000}"
# JS execution max request timeout
max_exec_requests_timeout: "${REMOTE_JS_MAX_EXEC_REQUEST_TIMEOUT:2000}"
# JS response poll interval
response_poll_interval: "${REMOTE_JS_RESPONSE_POLL_INTERVAL_MS:25}"
rule-engine:
# Deprecated. It will be removed in an upcoming release
topic: "${TB_QUEUE_RULE_ENGINE_TOPIC:tb_rule_engine}"
# For high-priority notifications that require minimum latency and processing time
notifications_topic: "${TB_QUEUE_RULE_ENGINE_NOTIFICATIONS_TOPIC:tb_rule_engine.notifications}"
# Interval in milliseconds to poll messages by Rule Engine
poll-interval: "${TB_QUEUE_RULE_ENGINE_POLL_INTERVAL_MS:25}"
# Timeout for processing a message pack of Rule Engine
pack-processing-timeout: "${TB_QUEUE_RULE_ENGINE_PACK_PROCESSING_TIMEOUT_MS:2000}"
stats:
# Enable/disable statistics for Rule Engine
enabled: "${TB_QUEUE_RULE_ENGINE_STATS_ENABLED:true}"
# Statistics printing interval for Rule Engine
print-interval-ms: "${TB_QUEUE_RULE_ENGINE_STATS_PRINT_INTERVAL_MS:60000}"
# Max length of the error message that is printed by statistics
max-error-message-length: "${TB_QUEUE_RULE_ENGINE_MAX_ERROR_MESSAGE_LENGTH:4096}"
# After a queue is deleted (or the profile's isolation option was disabled), Rule Engine will continue reading related topics during this period before deleting the actual topics
topic-deletion-delay: "${TB_QUEUE_RULE_ENGINE_TOPIC_DELETION_DELAY_SEC:15}"
# Size of the thread pool that handles such operations as partition changes, config updates, queue deletion
management-thread-pool-size: "${TB_QUEUE_RULE_ENGINE_MGMT_THREAD_POOL_SIZE:12}"
calculated_fields:
# Topic name for Calculated Field (CF) events from Rule Engine
event_topic: "${TB_QUEUE_CF_EVENT_TOPIC:tb_cf_event}"
# Topic name for Calculated Field (CF) compacted states
state_topic: "${TB_QUEUE_CF_STATE_TOPIC:tb_cf_state}"
# For high-priority notifications that require minimum latency and processing time
notifications_topic: "${TB_QUEUE_CF_NOTIFICATIONS_TOPIC:calculated_field.notifications}"
# Interval in milliseconds to poll messages by CF (Rule Engine) microservices
poll_interval: "${TB_QUEUE_CF_POLL_INTERVAL_MS:1000}"
# Timeout for processing a message pack by CF microservices
pack_processing_timeout: "${TB_QUEUE_CF_PACK_PROCESSING_TIMEOUT_MS:60000}"
# Thread pool size for processing of the incoming messages
pool_size: "${TB_QUEUE_CF_POOL_SIZE:8}"
# RocksDB path for storing CF states
rocks_db_path: "${TB_QUEUE_CF_ROCKS_DB_PATH:${user.home}/.rocksdb/cf_states}"
# The fetch size specifies how many rows will be fetched from the database per request for initial fetching
init_fetch_pack_size: "${TB_QUEUE_CF_FETCH_PACK_SIZE:50000}"
# The fetch size specifies how many rows will be fetched from the database per request for per-tenant fetching
init_tenant_fetch_pack_size: "${TB_QUEUE_CF_TENANT_FETCH_PACK_SIZE:1000}"
transport:
# For high-priority notifications that require minimum latency and processing time
notifications_topic: "${TB_QUEUE_TRANSPORT_NOTIFICATIONS_TOPIC:tb_transport.notifications}"
# Interval in milliseconds to poll messages
poll_interval: "${TB_QUEUE_TRANSPORT_NOTIFICATIONS_POLL_INTERVAL_MS:25}"
edge:
# Topic name to notify edge service on entity updates, assignment, etc.
topic: "${TB_QUEUE_EDGE_TOPIC:tb_edge}"
# Topic prefix for high-priority edge notifications (rpc, lifecycle, new messages in queue) that require minimum latency and processing time.
# Each tb-core has its own topic: PREFIX.SERVICE_ID
notifications_topic: "${TB_QUEUE_EDGE_NOTIFICATIONS_TOPIC:tb_edge.notifications}"
# Topic prefix for downlinks to be pushed to specific edge.
# Every edge has its own unique topic: PREFIX.TENANT_ID.EDGE_ID
event_notifications_topic: "${TB_QUEUE_EDGE_EVENT_NOTIFICATIONS_TOPIC:tb_edge_event.notifications}"
# Number of partitions used by Edge services
partitions: "${TB_QUEUE_EDGE_PARTITIONS:10}"
# Poll interval for topics related to Edge services
poll-interval: "${TB_QUEUE_EDGE_POLL_INTERVAL_MS:25}"
# Timeout for processing a message pack by Edge services
pack-processing-timeout: "${TB_QUEUE_EDGE_PACK_PROCESSING_TIMEOUT_MS:10000}"
# Retries for processing a failure message pack by Edge services
pack-processing-retries: "${TB_QUEUE_EDGE_MESSAGE_PROCESSING_RETRIES:3}"
# Enable/disable a separate consumer per partition for Edge queue
consumer-per-partition: "${TB_QUEUE_EDGE_CONSUMER_PER_PARTITION:false}"
stats:
# Enable/disable statistics for Edge services
enabled: "${TB_QUEUE_EDGE_STATS_ENABLED:true}"
# Statistics printing interval for Edge services
print-interval-ms: "${TB_QUEUE_EDGE_STATS_PRINT_INTERVAL_MS:60000}"
tasks:
# Poll interval in milliseconds for tasks topics
poll_interval: "${TB_QUEUE_TASKS_POLL_INTERVAL_MS:500}"
# Number of partitions for tasks queues
partitions: "${TB_QUEUE_TASKS_PARTITIONS:12}"
# Custom partitions count for tasks queues per type. Format: 'TYPE1:24;TYPE2:36', e.g. 'CF_REPROCESSING:24;TENANT_EXPORT:6'
partitions_per_type: "${TB_QUEUE_TASKS_PARTITIONS_PER_TYPE:}"
# Tasks partitioning strategy: 'tenant' or 'entity'. By default, using 'tenant' - tasks of a specific tenant are processed in the same partition.
# In a single-tenant environment, use 'entity' strategy to distribute the tasks among multiple partitions.
partitioning_strategy: "${TB_QUEUE_TASKS_PARTITIONING_STRATEGY:tenant}"
stats:
# Name for the tasks stats topic
topic: "${TB_QUEUE_TASKS_STATS_TOPIC:jobs.stats}"
# Poll interval in milliseconds for tasks stats topic
poll_interval: "${TB_QUEUE_TASKS_STATS_POLL_INTERVAL_MS:500}"
# Interval in milliseconds to process job stats
processing_interval: "${TB_QUEUE_TASKS_STATS_PROCESSING_INTERVAL_MS:1000}"
# Event configuration parameters
event:
debug:
# Maximum number of symbols per debug event. The event content will be truncated if needed
max-symbols: "${TB_MAX_DEBUG_EVENT_SYMBOLS:4096}"
# General service parameters
service:
type: "${TB_SERVICE_TYPE:monolith}" # monolith or tb-core or tb-rule-engine
# Unique id for this service (autogenerated if empty)
id: "${TB_SERVICE_ID:}"
rule_engine:
# Comma-separated list of tenant profile ids assigned to this Rule Engine.
# This Rule Engine will only be responsible for tenants with these profiles (in case 'isolation' option is enabled in the profile).
assigned_tenant_profiles: "${TB_RULE_ENGINE_ASSIGNED_TENANT_PROFILES:}"
pubsub:
# Thread pool size for the pubsub rule node executor provider. If not set, the default pubsub executor provider value will be used (5 * number of available processors)
executor_thread_pool_size: "${TB_RULE_ENGINE_PUBSUB_EXECUTOR_THREAD_POOL_SIZE:0}"
# Metrics parameters
metrics:
# Enable/disable actuator metrics.
enabled: "${METRICS_ENABLED:false}"
timer:
# Metrics percentiles returned by actuator for timer metrics. List of double values (separated by commas).
percentiles: "${METRICS_TIMER_PERCENTILES:0.5}"
system_info:
# Persist frequency of system info (CPU, memory usage, etc.) in seconds
persist_frequency: "${METRICS_SYSTEM_INFO_PERSIST_FREQUENCY_SECONDS:60}"
# TTL in days for system info timeseries
ttl: "${METRICS_SYSTEM_INFO_TTL_DAYS:7}"
# Version control parameters
vc:
# Pool size for handling export tasks
thread_pool_size: "${TB_VC_POOL_SIZE:6}"
git:
# Pool size for handling the git IO operations
io_pool_size: "${TB_VC_GIT_POOL_SIZE:3}"
# Default storing repository path
repositories-folder: "${TB_VC_GIT_REPOSITORIES_FOLDER:${java.io.tmpdir}/repositories}"
# Notification system parameters
notification_system:
# Thread pool size for the Notification System to process notification rules and send notifications. Recommended value <= 10
thread_pool_size: "${TB_NOTIFICATION_SYSTEM_THREAD_POOL_SIZE:10}"
rules:
# Semicolon-separated deduplication durations (in millis) for trigger types. Format: 'NotificationRuleTriggerType1:123;NotificationRuleTriggerType2:456'
deduplication_durations: "${TB_NOTIFICATION_RULES_DEDUPLICATION_DURATIONS:NEW_PLATFORM_VERSION:0;RATE_LIMITS:14400000;}"
# General management parameters
management:
endpoints:
web:
exposure:
# Expose metrics endpoint (use value 'prometheus' to enable prometheus metrics).
include: '${METRICS_ENDPOINTS_EXPOSE:info}'
health:
elasticsearch:
# Enable the org.springframework.boot.actuate.elasticsearch.ElasticsearchRestClientHealthIndicator.doHealthCheck
enabled: "false"
# Mobile application settings for Thingsboard mobile application
mobileApp:
# Server domain name for Thingsboard Live mobile application
domain: "${TB_MOBILE_APP_DOMAIN:demo.thingsboard.io}"
# Link to Google Play store for Thingsboard Live mobile application
googlePlayLink: "${TB_MOBILE_APP_GOOGLE_PLAY_LINK:https://play.google.com/store/apps/details?id=org.thingsboard.demo.app}"
# Link to App Store for Thingsboard Live mobile application
appStoreLink: "${TB_MOBILE_APP_APP_STORE_LINK:https://apps.apple.com/us/app/thingsboard-live/id1594355695}"
mqtt:
# MQTT client configuration parameters
client:
# Parameters that control the retransmission mechanism.
# This mechanism only applies to the handling of MQTT Publish, Subscribe, Unsubscribe and Pubrel messages.
# With the updated default settings:
# - After sending the message, wait approximately 5000 ms (± jitter) for the 1st attempt.
# - The 2nd attempt will occur after roughly 5000 * 2 = 10,000 ms (± jitter).
# - The 3rd attempt will occur after roughly 5000 * 4 = 20,000 ms (± jitter).
# - The 4th "attempt" will not actually perform a retransmission.
# Instead, the system will detect that the maximum number of attempts has been reached and drop the pending message.
retransmission:
# Maximum number of retransmission attempts allowed.
# If the attempt count exceeds this value, retransmissions will stop and the pending message will be dropped.
max_attempts: "${TB_MQTT_CLIENT_RETRANSMISSION_MAX_ATTEMPTS:3}"
# Base delay (in milliseconds) before the first retransmission attempt, measured from the moment the message is sent.
# Subsequent delays are calculated using exponential backoff.
# This base delay is also used as the reference value for applying jitter.
initial_delay_millis: "${TB_MQTT_CLIENT_RETRANSMISSION_INITIAL_DELAY_MILLIS:5000}"
# Jitter factor applied to the calculated retransmission delay.
# The actual delay is randomized within a range defined by multiplying the base delay by a factor between (1 - jitter_factor) and (1 + jitter_factor).
# For example, a jitter_factor of 0.15 means the actual delay may vary by up to ±15% of the base delay.
jitter_factor: "${TB_MQTT_CLIENT_RETRANSMISSION_JITTER_FACTOR:0.15}"