#
# Galaxy is configured by default to be usable in a single-user development
# environment. To tune the application for a multi-user production
# environment, see the documentation at:
#
#   http://usegalaxy.org/production
#
# Throughout this sample configuration file, except where stated otherwise,
# uncommented values override the default, whereas commented values are set
# to the default value. Relative paths are relative to the root Galaxy
# directory.
#
# Examples of many of these options are explained in more detail in the
# Galaxy Community Hub:
#
#   https://galaxyproject.org/admin/config
#
# Config hackers are encouraged to check there before asking for help.

# ---- HTTP Server ----------------------------------------------------------

# Configuration of the internal HTTP server.

[server:main]

# The internal HTTP server to use. Currently only Paste is provided. This
# option is required.
use = egg:Paste#http

# The port on which to listen.
#port = 8080

# The address on which to listen. By default, only listen to localhost
# (Galaxy will not be accessible over the network). Use '0.0.0.0' to listen
# on all available network interfaces.
#host = 127.0.0.1

# Use a threadpool for the web server instead of creating a thread for each
# request.
use_threadpool = True

# Number of threads in the web server thread pool.
#threadpool_workers = 10

# Set the number of seconds a thread can work before you should kill it
# (assuming it will never finish) to 3 hours. Default is 600 (10 minutes).
threadpool_kill_thread_limit = 10800

# ---- Filters --------------------------------------------------------------

# Filters sit between Galaxy and the HTTP server.

# These filters are disabled by default. They can be enabled with
# 'filter-with' in the [app:main] section below.

# Define the gzip filter.
[filter:gzip]
use = egg:Paste#gzip

# Define the proxy-prefix filter.
[filter:proxy-prefix]
use = egg:PasteDeploy#prefix
prefix = /galaxy

# ---- Galaxy ---------------------------------------------------------------

# Configuration of the Galaxy application.

[app:main]

# -- Application and filtering

# The factory for the WSGI application. This should not be changed.
paste.app_factory = galaxy.web.buildapp:app_factory

# If not running behind a proxy server, you may want to enable gzip
# compression to decrease the size of data transferred over the network. If
# using a proxy server, please enable gzip compression there instead.
#filter-with = gzip

# If running behind a proxy server and Galaxy is served from a subdirectory,
# enable the proxy-prefix filter and set the prefix in the
# [filter:proxy-prefix] section above.
#filter-with = proxy-prefix

# If proxy-prefix is enabled and you're running more than one Galaxy instance
# behind one hostname, you will want to set this to the same path as the
# prefix in the filter above. This value becomes the "path" attribute set in
# the cookie so the cookies from each instance will not clobber each other.
#cookie_path = None

# -- Database

# By default, Galaxy uses a SQLite database at 'database/universe.sqlite'.
# You may use a SQLAlchemy connection string to specify an external database
# instead. This string takes many options which are explained in detail in
# the config file documentation.
#database_connection = sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE
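# For example, a hypothetical PostgreSQL connection (adjust the user,
# password, host, and database name to match your own server):
#database_connection = postgresql://galaxy:secret@localhost:5432/galaxy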
# If the server logs errors about not having enough database pool
# connections, you will want to increase these values, or consider running
# more Galaxy processes.
#database_engine_option_pool_size = 5
#database_engine_option_max_overflow = 10

# If using MySQL and the server logs the error "MySQL server has gone away",
# you will want to set this to some positive value (7200 should work).
#database_engine_option_pool_recycle = -1

# If large database query results are causing memory or response time issues
# in the Galaxy process, leave the result on the server instead. This option
# is only available for PostgreSQL and is highly recommended.
#database_engine_option_server_side_cursors = False

# Log all database transactions; this can be useful for debugging and
# performance profiling. Logging is done via Python's 'logging' module under
# the qualname 'galaxy.model.orm.logging_connection_proxy'.
#database_query_profiling_proxy = False

# Slow query logging. Queries slower than the threshold indicated below will
# be logged to debug. A value of '0' disables slow query logging. For
# example, set this to .005 to log all queries taking longer than
# 5 milliseconds.
#slow_query_log_threshold = 0

# By default, Galaxy will use the same database to track user data and tool
# shed install data. There are many situations in which it is valuable to
# separate these - for instance, bootstrapping fresh Galaxy instances with
# pretested installs. The following option can be used to separate the tool
# shed install database (all other options listed above but prefixed with
# install_ are also available).
#install_database_connection = sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE

# Setting the following option to true will cause Galaxy to automatically
# migrate the database forward after updates. This is not recommended for
# production use.
#database_auto_migrate = False

# -- Files and directories

# Dataset files are stored in this directory.
#file_path = database/files

# Temporary files are stored in this directory.
#new_file_path = database/tmp

# Tool config files define what tools are available in Galaxy. Tools can be
# locally developed or installed from Galaxy tool sheds.
# (config/tool_conf.xml.sample will be used if left unset and
# config/tool_conf.xml does not exist).
#tool_config_file = config/tool_conf.xml,config/shed_tool_conf.xml

# Enable / disable checking if any tools defined in the above non-shed
# tool_config_files (i.e., tool_conf.xml) have been migrated from the Galaxy
# code distribution to the Tool Shed. This setting should generally be set
# to False only for development Galaxy environments that are often rebuilt
# from scratch, where migrated tools do not need to be available in the
# Galaxy tool panel. If the following setting remains commented, the default
# setting will be True.
#check_migrate_tools = True

# Tool config maintained by tool migration scripts. If you use the migration
# scripts to install tools that have been migrated to the tool shed upon a
# new release, they will be added to this tool config file.
#migrated_tools_config = config/migrated_tools_conf.xml

# File that contains the XML section and tool tags from all tool panel config
# files integrated into a single file that defines the tool panel layout.
# This file can be changed by the Galaxy administrator to alter the layout of
# the tool panel. If not present, Galaxy will create it.
#integrated_tool_panel_config = integrated_tool_panel.xml
# Default path to the directory containing the tools defined in
# tool_conf.xml. Other tool config files must include the tool_path as an
# attribute in the <toolbox> tag.
#tool_path = tools

# -- Tool dependencies

# Path to the directory in which tool dependencies are placed. This is used
# by the Tool Shed to install dependencies and can also be used by
# administrators to manually install or link to dependencies. For details,
# see:
#   https://galaxyproject.org/admin/config/tool-dependencies
# Set the string to None to explicitly disable tool dependency handling.
# If this option is set to None or an invalid path, installing tools with
# dependencies from the Tool Shed will fail.
#tool_dependency_dir = database/dependencies

# The dependency resolvers config file specifies an ordering and options for
# how Galaxy resolves tool dependencies (requirement tags in Tool XML). The
# default ordering is to use the Tool Shed for tools installed that way, then
# local Galaxy packages, and then Conda if available.
# See https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/dependency_resolvers.rst
# for more information on these options.
#dependency_resolvers_config_file = config/dependency_resolvers_conf.xml

# The following Conda dependency resolution options will change the defaults
# for all Conda resolvers, but multiple resolvers can be configured
# independently in dependency_resolvers_config_file and these options
# overridden.

# conda_prefix is the location on the filesystem where Conda packages and
# environments are installed.
# IMPORTANT: Due to a current limitation in conda, the total length of the
# conda_prefix and the job_working_directory path should be less than 50
# characters!
#conda_prefix = <tool_dependency_dir>/_conda

# Override the Conda executable to use. It will default to the one on the
# PATH (if available) and then to <conda_prefix>/bin/conda.
#conda_exec =

# Pass debug flag to conda commands.
#conda_debug = False

# Conda channels to enable by default
# (http://conda.pydata.org/docs/custom-channels.html)
#conda_ensure_channels = iuc,bioconda,conda-forge,defaults

# Set to True to instruct Galaxy to look for and install missing tool
# dependencies before each job runs.
#conda_auto_install = False

# Set to True to instruct Galaxy to install Conda from the web automatically
# if it cannot find a local copy and conda_exec is not configured.
#conda_auto_init = True

# You must set this to True if conda_prefix and job_working_directory are not
# on the same volume, or some Conda dependencies will fail to execute at job
# runtime. Conda will copy package contents instead of creating hardlinks or
# symlinks. This will prevent problems with some specific packages (perl,
# R), at the cost of extra disk space usage and extra time spent copying
# packages.
#conda_copy_dependencies = False

# Certain dependency resolvers (namely Conda) take a considerable amount of
# time to build an isolated job environment in the job_working_directory if
# the job working directory is on a network share. Set the following option
# to True to cache the dependencies in a folder. This option is beta and
# should only be used if you experience long waiting times before a job is
# actually submitted to your cluster.
#use_cached_dependency_manager = False

# By default the tool_dependency_cache_dir is the _cache directory of the
# tool dependency directory.
#tool_dependency_cache_dir = <tool_dependency_dir>/_cache
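# For example, a hypothetical setup using the Conda options above that points
# Galaxy at an existing system-wide Conda installation and installs missing
# tool dependencies automatically before jobs run:
#conda_prefix = /opt/miniconda3
#conda_exec = /opt/miniconda3/bin/conda
#conda_auto_install = True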
# By default, when using a cached dependency manager, the dependencies are
# cached when installing new tools and when using tools for the first time.
# Set this to False if you prefer dependencies to be cached only when
# installing new tools.
#precache_dependencies = True

# File containing the Galaxy Tool Sheds that should be made available to
# install from in the admin interface (.sample used if default does not
# exist).
#tool_sheds_config_file = config/tool_sheds_conf.xml

# Set to True to enable monitoring of tools and tool directories listed in
# any tool config file specified in the tool_config_file option. If changes
# are found, tools are automatically reloaded. Watchdog
# ( https://pypi.python.org/pypi/watchdog ) must be installed and available
# to Galaxy to use this option. Other options include 'auto', which will
# attempt to watch tools if the watchdog library is available but won't fail
# to load Galaxy if it is not, and 'polling', which will use a less efficient
# monitoring scheme that may work in a wider range of scenarios than the
# watchdog default.
#watch_tools = False

# Enable Galaxy to fetch Docker containers registered with quay.io generated
# from tool requirements resolved through Conda. These containers (when
# available) have been generated using mulled - https://github.com/mulled.
# These containers are highly beta and availability will vary by tool. This
# option will additionally only be used for job destinations with Docker
# enabled.
#enable_beta_mulled_containers = False

# Container resolvers configuration (beta). Set up a file describing
# container resolvers to use when discovering containers for Galaxy. If this
# is set to None, the default container resolvers loaded are determined by
# enable_beta_mulled_containers.
#containers_resolvers_config_file = None

# involucro is a tool used to build Docker containers for tools from Conda
# dependencies referenced in tools as `requirement`s. The following path is
# the location of involucro on the Galaxy host. This is ignored if the
# relevant container resolver isn't enabled, and will install on demand
# unless involucro_auto_init is set to False.
#involucro_path = database/dependencies/involucro

# Install involucro as needed to build Docker containers for tools. Ignored
# if the relevant container resolver is not used.
#involucro_auto_init = True

# Enable automatic polling of relevant tool sheds to see if any updates are
# available for installed repositories. Ideally only one Galaxy server
# process should be able to check for repository updates. The setting for
# hours_between_check should be an integer between 1 and 24.
#enable_tool_shed_check = False
#hours_between_check = 12

# Enable use of an in-memory registry with bi-directional relationships
# between repositories (i.e., in addition to lists of dependencies for a
# repository, keep an in-memory registry of dependent items for each
# repository).
#manage_dependency_relationships = False

# XML config file that contains data table entries for the
# ToolDataTableManager. This file is manually maintained by the Galaxy
# administrator (.sample used if default does not exist).
#tool_data_table_config_path = config/tool_data_table_conf.xml

# XML config file that contains additional data table entries for the
# ToolDataTableManager. This file is automatically generated based on the
# current installed tool shed repositories that contain valid
# tool_data_table_conf.xml.sample files. At the time of installation, these
# entries are automatically added to the following file, which is parsed and
# applied to the ToolDataTableManager at server start up.
#shed_tool_data_table_config = config/shed_tool_data_table_conf.xml
# Directory where data used by tools is located. See the samples in that
# directory and the Galaxy Community Hub for help:
#   https://galaxyproject.org/admin/data-integration
#tool_data_path = tool-data

# Directory where Tool Data Table related files will be placed when installed
# from a ToolShed. Defaults to tool_data_path.
#shed_tool_data_path = tool-data

# Set to True to enable monitoring of the tool_data and shed_tool_data_path
# directories. If changes in tool data table files are found, the tool data
# tables for that data manager are automatically reloaded. Watchdog
# ( https://pypi.python.org/pypi/watchdog ) must be installed and available
# to Galaxy to use this option. Other options include 'auto', which will
# attempt to use the watchdog library if it is available but won't fail to
# load Galaxy if it is not, and 'polling', which will use a less efficient
# monitoring scheme that may work in a wider range of scenarios than the
# watchdog default.
#watch_tool_data_dir = False

# File containing old-style genome builds.
#builds_file_path = tool-data/shared/ucsc/builds.txt

# Directory where chrom len files are kept, currently mainly used by
# Trackster.
#len_file_path = tool-data/shared/ucsc/chrom

# Datatypes config file(s), defines what data (file) types are available in
# Galaxy (.sample is used if default does not exist). If a datatype appears
# in multiple files, the last definition is used (though the first sniffer is
# used, so limit sniffer definitions to one file).
#datatypes_config_file = config/datatypes_conf.xml

# Disable the 'Auto-detect' option for file uploads.
#datatypes_disable_auto = False

# Visualizations config directory: where to look for individual visualization
# plugins. The path is relative to the Galaxy root dir. To use an absolute
# path, begin the path with '/'. This is a comma separated list.
# Defaults to "config/plugins/visualizations".
#visualization_plugins_directory = config/plugins/visualizations

# Interactive environment plugins root directory: where to look for
# interactive environment plugins. By default none will be loaded. Set to
# config/plugins/interactive_environments to load Galaxy's stock plugins.
# These will require Docker to be configured and have security
# considerations, so proceed with caution. The path is relative to the
# Galaxy root dir. To use an absolute path, begin the path with '/'. This is
# a comma separated list.
#interactive_environment_plugins_directory =

# To run interactive environment containers in Docker Swarm mode (on an
# existing swarm), set this option to True and set `docker_connect_port` in
# the IE plugin config (ini) file(s) of any IE plugins you have enabled, and
# ensure that you are not using any `docker run`-specific options in your
# plugins' `command_inject` options (swarm mode services run using `docker
# service create`, which has a different and more limited set of options).
# This option can be overridden on a per-plugin basis by using the
# `swarm_mode` option in the plugin's ini config file.
#interactive_environment_swarm_mode = False

# Galaxy can run a "swarm manager" service that will monitor utilization of
# the swarm and provision/deprovision worker nodes as necessary. The service
# has its own configuration file.
#swarm_manager_config_file = config/swarm_manager_conf.yml
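# As a concrete example of the interactive environment options above, the
# stock plugins shipped with Galaxy can be enabled like this (Docker must be
# installed and configured separately):
#interactive_environment_plugins_directory = config/plugins/interactive_environments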
# Interactive tour directory: where to store interactive tour definition
# files. Galaxy ships with several basic interface tours enabled, though a
# different directory with custom tours can be specified here. The path is
# relative to the Galaxy root dir. To use an absolute path, begin the path
# with '/'. This is a comma separated list.
#tour_config_dir = config/plugins/tours

# Webhooks directory: where to store webhooks - plugins to extend the Galaxy
# UI. By default none will be loaded. Set to config/plugins/webhooks/demo to
# load Galaxy's demo webhooks. To use an absolute path, begin the path with
# '/'. This is a comma separated list. Add test/functional/webhooks to this
# list to include the demo webhooks used to test the webhook framework.
#webhooks_dir = config/plugins/webhooks

# Each job is given a unique empty directory as its current working
# directory. This option defines in what parent directory those directories
# will be created.
#job_working_directory = database/jobs_directory

# If using a cluster, Galaxy will write job scripts and stdout/stderr to this
# directory.
#cluster_files_directory = database/pbs

# Mako templates are compiled as needed and cached for reuse; this directory
# is used for the cache.
#template_cache_path = database/compiled_templates

# Set to False to disable various checks Galaxy will do to ensure it can run
# job scripts before attempting to execute or submit them.
#check_job_script_integrity = True

# Number of checks to execute if check_job_script_integrity is enabled.
#check_job_script_integrity_count = 35

# Time to sleep between checks if check_job_script_integrity is enabled (in
# seconds).
#check_job_script_integrity_sleep = .25

# Set the default shell used by non-containerized jobs Galaxy-wide. This
# defaults to bash for all jobs and can be overridden at the destination
# level for heterogeneous clusters. Conda job resolution requires bash or
# zsh, so if this is switched to /bin/sh for instance, Conda resolution
# should be disabled. Containerized jobs always use /bin/sh - so for maximum
# portability, tool authors should assume generated commands run in sh.
#default_job_shell = /bin/bash

# Citation related caching. Tool citation information may be fetched from
# external sources such as http://dx.doi.org/ by Galaxy - the following
# parameters can be used to control the caching used to store this
# information.
#citation_cache_type = file
#citation_cache_data_dir = database/citations/data
#citation_cache_lock_dir = database/citations/lock

# External service types config file, defining what types of
# external_services configurations are available in Galaxy (.sample is used
# if default does not exist).
#external_service_type_config_file = config/external_service_types_conf.xml

# Path to the directory containing the external_service_types defined in the
# config.
#external_service_type_path = external_service_types

# Tools with a number of outputs not known until runtime can write these
# outputs to a directory for collection by Galaxy when the job is done.
# Previously, this directory was new_file_path, but using one global
# directory can cause performance problems, so using job_working_directory
# ('.' or cwd when a job is run) is encouraged. By default, both are checked
# to avoid breaking existing tools.
#collect_outputs_from = new_file_path,job_working_directory
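# For example, to collect runtime-discovered outputs only from the job
# working directory (a hypothetical setting for instances where no legacy
# tools still rely on new_file_path):
#collect_outputs_from = job_working_directory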
# -- Data Storage (Object Store)
#
# Configuration file for the object store. If this is set and exists, it
# overrides any other objectstore settings.
#object_store_config_file = config/object_store_conf.xml

# -- Mail and notification

# Galaxy sends mail for various things: subscribing users to the mailing list
# if they request it, password resets, notifications from the Galaxy Sample
# Tracking system, reporting dataset errors, and sending activation emails.
# To do this, it needs to send mail through an SMTP server, which you may
# define here (host:port).
# Galaxy will automatically try STARTTLS but will continue upon failure.
#smtp_server = None

# If your SMTP server requires a username and password, you can provide them
# here (password in cleartext here, but if your server supports STARTTLS it
# will be sent over the network encrypted).
#smtp_username = None
#smtp_password = None

# If your SMTP server requires SSL from the beginning of the connection
#smtp_ssl = False

# On the user registration form, users may choose to join a mailing list.
# This is the address used to subscribe to the list. Uncomment and leave
# empty if you want to remove this option from the user registration form.
#mailing_join_addr = galaxy-announce-join@bx.psu.edu

# Datasets in an error state include a link to report the error. Those
# reports will be sent to this address. Error reports are disabled if no
# address is set. This address is also shown as a contact to users in case
# of Galaxy misconfiguration and other events users may encounter.
#error_email_to = None

# Email address to use in the 'From' field when sending emails for account
# activations, workflow step notifications and password resets.
# We recommend using a string in the following format:
#   Galaxy Project <galaxy-no-reply@example.com>
# If not configured, '<galaxy-no-reply@HOSTNAME>' will be used.
#email_from = None

# URL of the support resource for the galaxy instance. Used in activation
# emails.
#instance_resource_url = https://galaxyproject.org/

# The e-mail domain blacklist is used for filtering out users that are using
# disposable email addresses during registration. If their address domain
# matches any domain in the blacklist, they are refused registration.
#blacklist_file = config/disposable_email_blacklist.conf

# The registration warning message is used to discourage people from
# registering multiple accounts. Applies mostly to the main Galaxy instance.
# If no message is specified, the warning box will not be shown.
#registration_warning_message = Please register only one account - we provide this service free of charge and have limited computational resources. Multi-accounts are tracked and will be subjected to account termination and data deletion.

# -- Account activation

# User account activation feature global flag. If set to "False", the rest
# of the Account activation configuration is ignored and user activation is
# disabled (i.e. accounts are active since registration).
# Activation also does not work if the SMTP server is not defined.
#user_activation_on = False

# Activation grace period (in hours). Activation is not forced (login is not
# disabled) until the grace period has passed. Users under the grace period
# can't run jobs. Enter 0 to disable the grace period.
# Users with OpenID logins have the grace period forever.
#activation_grace_period = 3

# Shown in a warning box to users that have not been activated yet.
# In use only if activation_grace_period is set.
#inactivity_box_content = Your account has not been activated yet. Feel free to browse around and see what's available, but you won't be able to upload data or run jobs until you have verified your email address.
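# For example, a hypothetical setup using the mail and activation options
# above to relay mail through a local SMTP server and require e-mail
# verification for new accounts:
#smtp_server = localhost:25
#email_from = Example Galaxy <galaxy-no-reply@example.org>
#user_activation_on = True
#activation_grace_period = 6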
# Password expiration period (in days). Users are required to change their
# password every x days. Users will be redirected to the change password
# screen when they log in after their password expires. Enter 0 to disable
# password expiration.
#password_expiration_period = 0

# Galaxy Session Timeout
# This provides a timeout (in minutes) after which a user will have to log
# back in. A duration of 0 disables this feature.
#session_duration = 0

# -- Analytics

# You can enter tracking code here to track visitors' behavior through your
# Google Analytics account. Example: UA-XXXXXXXX-Y
#ga_code = None

# -- Display sites

# Galaxy can display data in various external browsers. These options
# specify which browsers should be available. URLs and builds available at
# these browsers are defined in the specified files.
# If use_remote_user = True, display application servers will be denied
# access to Galaxy and so displaying datasets at these sites will fail.
# display_servers contains a list of hostnames which should be allowed to
# bypass security to display datasets. Please be aware that there are
# security implications if this is allowed. More details (including required
# changes to the proxy server config) are available in the Apache proxy
# documentation on the Galaxy Community Hub.
#
# The list of servers in this sample config is for the UCSC Main, Test and
# Archaea browsers, but the default if left commented is to not allow any
# display sites to bypass security (you must uncomment the line below to
# allow them).
#display_servers = hgw1.cse.ucsc.edu,hgw2.cse.ucsc.edu,hgw3.cse.ucsc.edu,hgw4.cse.ucsc.edu,hgw5.cse.ucsc.edu,hgw6.cse.ucsc.edu,hgw7.cse.ucsc.edu,hgw8.cse.ucsc.edu,lowepub.cse.ucsc.edu

# To disable the old-style display applications that are hardcoded into
# datatype classes, set enable_old_display_applications = False.
# This may be desirable if you are using the new-style, XML-defined display
# applications that have been defined for many of the datatypes that have
# old-style ones.
# There is also a potential security concern with the old-style applications,
# where a malicious party could provide a link that appears to reference the
# Galaxy server, but contains a redirect to a third-party server, tricking a
# Galaxy user into accessing said site.
#enable_old_display_applications = True

# -- Next gen LIMS interface on top of existing Galaxy Sample/Request
# management code.

use_nglims = False
nglims_config_file = tool-data/nglims.yaml

# -- UI Localization

# Show a message box under the masthead.
#message_box_visible = False
#message_box_content = None
#message_box_class = info

# Append "/{brand}" to the "Galaxy" text in the masthead.
#brand = None

# Format string used when showing date and time information.
# The string may contain:
# - the directives used by the Python time.strftime() function (see
#   https://docs.python.org/2/library/time.html#time.strftime ),
# - $locale (complete format string for the server locale),
# - $iso8601 (complete format string as specified by the ISO 8601
#   international standard).
#pretty_datetime_format = $locale (UTC)
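# For example, to display dates and times in ISO 8601 format regardless of
# the server locale:
#pretty_datetime_format = $iso8601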
# URL (with schema http/https) of the Galaxy instance as accessible within
# your local network - if specified, this is used as a default by Pulsar file
# staging and the Jupyter Docker container for communicating back with Galaxy
# via the API.
#
# If you are attempting to set up GIEs on Mac OS X with Docker for Mac, this
# should likely be the IP address of your machine on the virtualbox network
# (vboxnet0) set up for the Docker host VM. This can be found by running
# ifconfig and using the IP address of the vboxnet0 network.
#galaxy_infrastructure_url = http://localhost:8080

# If the above URL cannot be determined ahead of time in dynamic environments
# but the port which should be used to access Galaxy can be - this should be
# set to prevent Galaxy from having to guess. For example, if Galaxy is
# sitting behind a proxy with REMOTE_USER enabled, infrastructure shouldn't
# talk to the Python processes directly and this should be set to 80 or 443,
# etc. If unset, this file will be read for a server block defining a port
# corresponding to the webapp.
#galaxy_infrastructure_web_port = 8080

# The URL of the page to display in Galaxy's middle pane when loaded. This
# can be an absolute or relative URL.
#welcome_url = /static/welcome.html

# The URL linked by the "Galaxy/brand" text.
#logo_url = /

# The URL linked by the "Wiki" link in the "Help" menu.
#wiki_url = https://galaxyproject.org/

# The URL linked by the "Support" link in the "Help" menu.
#support_url = https://galaxyproject.org/support

# The URL linked by the "How to Cite Galaxy" link in the "Help" menu.
#citation_url = https://galaxyproject.org/citing-galaxy

# The URL linked by the "Search" link in the "Help" menu.
#search_url = https://galaxyproject.org/search/

# The URL linked by the "Mailing Lists" link in the "Help" menu.
#mailing_lists_url = https://galaxyproject.org/mailing-lists

# The URL linked by the "Videos" link in the "Help" menu.
#screencasts_url = https://vimeo.com/galaxyproject

# Points to the GenomeSpace UI service which will be used by the GenomeSpace
# importer and exporter tools.
#genomespace_ui_url = https://gsui.genomespace.org/jsui/

# The URL linked by the "Terms and Conditions" link in the "Help" menu, as
# well as on the user registration and login forms and in the activation
# emails.
#terms_url = None

# The URL linked by the "Galaxy Q&A" link in the "Help" menu.
# The Galaxy Q&A site is under development; when the site is done, this URL
# will be set and uncommented.
#qa_url =

# Serve static content, which must be enabled if you're not serving it via a
# proxy server. These options should be self-explanatory and so are not
# documented individually. You can use these paths (or ones in the proxy
# server) to point to your own styles.
#static_enabled = True
#static_cache_time = 360
#static_dir = static/
#static_images_dir = static/images
#static_favicon_dir = static/favicon.ico
#static_scripts_dir = static/scripts/
#static_style_dir = static/june_2007_style/blue
#static_robots_txt = static/robots.txt

# Incremental Display Options
#display_chunk_size = 65536

# -- Advanced proxy features

# For help on configuring the Advanced proxy features, see:
#   http://usegalaxy.org/production

# Apache can handle file downloads (Galaxy-to-user) via mod_xsendfile. Set
# this to True to inform Galaxy that mod_xsendfile is enabled upstream.
#apache_xsendfile = False

# The same download handling can be done by nginx using X-Accel-Redirect.
# This should be set to the path defined in the nginx config as an internal
# redirect with access to Galaxy's data files (see documentation linked
# above).
#nginx_x_accel_redirect_base = False
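# For example, if your nginx config defines a hypothetical internal location
# named /_x_accel_redirect/ rooted at the directory containing Galaxy's
# dataset files:
#nginx_x_accel_redirect_base = /_x_accel_redirect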
# nginx can make use of mod_zip to create zip files containing multiple
# library files. If using X-Accel-Redirect, this can be the same value as
# that option.
#nginx_x_archive_files_base = False

# If using compression in the upstream proxy server, use this option to
# disable gzipping of library .tar.gz and .zip archives, since the proxy
# server will do it faster on the fly.
#upstream_gzip = False

# The following default adds a header to web request responses that will
# cause modern web browsers to not allow Galaxy to be embedded in the frames
# of web applications hosted at other hosts - this can help prevent a class
# of attack called clickjacking
# (https://www.owasp.org/index.php/Clickjacking). If you configure a proxy
# in front of Galaxy, please ensure this header remains intact to protect
# your users. Uncomment and leave empty to not set the `X-Frame-Options`
# header.
#x_frame_options = SAMEORIGIN

# nginx can also handle file uploads (user-to-Galaxy) via
# nginx_upload_module. Configuration for this is complex and explained in
# detail in the documentation linked above. The upload store is a temporary
# directory in which files uploaded by the upload module will be placed.
#nginx_upload_store = False

# This value overrides the action set on the file upload form, e.g. the web
# path where the nginx_upload_module has been configured to intercept upload
# requests.
#nginx_upload_path = False

# Galaxy can also use nginx_upload_module to receive files staged out upon
# job completion by remote job runners (i.e. Pulsar) that initiate staging
# operations on the remote end. See the Galaxy nginx documentation for the
# corresponding nginx configuration.
#nginx_upload_job_files_store = False
#nginx_upload_job_files_path = False

# Have Galaxy manage the dynamic proxy component for routing requests to
# other services based on Galaxy's session cookie. It will attempt to do
# this by default, though you do need to install node+npm and do an npm
# install from `lib/galaxy/web/proxy/js`. It is generally more robust to
# configure this externally, managing it however Galaxy is managed. If True,
# Galaxy will only launch the proxy if it is actually going to be used (e.g.
# for Jupyter).
#dynamic_proxy_manage=True

# As of 16.04, Galaxy supports multiple proxy types: the original NodeJS
# implementation, alongside a new Golang single-binary-no-dependencies
# version. Valid values are (node, golang).
#dynamic_proxy=node

# The NodeJS dynamic proxy can use an SQLite database or a JSON file for IPC;
# set that here.
#dynamic_proxy_session_map=database/session_map.sqlite

# Set the port and IP for the dynamic proxy to bind to; this must match the
# external configuration if dynamic_proxy_manage is False.
#dynamic_proxy_bind_port=8800
#dynamic_proxy_bind_ip=0.0.0.0

# Enable verbose debugging of the Galaxy-managed dynamic proxy.
#dynamic_proxy_debug=False

# The dynamic proxy is proxied by an external proxy (e.g. an apache frontend
# to nodejs to wrap connections in SSL).
#dynamic_proxy_external_proxy=False

# Additionally, when the dynamic proxy is proxied by an upstream server,
# you'll want to specify a prefixed URL so both Galaxy and the proxy reside
# under the same path that your cookies are under. This will result in a URL
# like https://FQDN/galaxy-prefix/gie_proxy for proxying.
#dynamic_proxy_prefix=gie_proxy
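# For example, a hypothetical deployment that runs the NodeJS proxy outside
# of Galaxy's control, behind the same upstream proxy as Galaxy itself:
#dynamic_proxy_manage=False
#dynamic_proxy=node
#dynamic_proxy_bind_ip=127.0.0.1
#dynamic_proxy_bind_port=8800
#dynamic_proxy_external_proxy=True
#dynamic_proxy_prefix=gie_proxy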
# The Golang proxy also manages the Docker containers more closely than the
# NodeJS proxy, so it is able to expose more container management related
# options.

# This attribute governs the minimum length of time between consecutive
# HTTP/WS requests through the proxy, before the proxy considers a container
# as being inactive and kills it.
#dynamic_proxy_golang_noaccess = 60

# In order to kill containers, the golang proxy has to check at some interval
# for possibly dead containers. This is exposed as a configurable parameter,
# but the default value is probably fine.
#dynamic_proxy_golang_clean_interval = 10

# The golang proxy needs to know how to talk to your docker daemon.
# Currently TLS is not supported; that will come in an update.
#dynamic_proxy_golang_docker_address = unix:///var/run/docker.sock

# The golang proxy uses a RESTful HTTP API for communication with Galaxy
# instead of a JSON or SQLite file for IPC. If you do not specify this, it
# will be set randomly for you. You should set this if you are managing the
# proxy manually.
#dynamic_proxy_golang_api_key = None

# -- Logging and Debugging

# If True, Galaxy will attempt to configure a simple root logger if a
# "loggers" section does not appear in this configuration file.
#auto_configure_logging = True

# Verbosity of console log messages. Acceptable values can be found here:
# https://docs.python.org/2/library/logging.html#logging-levels
#log_level = DEBUG

# Print database operations to the server log (warning, quite verbose!).
#database_engine_option_echo = False

# Print database pool operations to the server log (warning, quite verbose!).
#database_engine_option_echo_pool = False

# Turn on logging of application events and some user events to the database.
#log_events = True

# Turn on logging of user actions to the database. Actions currently logged
# are grid views, tool searches, and use of the "recently used tools" menu.
# The log_events and log_actions functionality will eventually be merged.
#log_actions = True

# Fluentd configuration. Various events can be logged to the fluentd
# instance configured below by enabling fluent_log.
#fluent_log = False
#fluent_host = localhost
#fluent_port = 24224

# Sanitize all HTML tool output. By default, all tool output served as
# 'text/html' will be sanitized thoroughly. This can be disabled if you have
# special tools that require unaltered output. WARNING: disabling this does
# make the Galaxy instance susceptible to XSS attacks initiated by your
# users.
#sanitize_all_html = True

# Whitelist sanitization file.
# Datasets created by tools listed in this file are trusted and will not have
# their HTML sanitized on display. This can be manually edited or manipulated
# through the Admin control panel -- see "Manage Display Whitelist".
#sanitize_whitelist_file = config/sanitize_whitelist.txt

# By default Galaxy will serve non-HTML tool output that may potentially
# contain browser-executable JavaScript content as plain text. This will for
# instance cause SVG datasets to not render properly and so may be disabled
# by setting the following option to True.
#serve_xss_vulnerable_mimetypes = False

# Return an Access-Control-Allow-Origin response header that matches the
# Origin header of the request if that Origin hostname matches one of the
# strings or regular expressions listed here. This is a comma separated list
# of hostname strings or regular expressions beginning and ending with /.
# E.g. mysite.com,google.com,usegalaxy.org,/^[\w\.]*example\.com/
# See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS
#allowed_origin_hostnames = None
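# For example, to allow CORS requests from a hypothetical companion
# application at app.example.org and any subdomain of example.org:
#allowed_origin_hostnames = app.example.org,/^[\w\.]*example\.org/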
# Set the following to True to use Jupyter nbconvert to build HTML from
# Jupyter notebooks in Galaxy histories. This process may allow users to
# execute arbitrary code or serve arbitrary HTML. If enabled, Jupyter must
# be available and on Galaxy's PATH; to do this, run
# `pip install jinja2 pygments jupyter` in Galaxy's virtualenv.
#trust_jupyter_notebook_conversion = False

# Debug enables access to various config options useful for development and
# debugging: use_lint, use_profile, use_printdebug and use_interactive. It
# also causes the files used by PBS/SGE (submission script, output, and
# error) to remain on disk after the job is complete.
#debug = False

# Check for WSGI compliance.
#use_lint = False

# Run the Python profiler on each request.
#use_profile = False

# Intercept print statements and show them on the returned page.
#use_printdebug = True

# Enable live debugging in your browser. This should NEVER be enabled on a
# public site. Enabled in the sample config for development.
use_interactive = True

# Write thread status periodically to 'heartbeat.log' (careful, uses disk
# space rapidly!). Useful to determine why your processes may be consuming a
# lot of CPU.
#use_heartbeat = False

# Control the period (in seconds) between dumps. Use -1 to disable.
# Regardless of this setting, if use_heartbeat is enabled, you can send a
# Galaxy process (unless running with uWSGI) SIGUSR1 (`kill -USR1`) to force
# a dump.
#heartbeat_interval = 20

# Heartbeat log filename. Can accept the template variables {server_name}
# and {pid}.
#heartbeat_log = heartbeat_{server_name}.log

# Log to Sentry
# Sentry is an open source logging and error aggregation platform. Setting
# sentry_dsn will enable the Sentry middleware and errors will be sent to the
# indicated sentry instance. This connection string is available in your
# sentry instance under <project_name> -> Settings -> API Keys.
#sentry_dsn = None

# Log to statsd
# Statsd is an external statistics aggregator (https://github.com/etsy/statsd)
# Enabling the following options will cause galaxy to log request timing and
# other statistics to the configured statsd instance. The statsd_prefix is
# useful if you are running multiple Galaxy instances and want to segment
# statistics between them within the same aggregator.
#statsd_host=
#statsd_port=8125
#statsd_prefix=galaxy

# Log to graphite
# Graphite is an external statistics aggregator
# (https://github.com/graphite-project/carbon)
# Enabling the following options will cause galaxy to log request timing and
# other statistics to the configured graphite instance. The graphite_prefix
# is useful if you are running multiple Galaxy instances and want to segment
# statistics between them within the same aggregator.
#graphite_host=
#graphite_port=2003
#graphite_prefix=galaxy

# -- Data Libraries

# These library upload options are described in much more detail in the
# Galaxy Community Hub:
#   https://galaxyproject.org/data-libraries/

# Add an option to the library upload form which allows administrators to
# upload a directory of files.
#library_import_dir = None

# Add an option to the library upload form which allows authorized
# non-administrators to upload a directory of files. The configured
# directory must contain sub-directories named the same as the non-admin
# user's Galaxy login ( email ). The non-admin user is restricted to
# uploading files or sub-directories of files contained in their directory.
#user_library_import_dir = None
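# For example, a hypothetical staging area in which each user has a
# subdirectory named after their Galaxy login (e-mail) under
# /data/galaxy_import:
#user_library_import_dir = /data/galaxy_import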
# For security reasons, users may not import any files that actually lie
# outside of their `user_library_import_dir` (e.g. using symbolic links). A
# list of directories can be allowed by setting the following option (the
# list is comma-separated). Be aware that *any* user with library import
# permissions can import from anywhere in these directories (assuming they
# are able to create symlinks to them).
#user_library_import_symlink_whitelist = None

# Allow admins to paste filesystem paths during upload. For libraries this
# adds an option to the admin library upload tool allowing admins to paste
# filesystem paths to files and directories in a box, and these paths will be
# added to a library. For history uploads, this allows pasting in paths as
# URIs (i.e. prefixed with file://). Set to True to enable. Please note the
# security implication that this will give Galaxy Admins access to anything
# your Galaxy user has access to.
#allow_path_paste = False

# Users may choose to download multiple files from a library in an archive.
# By default, Galaxy allows users to select from a few different archive
# formats if testing shows that Galaxy is able to create files using these
# formats. Specific formats can be disabled with this option; separate more
# than one format with commas. Available formats are currently 'zip', 'gz',
# and 'bz2'.
#disable_library_comptypes =

# Some sequencer integration features in beta allow you to automatically
# transfer datasets. This is done using a lightweight transfer manager which
# runs outside of Galaxy (but is spawned by it automatically). Galaxy will
# communicate with this manager over the port specified here.
#transfer_manager_port = 8163

# Search data libraries with whoosh.
#enable_whoosh_library_search = True
# Whoosh indexes are stored in this directory.
#whoosh_index_dir = database/whoosh_indexes

# Search data libraries with lucene.
#enable_lucene_library_search = False
# Maximum file size to index for searching, in MB.
#fulltext_max_size = 500
#fulltext_noindex_filetypes = bam,sam,wig,bigwig,fasta,fastq,fastqsolexa,fastqillumina,fastqsanger
# Base URL of the server providing search functionality using lucene.
#fulltext_url = http://localhost:8081

# -- Toolbox Search

# The following boosts are used to customize this instance's toolbox search.
# The higher the boost, the more importance the scoring algorithm gives to
# the given field. Section refers to the tool group in the tool panel. The
# rest of the fields are the tool's attributes.
#tool_name_boost = 9
#tool_section_boost = 3
#tool_description_boost = 2
#tool_label_boost = 1
#tool_stub_boost = 5
#tool_help_boost = 0.5

# Limits the number of results in toolbox search. Can be used to tweak how
# many results will appear.
#tool_search_limit = 20

# Enable/disable ngram search for tools. This makes tool search results
# tolerant of spelling mistakes in the query by dividing the query into
# multiple ngrams and searching for each ngram.
#tool_enable_ngram_search = False

# Set minimum and maximum sizes of ngrams.
#tool_ngram_minsize = 3
#tool_ngram_maxsize = 4

# -- Users and Security

# Galaxy encodes various internal values when these values will be output in
# some format (for example, in a URL or cookie). You should set a key to be
# used by the algorithm that encodes and decodes these values. It can be any
# string up to 448 bits long.
# One simple way to generate a value for this is with the shell command:
#   python -c 'import time; print time.time()' | md5sum | cut -f 1 -d ' '
#id_secret = USING THE DEFAULT IS NOT SECURE!
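# For example, a hypothetical value produced by the command above (generate
# your own; changing it later will invalidate existing encoded ids in URLs
# and cookies):
#id_secret = c5c9b3e79dbf1d813f0f7f47292acb55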
# User authentication can be delegated to an upstream proxy server (usually
# Apache). The upstream proxy should set a REMOTE_USER header in the
# request. Enabling remote user disables regular logins. For more
# information, see:
#   https://galaxyproject.org/admin/config/apache-proxy
#use_remote_user = False

# If use_remote_user is enabled and your external authentication method just
# returns bare usernames, set a default mail domain to be appended to
# usernames, to become your Galaxy usernames (email addresses).
#remote_user_maildomain = None

# If use_remote_user is enabled, the header that the upstream proxy provides
# the remote username in defaults to HTTP_REMOTE_USER (the 'HTTP_' is
# prepended by WSGI). This option allows you to change the header. Note,
# you still need to prepend 'HTTP_' to the header in this option, but your
# proxy server should *not* include 'HTTP_' at the beginning of the header
# name.
#remote_user_header = HTTP_REMOTE_USER

# If use_remote_user is enabled, anyone who can log in to the Galaxy host may
# impersonate any other user by simply sending the appropriate header. Thus
# a secret shared between the upstream proxy server and Galaxy is required.
# If anyone other than the Galaxy user is using the server, then apache/nginx
# should pass a value in the header 'GX_SECRET' that is identical to the one
# below.
#remote_user_secret = USING THE DEFAULT IS NOT SECURE!

# If use_remote_user is enabled, you can set this to a URL that will log your
# users out.
#remote_user_logout_href = None

# If your proxy and/or authentication source does not normalize e-mail
# addresses or user names being passed to Galaxy - set the following option
# to True to force these to lower case.
#normalize_remote_user_email = False

# If an e-mail address is specified here, it will hijack remote user
# mechanics (``use_remote_user``) and have the webapp inject a single fixed
# user. This has the effect of turning Galaxy into a single user application
# with no login or external proxy required. Such applications should not be
# exposed to the world.
#single_user =

# Administrative users - set this to a comma-separated list of valid Galaxy
# users (email addresses). These users will have access to the Admin section
# of the server, and will have access to create users, groups, roles,
# libraries, and more. For more information, see:
#   https://galaxyproject.org/admin/
#admin_users = None

# Force everyone to log in (disable anonymous access).
#require_login = False

# Show the site's welcome page (see welcome_url) alongside the login page
# (even if require_login is True).
#show_welcome_with_login = False

# Allow unregistered users to create new accounts (otherwise, they will have
# to be created by an admin).
#allow_user_creation = True

# Allow administrators to delete accounts.
#allow_user_deletion = False

# Allow administrators to log in as other users (useful for debugging).
#allow_user_impersonation = False

# Allow users to remove their datasets from disk immediately (otherwise,
# datasets will be removed after a time period specified by an administrator
# in the cleanup scripts run via cron).
#allow_user_dataset_purge = True

# By default, users' data will be public, but setting this to True will cause
# it to be private. Does not affect existing users and data, only ones
# created after this option is set. Users may still change their default
# back to public.
#new_user_dataset_access_role_default_private = False
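# For example, a hypothetical private instance using the account options
# above: two administrators, accounts created only by admins, and no
# anonymous access:
#admin_users = admin1@example.org,admin2@example.org
#allow_user_creation = False
#require_login = True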
# Expose user list. Setting this to True will expose the user list to
# authenticated users. This makes sharing datasets in smaller Galaxy
# instances much easier as they can type a name/email and have the correct
# user show up. This makes less sense on large public Galaxy instances where
# that data shouldn't be exposed. For semi-public Galaxies, it may make
# sense to expose just the username and not the email, or vice versa.
#expose_user_name = False
#expose_user_email = False

# Whitelist of local network addresses for the "Upload from URL" dialog.
# By default, Galaxy will deny access to the local network address space, to
# prevent users making requests to services which the administrator did not
# intend to expose. Previously, you could request any network service that
# Galaxy might have had access to, even if the user could not normally access
# it. It should be a comma separated list of IP addresses or IP
# address/mask, e.g. 10.10.10.10,10.0.1.0/24,fd00::/8
#fetch_url_whitelist=

# -- Beta features

# Enable the new run workflow form.
#run_workflow_toolform_upgrade = True

# Enable new preferences which showcase the new UI for the user preferences.
#enable_new_user_preferences = False

# Enable Galaxy to communicate directly with a sequencer.
#enable_sequencer_communication = False

# Enable the new interface for installing tools from the Tool Shed via the
# API. The Admin menu will list both if enabled.
#enable_beta_ts_api_install = True

# Enable the new container interface for Interactive Environments.
#enable_beta_containers_interface = False

# Set the following to a number of threads greater than 1 to spawn a Python
# task queue for dealing with large tool submissions (either through the tool
# form or as part of an individual workflow step across a large collection).
# The size of a "large" tool request is controlled by the second parameter
# below and defaults to 10. This affects workflow scheduling and web
# processes, not job handlers.
#tool_submission_burst_threads = 1
#tool_submission_burst_at = 10

# Enable beta workflow modules that should not yet be considered part of
# Galaxy's stable API.
#enable_beta_workflow_modules = False

# Force usage of Galaxy's beta workflow scheduler under certain circumstances
# - this workflow scheduling forces Galaxy to schedule workflows in the
# background so initial submission of the workflows is significantly sped up.
# This does however force the user to refresh their history manually to see
# newly scheduled steps (for "normal" workflows - steps are still scheduled
# far in advance of them being queued and scheduling here doesn't refer to
# actual cluster job scheduling).
# Workflows containing more than the specified number of steps will always
# use Galaxy's beta workflow scheduling.
#force_beta_workflow_scheduled_min_steps=250

# Switch to using Galaxy's beta workflow scheduling for all workflows
# involving collections.
#force_beta_workflow_scheduled_for_collections=False

# If multiple job handlers are enabled, allow Galaxy to schedule workflow
# invocations in multiple handlers simultaneously. This is discouraged
# because it results in a less predictable order of workflow datasets within
# histories.
#parallelize_workflow_scheduling_within_histories = False

# This is the maximum amount of time a workflow invocation may stay in an
# active scheduling state, in seconds. Set to -1 to disable this maximum and
# allow any workflow invocation to schedule indefinitely. The default
# corresponds to 1 month.
#maximum_workflow_invocation_duration = 2678400

# Force serial scheduling of workflows within the context of a particular
# history.
#history_local_serial_workflow_scheduling=False
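# For example, to stop scheduling any workflow invocation that is still in an
# active scheduling state after one week (hypothetical value; 7 days = 604800
# seconds):
#maximum_workflow_invocation_duration = 604800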
# Enable authentication via OpenID. Allows users to log in to their Galaxy
# account by authenticating with an OpenID provider.
#enable_openid = False
# .sample used if default does not exist
#openid_config_file = config/openid_conf.xml
#openid_consumer_cache_path = database/openid_consumer_cache

# XML config file that allows the use of different authentication providers
# (e.g. LDAP) instead of or in addition to local authentication (.sample is
# used if default does not exist).
#auth_config_file = config/auth_conf.xml

# Optional list of email addresses of API users who can make calls on behalf
# of other users.
#api_allow_run_as = None

# Master key that allows many API admin actions to be used without actually
# having a defined admin user in the database/config. Only set this if you
# need to bootstrap Galaxy; you probably do not want to set this on public
# servers.
#master_api_key = changethis

# Enable tool tags (associating tools with tags). This has its own option
# since its implementation has a few performance implications on startup for
# large servers.
#enable_tool_tags = False

# Enable a feature when running workflows. When enabled, default datasets
# are selected for "Set at Runtime" inputs from the history such that the
# same input will not be selected twice, unless there are more inputs than
# compatible datasets in the history. When False, the most recently added
# compatible item in the history will be used for each "Set at Runtime"
# input, independent of others in the workflow.
#enable_unique_workflow_defaults = False

# The URL to the myExperiment instance being used (omit scheme but include
# port).
#myexperiment_url = www.myexperiment.org:80

# Enable Galaxy's "Upload via FTP" interface. You'll need to install and
# configure an FTP server (we've used ProFTPd since it can use Galaxy's
# database for authentication) and set the following two options.
# This should point to a directory containing subdirectories matching users'
# identifier (defaults to e-mail), where Galaxy will look for files.
#ftp_upload_dir = None

# This should be the hostname of your FTP server, which will be provided to
# users in the help text.
#ftp_upload_site = None

# User attribute to use as the subdirectory in calculating the default
# ftp_upload_dir pattern. By default this will be email, so a user's FTP
# upload directory will be ${ftp_upload_dir}/${user.email}. You can set this
# to other attributes such as id or username, though.
#ftp_upload_dir_identifier = email

# Python string template used to determine an FTP upload directory for a
# particular user.
#ftp_upload_dir_template = ${ftp_upload_dir}/${ftp_upload_dir_identifier}

# This should be set to False to prevent Galaxy from deleting uploaded FTP
# files as it imports them.
#ftp_upload_purge = True

# Enable enforcement of quotas. Quotas can be set from the Admin interface.
#enable_quotas = False

# This option allows users to see the full path of datasets via the "View
# Details" option in the history. This option also exposes the command line
# to non-administrative users. Administrators can always see dataset paths.
#expose_dataset_path = False

# This option allows users to see the job metrics (except for environment
# variables).
#expose_potentially_sensitive_job_metrics = False

# Data manager configuration options

# Allow non-admin users to view available Data Manager options.
#enable_data_manager_user_view = False
# File where Data Managers are configured (.sample used if default does not
# exist).
#data_manager_config_file = config/data_manager_conf.xml

# File where Tool Shed based Data Managers are configured.
#shed_data_manager_config_file = config/shed_data_manager_conf.xml

# Directory to store Data Manager based tool-data; defaults to
# tool_data_path.
#galaxy_data_manager_data_path = tool-data

# -- Job Execution

# To increase performance of job execution and the web interface, you can
# separate Galaxy into multiple processes. There is more than one way to do
# this, and they are explained in detail in the documentation:
#
#   https://galaxyproject.org/admin/config/performance/scaling

# By default, Galaxy manages and executes jobs from within a single process
# and notifies itself of new jobs via in-memory queues. Jobs are run locally
# on the system on which Galaxy is started. Advanced job running
# capabilities can be configured through the job configuration file.
#job_config_file = config/job_conf.xml

# When jobs fail due to job runner problems, Galaxy can be configured to
# retry these or reroute the jobs to new destinations. Very fine control of
# this is available with resubmit declarations in job_conf.xml. For simple
# deployments of Galaxy though, the following attribute can define
# resubmission conditions for all job destinations. If any job destination
# defines even one resubmission condition explicitly in job_conf.xml, the
# condition described by this option will not apply to that destination. For
# instance, the condition:
#   'attempt < 3 and unknown_error and (time_running < 300 or time_since_queued < 300)'
# would retry up to two times jobs that didn't fail due to detected memory or
# walltime limits but did fail quickly (either while queueing or running).
# The commented out default below results in no default job resubmission
# condition; failing jobs are just failed outright.
#default_job_resubmission_condition =

# In multiprocess configurations, notification between processes about new
# jobs must be done via the database. In single process configurations, this
# can be done in memory, which is a bit quicker.
#track_jobs_in_database = True

# This enables splitting of jobs into tasks, if specified by the particular
# tool config.
# This is a new feature and not recommended for production servers yet.
#use_tasked_jobs = False
#local_task_queue_workers = 2

# Enable job recovery (if Galaxy is restarted while cluster jobs are running,
# it can "recover" them when it starts). This is not safe to use if you are
# running more than one Galaxy server using the same database.
#enable_job_recovery = True

# Although it is fairly reliable, setting metadata can occasionally fail. In
# these instances, you can choose to retry setting it internally or leave it
# in a failed state (since retrying internally may cause the Galaxy process
# to be unresponsive). If this option is set to False, the user will be
# given the option to retry externally, or set metadata manually (when
# possible).
#retry_metadata_internally = True
# In multiprocess configurations, notification between processes about new
# jobs must be done via the database. In single-process configurations, this
# can be done in memory, which is a bit quicker.
#track_jobs_in_database = True

# This enables splitting of jobs into tasks, if specified by the particular
# tool config.
# This is a new feature and not recommended for production servers yet.
#use_tasked_jobs = False
#local_task_queue_workers = 2

# Enable job recovery (if Galaxy is restarted while cluster jobs are running,
# it can "recover" them when it starts). This is not safe to use if you are
# running more than one Galaxy server using the same database.
#enable_job_recovery = True

# Although it is fairly reliable, setting metadata can occasionally fail. In
# these instances, you can choose to retry setting it internally or leave it
# in a failed state (since retrying internally may cause the Galaxy process
# to be unresponsive). If this option is set to False, the user will be given
# the option to retry externally, or set metadata manually (when possible).
#retry_metadata_internally = True

# Very large metadata values can cause Galaxy crashes. This option limits the
# maximum size of a metadata value (in bytes used in memory, not the final
# database value size) that Galaxy will attempt to save with a dataset. Use 0
# to disable this feature. The default is 5MB, but as low as 1MB seems to be
# a reasonable size.
#max_metadata_value_size = 5242880

# If (for example) you run on a cluster and your datasets (by default,
# database/files/) are mounted read-only, this option will override tool
# output paths to write outputs to the working directory instead, and the job
# manager will move the outputs to their proper place in the dataset
# directory on the Galaxy server after the job completes.
#outputs_to_working_directory = False

# If your network filesystem's caching prevents the Galaxy server from seeing
# the job's stdout and stderr files when it completes, you can retry reading
# these files. The job runner will retry the number of times specified below,
# waiting 1 second between tries. For NFS, you may want to try the -noac
# mount option (Linux) or -actimeo=0 (Solaris).
#retry_job_output_collection = 0

# In the past Galaxy would preserve its Python environment when running jobs
# (and still does for internal tools packaged with Galaxy). This behavior
# exposes Galaxy internals to tools and could result in problems when
# activating Python environments for tools (such as with Conda packaging).
# The default, legacy_only, will restrict this behavior to tools identified
# by the Galaxy team as requiring this environment. Set this to "always" to
# restore the previous behavior (and potentially break Conda dependency
# resolution for many tools). Set this to legacy_and_local to preserve the
# environment for legacy tools and locally managed tools (this might be
# useful, for instance, if you are installing software into Galaxy's
# virtualenv for tool development).
#preserve_python_environment = legacy_only

# Clean up various bits of jobs left on the filesystem after completion.
# These bits include the job working directory, external metadata temporary
# files, and DRM stdout and stderr files (if using a DRM). Possible values
# are: always, onsuccess, never
#cleanup_job = always

# For sites where all users in Galaxy match users on the system on which
# Galaxy runs, the DRMAA job runner can be configured to submit jobs to the
# DRM as the actual user instead of as the user running the Galaxy server
# process. For details on these options, see the documentation at:
#
# https://galaxyproject.org/admin/config/performance/cluster
#
#drmaa_external_runjob_script = sudo -E scripts/drmaa_external_runner.py --assign_all_groups
#drmaa_external_killjob_script = sudo -E scripts/drmaa_external_killer.py
#external_chown_script = sudo -E scripts/external_chown_script.py

# For job submission as the actual system user, Galaxy can extract the user
# name from the email address (actually the local part before the @) or from
# the username; both are stored in the Galaxy database. The latter option is
# particularly useful for installations that get their authentication from
# LDAP. Galaxy can also accept the name of a common system user (e.g.
# galaxy_worker) who can run every job being submitted. This user should not
# be the same user that runs the Galaxy server.
# Possible values are user_email (default), username, or the name of a common
# system user.
#real_system_username = user_email
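# For example (illustrative only), a site whose users authenticate via LDAP
# and whose Galaxy usernames match system accounts might submit jobs under
# the stored username (left commented out here):
#real_system_username = username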
# File to source to set up the environment when running jobs. By default, the
# environment in which the Galaxy server starts is used when running jobs
# locally, and the environment set up per the DRM's submission method and
# policy is used when running jobs on a cluster (try testing with `qsub` on
# the command line). environment_setup_file can be set to the path of a file
# on the cluster that should be sourced by the user to set up the environment
# prior to running tools. This can be especially useful for running jobs as
# the actual user, to remove the need to configure each user's environment
# individually.
#environment_setup_file = None

# Optional file containing the definition of job resource data entry fields.
# These fields will be presented to users in the tool forms and allow them to
# overwrite default job resources such as number of processors, memory and
# walltime.
#job_resource_params_file = config/job_resource_params_conf.xml

# If using job concurrency limits (configured in job_config_file), several
# extra database queries must be performed to determine the number of jobs a
# user has dispatched to a given destination. By default, these queries will
# happen for every job that is waiting to run, but if cache_user_job_count is
# set to True, it will only happen once per iteration of the handler queue.
# Although better for performance due to reduced queries, the tradeoff is a
# greater possibility that jobs will be dispatched past the configured limits
# if running many handlers.
#cache_user_job_count = False

# -- ToolBox filtering

# Modules from lib/galaxy/tools/toolbox/filters/ can be specified in the
# following lines. tool_* filters will be applied for all users and cannot be
# changed by them. user_tool_* filters will be shown under user preferences
# and can be toggled on and off at runtime. The examples shown below are not
# real defaults (no custom filters are applied by default), but they can be
# enabled by renaming examples.py.sample in the filters directory to
# examples.py.
#tool_filters =
#tool_label_filters =
#tool_section_filters =
#user_tool_filters = examples:restrict_upload_to_admins, examples:restrict_encode
#user_tool_section_filters = examples:restrict_text
#user_tool_label_filters = examples:restrict_upload_to_admins, examples:restrict_encode

# The base modules that are searched for modules as described above can be
# modified, and modules external to Galaxy can be searched, by modifying the
# following option.
#toolbox_filter_base_modules = galaxy.tools.toolbox.filters,galaxy.tools.filters

# -- Galaxy Application Internal Message Queue

# Galaxy uses AMQP internally for communicating between processes. For
# example, when reloading the toolbox or locking job execution, the process
# that handled that particular request will tell all others to also reload,
# lock jobs, etc.
# For connection examples, see
# http://docs.celeryproject.org/projects/kombu/en/latest/userguide/connections.html
#
# Without specifying anything here, Galaxy will first attempt to use your
# specified database_connection above. If that is not specified either,
# Galaxy will automatically create and use a separate sqlite database located
# in your database/ folder (as indicated in the commented-out line below).
#amqp_internal_connection = sqlalchemy+sqlite:///./database/control.sqlite?isolation_level=IMMEDIATE
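# As an illustration of the option above (hypothetical broker and
# credentials; left commented out), an external RabbitMQ broker could be
# supplied as a kombu-style URL instead of the database:
#amqp_internal_connection = amqp://guest:guest@localhost:5672//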
# Galaxy real time communication server settings
#enable_communication_server = False
#communication_server_host = http://localhost
#communication_server_port = 7070
# persistent_communication_rooms is a comma-separated list of rooms that
# should always be available.
#persistent_communication_rooms =

# ---- Galaxy External Message Queue -------------------------------------------------

# Galaxy uses the Advanced Message Queuing Protocol (AMQP) to receive messages
# from external sources like barcode scanners. Galaxy has been tested against
# the RabbitMQ AMQP implementation. For Galaxy to receive messages from a
# message queue, the RabbitMQ server has to be set up with a user account and
# the other parameters listed below. The 'host' and 'port' fields should point
# to where the RabbitMQ server is running.

[galaxy_amqp]

#host = 127.0.0.1
#port = 5672
#userid = galaxy
#password = galaxy
#virtual_host = galaxy_messaging_engine
#queue = galaxy_queue
#exchange = galaxy_exchange
#routing_key = bar_code_scanner
#rabbitmqctl_path = /path/to/rabbitmqctl
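# For illustration only (hypothetical shell commands; adapt the names,
# password and virtual host to your site), the matching RabbitMQ user and
# virtual host could be created with rabbitmqctl, e.g.:
#   rabbitmqctl add_user galaxy galaxy
#   rabbitmqctl add_vhost galaxy_messaging_engine
#   rabbitmqctl set_permissions -p galaxy_messaging_engine galaxy ".*" ".*" ".*"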