---
# --------------------------------------------------------------------------- #
# An orchestrator deployment configuration file.                              #
#                                                                             #
# ...compatible with orchestrator configuration file format version 1.        #
#                                                                             #
# This working configuration can also act as a template for your own          #
# deployment. If the 'compact' style suits your needs then feel free          #
# to copy this to form the basis of your own deployment.                      #
#                                                                             #
# To avoid the comments becoming out-of-date you should probably strip        #
# the documentation that accompanies this file from your copy. You can        #
# rest assured that this Frankfurt file will be kept up to date               #
# with any orchestrator changes and new features.                             #
# --------------------------------------------------------------------------- #

# The configuration file version. This is dictated by the orchestrator.
# As enhanced file versions are brought online this number will
# increase. Whether your configuration version is supported is a function of
# the orchestrator version you are running.
version: 1

# The name of the deployment.
# This is used for reporting purposes only.
name: Compact AWS Deployment (OKD 3.11) [Frankfurt]

# A more detailed description of the deployment.
# This is used for reporting purposes only.
description: >
  A combined Bastion/Master/Infra/Compute deployment
  where a single compute instance acts as a compute node
  and plays the role of the bastion, master and infrastructure node.
  This example also includes certbot certificate creation,
  metrics and prometheus.

# --- cluster -----------------------------------------------------------------

# Details of the OKD cluster configuration.
# As well as the cluster region (provider-specific)
# and its hostname, the size and machine details of the OKD
# cloud instances are defined here.
cluster:

  # The provider 'region' used for cluster instances.
  # The value is provider-specific.
  region: eu-central-1

  # The SSH 'user' that belongs to the ssh-keypair.
  ssh_user: centos

  # A unique name for your cluster.
  # All your clusters must have a different ID.
  id: compact-frankfurt-311

  # Machine image name for the cluster instances.
  # This should match the start of the Packer-generated images.
  # The latest image whose name starts with this will be used.
  # The Bastion, if used, uses its own image.
  image_prefix: okd-machine-image-311-

  # The Terraform directory that contains the Terraform definitions
  # suitable for building the OKD cluster machines' cloud infrastructure.
  terraform_dir: aws

  # You can declare whether the Terraform state files are
  # stored in a globally available shared service. At the moment we support
  # Amazon S3. Leaving this field blank means that Terraform state
  # is stored locally (within the project's Terraform directory).
  # If you provide this information you *MUST* define all of its values.
  #
  # An S3 shared-state configuration would look something like: -
  #
  # terraform_shared_state:
  #   aws:
  #     s3_bucket:
  #     s3_key:
  #     db_region:
  #     db_table:
  #terraform_shared_state:
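
  # For example (the bucket, key, table and region values below are purely
  # illustrative and not part of this deployment), a populated shared-state
  # block might look something like this: -
  #
  # terraform_shared_state:
  #   aws:
  #     s3_bucket: my-terraform-state-bucket
  #     s3_key: compact-frankfurt-311/terraform.tfstate
  #     db_region: eu-central-1
  #     db_table: my-terraform-state-lock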

  # A name prefix for the names of each cluster object instance.
  # Where supported by your provider this value will be used to alter the
  # instance names of objects created in your cloud provider.
  # Nodes will be named 'node-NNN-<name>' but, if your prefix
  # is 'vre', a node named 'general' will instead be named
  # 'vre-node-001-general'.
  # Use this to distinguish between nodes of different clusters in the
  # same cloud provider.
  name_tag: compact

  # The external IP address or domain-name of the Master node.
  # If not set the master public DNS will be used.
  # Whatever is used must provide a resolvable route to the master.
  # So, if you have a domain, ensure it's routed to the pre-allocated
  # public IP of the master (see cluster.master.fixed_ip_id).
  # If you use an external load balancer, you should specify the address of
  # the external load balancer.
  public_hostname: compact.informaticsmatters.com

  # The external IP address or domain-name for application default routes.
  # This is a suffix appended to application $service.$namespace routes
  # for those that need a default. It should resolve to the Infrastructure
  # (Router) node.
  default_subdomain: compact.informaticsmatters.com

  # The Master API/Console port.
  api_port: 8443

  # Configuration details of the Bastion instance.
  # This section is ignored if the cluster.master is
  # configured to also act as a Bastion
  # (i.e. cluster.master.is_bastion resolves to true).
  bastion:

    # Machine image name for the Bastion instance.
    # This should match the start of the Packer-generated image.
    # The latest image whose name starts with this will be used.
    # Not required if the Master is acting as a Bastion.
    image_prefix: okd-bastion-image-311-

    # Provider-specific machine image type.
    # See master.type.
    # Not required if the Master is acting as a Bastion.
    type: t2.small

  # Details of the Master nodes.
  #
  # 'Simple' deployment topologies (as this one is)
  # are suitable when using just one Master instance.
  master:
    count: 1

    # Provider-specific machine image type,
    # i.e. t2.xlarge on AWS,
    #      n1-standard-8 on GCE or
    #      START1-L on Scaleway.
    #
    # You must ensure that the type you pick satisfies
    # the minimum requirements for OKD.
    # For example, Master nodes require at least 16GiB of free memory,
    # and you should avoid using any machine with fewer than 4 cores.
    # Hardware requirements can be found on the OpenShift site: -
    #
    # https://docs.openshift.com/container-platform/3.11/install/prerequisites.html#hardware
    #
    # On GCE, for example, your Master nodes cannot be smaller than
    # an n1-standard-8 instance.
    type: t2.xlarge

    # If you have a pre-allocated public IP for the Master set the value here.
    # This is _not_ normally the IP address itself, it is the cloud provider's
    # IP identity. In AWS this would be the Elastic IP Allocation ID,
    # e.g. "eipalloc-0000000000000000".
    #
    # If you have not provided a pre-allocated (fixed) IP
    # leave this property empty.
    fixed_ip_id: eipalloc-010cfe12a8b8c7fa2

    # If the master is to also act as the infrastructure node
    # then set this field to 'yes' while also setting 'infra.count' to 0.
    is_infra: yes

    # If the master is to also act as the Bastion node
    # then set this field to 'yes'.
    is_bastion: yes

    # Set if you also want the Master node to be a general compute node.
    # You can omit this property.
    # You can also make the node a general compute node after it's been created
    # by using the 'oc label' command: -
    #   oc label node <node-name> node-role.kubernetes.io/compute=true
    is_compute_node: yes
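
  # For contrast, and purely as an illustration (this block is not part of
  # the compact deployment, and the count and type shown are hypothetical),
  # a larger topology that keeps the Bastion, Infrastructure and compute
  # roles off the Master might configure this section along these lines: -
  #
  # master:
  #   count: 3
  #   type: t2.xlarge
  #   is_infra: no
  #   is_bastion: no
  #   is_compute_node: no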

  # Details of the Infrastructure nodes.
  infra:

    # The number of Infrastructure nodes.
    # Not required (and ignored) if the Master is acting as an
    # Infrastructure node.
    count: 0

    # Provider-specific machine image type.
    # Not required (and ignored) if the Master is acting as an
    # Infrastructure node.
    # Refer to the master.type documentation for hardware requirements when
    # choosing your type.
    # type:

    # The (optional) pre-allocated public IP for the Infrastructure node.
    # (See cluster.master.fixed_ip_id).
    # Not required (and ignored) if the Master is acting as an
    # Infrastructure node.
    # fixed_ip_id:

    # Set if you also want the Infrastructure nodes to act as general
    # compute nodes. See master.is_compute_node.
    # Not required (and ignored) if the Master is acting as an
    # Infrastructure node.
    # is_compute_node: no

  # Details of the in-cluster NFS server.
  # Optionally name the node that will be used as the NFS server.
  # The name used must be the machine name that Terraform would create,
  # i.e. 'infra1'.
  nfs:
    # machine: infra1

  # You can define extra node groups that your nodes can be assigned to.
  # These complement the expected/built-in groups
  # (e.g. master, infra, compute).
  #
  # For each group you define, a 'node_group_<name>' will be created.
  #
  # Node groups consist of: -
  #
  # - An arbitrary and unique 'name' (following standard variable conventions).
  #   Stick to letters, numbers and '_'.
  # - A set of 'labels' that are essentially 'keyword=value' pairs.
  #   The labels are used for Pod node selection.
  # - An 'is_compute_node' flag. If this evaluates to true
  #   the node is also treated by OKD as a generally available
  #   compute node where Pods can be scheduled using default rules.
  #   If you need an 'exclusive node' (one that is only used for jobs
  #   that name the label explicitly for selection) then set this to false.
  #   By default this is 'no' so groups are not general compute nodes.
  #
  # You must not use the following names,
  # which are built-in and created automatically during the installation: -
  #
  # - master
  # - master-compute
  # - infra
  # - infra-compute
  # - compute
  # - master-infra
  # - all-in-one
  #
  # You can leave this list blank if you do not need any of your own
  # node groups. An illustration of an 'exclusive' group follows the
  # commented-out example below.
  #
  # node_groups:
  # - name: general
  #   labels:
  #   - general-node=true
  #   is_compute_node: yes
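
  # As a further illustration (the group name and label here are hypothetical
  # and not part of this deployment), an 'exclusive' group that is only used
  # when jobs select its label explicitly simply sets 'is_compute_node'
  # to 'no': -
  #
  # node_groups:
  # - name: gpu
  #   labels:
  #   - gpu-node=true
  #   is_compute_node: no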

  # Details of the cluster compute nodes.
  # This is a list that allows you to define different node 'categories'.
  # Each entry in the list supports the following properties.
  #
  # Where the provider allows, nodes are placed in different 'zones'.
  # AWS typically provides 3 'zones' per region. So, if you create
  # 6 nodes in a group then two will be placed in 'zone a', two in 'zone b'
  # and two in 'zone c'. Each new node in a group is placed in a different
  # zone, with the first node in each group being placed in the first zone.
  #
  # You must have at least 1 compute node.
  #
  # Nodes consist of: -
  #
  # - A 'name' used simply to tag the resource, for providers that support it.
  # - A numerical 'count' to define the number of nodes of this category to
  #   create in the cluster. It can be zero if you do not want any
  #   independent compute nodes.
  # - A provider-specific 'type' used to define the compute instance type
  #   for each node in the category. In AWS this might be 't2.medium'.
  #   Refer to the master.type documentation for hardware requirements when
  #   choosing your type. Nodes will typically need at least 15GiB of
  #   available disk space and at least 8GiB of free RAM.
  # - A 'node_group', used to identify the OKD node group the nodes of this
  #   category belong to. OKD provides built-in groups like 'compute'
  #   for general compute nodes but you can define your own groups
  #   (see the node_groups section above) and place your node into one of
  #   those groups. By convention groups are called 'node-config-<name>'
  #   (like 'node-config-compute'). Here you simply need the name
  #   (i.e. 'compute'). If you want to place your node into a built-in
  #   group, you can use that group (i.e. 'compute'), but every node
  #   must be in a group, even if it's simply 'compute'.
  nodes:
  - count: 0
    # name: general
    # type: t2.medium
    # node_group: general

# --- okd ---------------------------------------------------------------------

# The okd section provides details of the OpenShift/OKD deployment.
# These are high-level, abstract configuration details
# that include the tagged version of the software to install
# and the playbooks to run.
okd:

  # The version tag for the OpenShift Origin/OKD Ansible playbooks.
  # This must be a valid tag in the OpenShift git repository.
  ansible_tag: openshift-ansible-3.11.97-1

  # The version of Ansible required to deploy the cluster.
  # The orchestrator will ensure that the correct version of
  # Ansible is installed. v2.7.5 is likely to be suitable for
  # OKD 3.11 and beyond but change it here if you need to.
  ansible_version: 2.7.5

  # The names of the playbooks to play in order to deploy the OKD software.
  # For 3.11 this is typically 'prerequisites' followed by
  # 'deploy_cluster'. You're unlikely to need to change these.
  #
  # Any playbook can be put in here.
  # For example, to run 'playbooks/openshift-glusterfs/uninstall.yml'
  # just add 'openshift-glusterfs/uninstall', or to uninstall
  # 3.11 you could replace this with 'adhoc/uninstall' and
  # run the orchestrator with the -spr and -spo options
  # to skip the pre and post playbooks.
  play:
  # This pair will destroy a GlusterFS deployment...
  #- openshift-glusterfs/uninstall
  #- adhoc/uninstall
  - prerequisites
  - deploy_cluster

  # Install extra components based on playbooks in the 'ansible/post-okd'
  # directory. These playbooks will be run on the 'cli-node', a node with a
  # suitable corresponding 'oc' command-line utility (typically the Master).
  # The named playbooks are executed after the OKD software has been installed.
  #
  # Inspect the 'ansible/post-okd/playbooks' directory for an up-to-date
  # catalogue. At the time of writing the OKD Orchestrator is bundled
  # with the following: -
  #
  # - acme-controller
  # - kube-ops-view
  #
  # For each 'play' a corresponding
  # 'ansible/post-okd/playbooks/<play>/deploy.yaml' must exist.
  #
  # The orchestrator provides three key system variables that playbooks
  # can utilise. They are the Master's API URL, the cluster admin user
  # and its password: -
  #
  # - okd_api_hostname
  # - okd_admin
  # - okd_admin_password
  #
  # Additional variables can be passed to playbooks with a 'vars' list.
  # For example, if you want to provide the playbook variables 'x=1' and
  # 'y=2' you can add the following to the play section
  # (a complete example follows the post_okd block below): -
  #
  #   vars:
  #   - x=1
  #   - y=2
  post_okd:
  - play: acme-controller
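
  # Putting the above together, a post-OKD play that also receives its own
  # variables would look something like this (the play named here is one of
  # the bundled examples and the variable values are purely illustrative): -
  #
  # post_okd:
  # - play: kube-ops-view
  #   vars:
  #   - x=1
  #   - y=2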

  # Storage volumes for OKD services.
  # If undefined, defaults
  # (defined in the respective okd.inventories template files)
  # will be used.
  #
  # We support storage definitions for: -
  # - alertmanager
  # - logging
  # - metrics
  # - prometheus
  # - registry
  storage:
    metrics: 40Gi
    registry: 200Gi

  # What additional OpenShift/OKD services should be enabled?
  # Add services (like metrics) to this list with something like: -
  #
  # enable:
  # - metrics
  #
  # We currently support: -
  # - metrics
  # - prometheus
  # - tsb (The Template Service Broker)
  # - asb (The Ansible Service Broker)
  enable:
  - metrics
  - prometheus
  - tsb

  # The namespace (project) where templates will be loaded.
  # The templates are expected to live in the 'templates' directory
  # of the deployment. It is an error to set this if there isn't a
  # 'templates' directory or if 'tsb' has not been enabled (see above).
  #
  # If this is not defined, templates will not be installed.
  # template:
  #   namespace: openshift

  # Certificate definitions,
  # currently just the Master API certificate.
  #
  # If you do not want to set any certificates during deployments,
  # comment this section out.
  certificates:

    # Set to automatically generate a Let's Encrypt/certbot certificate
    # for the master node when the cluster has been created but prior to
    # OpenShift installation.
    #
    # If generating certificates you *MUST* provide the certbot
    # email address for certification in your provider-env/setenv.sh,
    # typically in the 'TF_VAR_master_certbot_email' environment variable.
    generate_api_cert: yes

    # Alternatively, if you have your own wildcard certificate
    # for your domain then you can place the certificate files
    # (which must consist of files named 'cert.crt', 'private.key'
    # and 'ca-bundle.crt') into the root of the OKD orchestrator
    # directory and set 'wildcard_cert' to 'yes'.
    #
    # Here you *MUST NOT* define an email address for certification,
    # i.e. 'TF_VAR_master_certbot_email' must not be set.
    #
    # When providing your own certificates you must use ansible-vault
    # to encrypt the certificate files. The OKD Orchestrator will expect to
    # find a file called 'vault-pass.txt' in the root of your deployment
    # directory (alongside your deployment configuration file).
    # An illustrative ansible-vault command follows this section.
    #
    # See the ansible documentation for further details: -
    # https://docs.ansible.com/ansible/latest/user_guide/playbooks_vault.html
    #wildcard_cert: yes

    # If you have a certificate that you want to provision
    # as a master API certificate then place the certificate files
    # (which must consist of files named 'cert.crt', 'private.key'
    # and 'ca-bundle.crt') into the root of the OKD orchestrator
    # directory and provide the certificate set URL in the 'master_api_cert'
    # value.
    #
    # Here you *MUST NOT* define an email address for certification,
    # i.e. 'TF_VAR_master_certbot_email' must not be set.
    #
    # See the note about using vault-encrypted files in the 'wildcard_cert'
    # section above.
    #master_api_cert: master.hostname.com

    # For the above, refer to the following OpenShift documentation: -
    # https://docs.openshift.com/container-platform/3.11/
    #   install_config/certificate_customization.html
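
    # As an illustration only, the certificate files described above could be
    # encrypted with an ansible-vault command along these lines (the paths
    # shown are assumptions; adjust them to wherever your certificate files
    # and vault-pass.txt actually live): -
    #
    #   ansible-vault encrypt --vault-password-file vault-pass.txt \
    #       cert.crt private.key ca-bundle.crt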

  # The directory of the OKD Ansible inventory for this deployment.
  # This directory is expected to exist in 'okd/inventories'.
  # The inventory directory contains template files for the Bastion
  # and OKD cluster. The templates for a given provider are often
  # shared between similar deployments.
  inventory_dir: standard-3-11

# --- my_machines -------------------------------------------------------------

# The 'my_machines' section is used when you're running the orchestrator
# on a pre-existing cluster rather than utilising the orchestrator's
# Terraform-based cluster formation. If you provide a 'my_machines' section
# it will be processed instead of running Terraform.
#
# If this deployment is for a pre-existing cluster you must provide
# values that will be used to replace the interpolations in the corresponding
# OKD Ansible inventory file (i.e. the file inventory.yaml.tpl that will
# be found in the deployment's okd.inventory_dir).
#
# 1. You MUST provide a value for 'cli_node' (normally the master
#    node). This node must be accessible from the development machine
#    as well as the bastion. It should be a node that, after deployment,
#    is expected to contain the 'oc' command-line utility and handle
#    certificate generation.
#
# 2. You MUST provide a value for 'bastion' (i.e. the bastion or a master
#    node). It is from here that the second stage of the deployment
#    takes place.
#
# 3. You MUST provide 'master<n>' values for every master that this
#    deployment would create if it had used Terraform. So, if
#    the deployment would create 3 Master instances you will need values for
#    'master1', 'master2' and 'master3'.
#
#    In the unlikely event that you have more than 9 masters, the 10th
#    value is expected for 'master10'.
#
# 4. Similarly, you MUST provide 'infra<n>' values for
#    every Infrastructure node that this deployment would create.
#
# 5. You MUST provide 'node<nnn>' values for every
#    node that would be created, where <nnn> is a non-zero, zero-padded
#    numerical value, e.g. '001'.
#
#    Importantly, if you are creating nodes of differing categories,
#    i.e. where one set of nodes has a node.name of 'general' and another
#    has a node.name of 'compute', then you need to keep in mind that the
#    nodes are created in the order they're placed in the 'cluster.nodes'
#    list. For example, in the following node definition...
#
#    cluster:
#      nodes:
#      - count: 2
#        name: general
#        type: t2.medium
#        node_group: general
#      - count: 2
#        name: compute
#        type: t2.medium
#        node_group: compute
#
#    ...the 'general' nodes will be expected to be named 'node001' and
#    'node002' and the 'compute' nodes are expected to be named 'node003'
#    and 'node004'.
#
# 6. You MUST provide 'glusterfs<n>' and
#    'glusterfs<n>_device1' values for every gluster node
#    that would be created, where <n> is a non-zero numerical value,
#    e.g. '1'.
#
# The following example illustrates the definition of a pre-existing cluster
# with one master, one infrastructure node and two compute nodes.
#
# Modify it accordingly to suit your pre-existing cluster and deployment file.

#my_machines:
#
#  cli_node: 10.0.1.1
#  bastion: 10.0.1.1
#
#  master1: 10.0.0.1
#
#  infra1: 10.0.0.2
#
#  node001: 10.0.0.3
#  node002: 10.0.0.4
#
#  glusterfs1: 10.0.0.5
#  glusterfs2: 10.0.0.6
#  glusterfs3: 10.0.0.7
#  glusterfs1_device1: /dev/vdb
#  glusterfs2_device1: /dev/vdb
#  glusterfs3_device1: /dev/vdb

# -----------------------------------------------------------------------------