# This file contains the Kubernetes manifests for the core Prow components.
# Don't edit resources in this file; instead, pull them out into their own
# files.
---
apiVersion: v1
kind: Namespace
metadata:
  name: prow
---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: prow
  name: plugins
data:
  plugins.yaml: |
    plugins:
      $GITHUB_ORG:
        plugins:
        - approve
        - assign
        - blunderbuss
        - cat
        - dog
        - help
        - heart
        - hold
        - label
        - lgtm
        - trigger
        - verify-owners
        - wip
        - yuks
---
apiVersion: v1
kind: Secret
metadata:
  namespace: prow
  name: github-token
stringData:
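  # "cert" holds the GitHub App's private key (PEM); components mount this
  # secret at /etc/github and read it via --github-app-private-key-path=/etc/github/cert.
  # "appid" is the numeric GitHub App ID.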
  cert: $GITHUB_TOKEN
  appid: "$GITHUB_APP_ID"
---
apiVersion: v1
kind: Secret
metadata:
  namespace: prow
  name: hmac-token
stringData:
  # Generate via `openssl rand -hex 20`. This is the shared secret configured in the GitHub webhook.
  hmac: $HMAC_TOKEN
---
apiVersion: v1
kind: Secret
metadata:
  namespace: prow
  name: minio-root-creds
stringData:
  user: "$MINIO_ROOT_USER"
  password: "$MINIO_ROOT_PASSWORD"
---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: prow
  name: config
data:
  config.yaml: |
    prowjob_namespace: prow
    pod_namespace: test-pods

    in_repo_config:
      enabled:
        "*": true

    deck:
      spyglass:
        gcs_browser_prefix: 'https://s3.console.aws.amazon.com/s3/buckets/'
        lenses:
        - lens:
            name: metadata
          required_files:
          - ^(?:started|finished)\.json$
          optional_files:
          - ^(?:podinfo|prowjob)\.json$
        - lens:
            name: buildlog
          required_files:
          - build-log.txt
        - lens:
            name: junit
          required_files:
          - .*/junit.*\.xml
        - lens:
            name: podinfo
          required_files:
          - podinfo.json

    plank:
      job_url_prefix_config:
        "*": https://$PROW_HOST/view/
      report_templates:
        '*': >-
            [Full PR test history](https://$PROW_HOST/pr-history?org={{.Spec.Refs.Org}}&repo={{.Spec.Refs.Repo}}&pr={{with index .Spec.Refs.Pulls 0}}{{.Number}}{{end}}).
            [Your PR dashboard](https://$PROW_HOST/pr?query=is:pr+state:open+author:{{with
            index .Spec.Refs.Pulls 0}}{{.Author}}{{end}}).
      default_decoration_config_entries:
      - config:
          gcs_configuration:
            bucket: s3://prow-logs
            path_strategy: explicit
          github_api_endpoints:
            - http://ghproxy
            - https://api.github.com
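          # http://ghproxy is the in-cluster caching GitHub proxy (the "ghproxy"
          # Deployment/Service defined later in this file); api.github.com is
          # listed as a fallback.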
          github_app_id: "$GITHUB_APP_ID"
          github_app_private_key_secret:
            name: github-token
            key: cert
          s3_credentials_secret: s3-credentials
          utility_images:
            clonerefs: us-docker.pkg.dev/k8s-infra-prow/images/clonerefs:v20240802-66b115076
            entrypoint: us-docker.pkg.dev/k8s-infra-prow/images/entrypoint:v20240802-66b115076
            initupload: us-docker.pkg.dev/k8s-infra-prow/images/initupload:v20240802-66b115076
            sidecar: us-docker.pkg.dev/k8s-infra-prow/images/sidecar:v20240802-66b115076

    tide:
      queries:
      - labels:
        - lgtm
        - approved
        missingLabels:
        - needs-rebase
        - do-not-merge/hold
        - do-not-merge/work-in-progress
        - do-not-merge/invalid-owners-file
        orgs:
        - $GITHUB_ORG

    decorate_all_jobs: true
    periodics:
    - interval: 1m
      agent: kubernetes
      name: echo-test
      spec:
        containers:
        - image: alpine
          command: ["/bin/date"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: prow
  name: hook
  labels:
    app: hook
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  selector:
    matchLabels:
      app: hook
  template:
    metadata:
      labels:
        app: hook
    spec:
      serviceAccountName: "hook"
      terminationGracePeriodSeconds: 180
      containers:
      - name: hook
        image: us-docker.pkg.dev/k8s-infra-prow/images/hook:v20240802-66b115076
        imagePullPolicy: Always
        args:
        - --dry-run=false
        - --config-path=/etc/config/config.yaml
        - --github-endpoint=http://ghproxy
        - --github-endpoint=https://api.github.com
        - --github-app-id=$(GITHUB_APP_ID)
        - --github-app-private-key-path=/etc/github/cert
        env:
        - name: GITHUB_APP_ID
          valueFrom:
            secretKeyRef:
              name: github-token
              key: appid
        ports:
          - name: http
            containerPort: 8888
        volumeMounts:
        - name: hmac
          mountPath: /etc/webhook
          readOnly: true
        - name: github-token
          mountPath: /etc/github
          readOnly: true
        - name: config
          mountPath: /etc/config
          readOnly: true
        - name: plugins
          mountPath: /etc/plugins
          readOnly: true
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
          initialDelaySeconds: 3
          periodSeconds: 3
        readinessProbe:
          httpGet:
            path: /healthz/ready
            port: 8081
          initialDelaySeconds: 10
          periodSeconds: 3
          timeoutSeconds: 600
      volumes:
      - name: hmac
        secret:
          secretName: hmac-token
      - name: github-token
        secret:
          secretName: github-token
      - name: config
        configMap:
          name: config
      - name: plugins
        configMap:
          name: plugins
---
apiVersion: v1
kind: Service
metadata:
  namespace: prow
  name: hook
spec:
  selector:
    app: hook
  ports:
  - port: 8888
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: prow
  name: sinker
  labels:
    app: sinker
spec:
  selector:
    matchLabels:
      app: sinker
  replicas: 1
  template:
    metadata:
      labels:
        app: sinker
    spec:
      serviceAccountName: "sinker"
      containers:
      - name: sinker
        image: us-docker.pkg.dev/k8s-infra-prow/images/sinker:v20240802-66b115076
        args:
        - --config-path=/etc/config/config.yaml
        - --dry-run=false
        volumeMounts:
        - name: config
          mountPath: /etc/config
          readOnly: true
      volumes:
      - name: config
        configMap:
          name: config
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: prow
  name: deck
  labels:
    app: deck
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  selector:
    matchLabels:
      app: deck
  template:
    metadata:
      labels:
        app: deck
    spec:
      serviceAccountName: "deck"
      terminationGracePeriodSeconds: 30
      containers:
      - name: deck
        image: us-docker.pkg.dev/k8s-infra-prow/images/deck:v20240802-66b115076
        args:
        - --config-path=/etc/config/config.yaml
        - --plugin-config=/etc/plugins/plugins.yaml
        - --tide-url=http://tide/
        - --hook-url=http://hook:8888/plugin-help
        - --github-endpoint=http://ghproxy
        - --github-endpoint=https://api.github.com
        - --github-graphql-endpoint=http://ghproxy/graphql
        - --s3-credentials-file=/etc/s3-credentials/service-account.json
        - --spyglass=true
        - --github-app-id=$(GITHUB_APP_ID)
        - --github-app-private-key-path=/etc/github/cert
        env:
        - name: GITHUB_APP_ID
          valueFrom:
            secretKeyRef:
              name: github-token
              key: appid
        ports:
          - name: http
            containerPort: 8080
        volumeMounts:
        - name: config
          mountPath: /etc/config
          readOnly: true
        - name: github-token
          mountPath: /etc/github
          readOnly: true
        - name: plugins
          mountPath: /etc/plugins
          readOnly: true
        - name: s3-credentials
          mountPath: /etc/s3-credentials
          readOnly: true
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
          initialDelaySeconds: 3
          periodSeconds: 3
        readinessProbe:
          httpGet:
            path: /healthz/ready
            port: 8081
          initialDelaySeconds: 10
          periodSeconds: 3
          timeoutSeconds: 600
      volumes:
      - name: config
        configMap:
          name: config
      - name: github-token
        secret:
          secretName: github-token
      - name: plugins
        configMap:
          name: plugins
      - name: s3-credentials
        secret:
          secretName: s3-credentials
---
apiVersion: v1
kind: Service
metadata:
  namespace: prow
  name: deck
spec:
  selector:
    app: deck
  ports:
  - port: 80
    targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: prow
  name: horologium
  labels:
    app: horologium
spec:
  replicas: 1 # Do not scale up.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: horologium
  template:
    metadata:
      labels:
        app: horologium
    spec:
      serviceAccountName: "horologium"
      terminationGracePeriodSeconds: 30
      containers:
      - name: horologium
        image: us-docker.pkg.dev/k8s-infra-prow/images/horologium:v20240802-66b115076
        args:
        - --dry-run=false
        - --config-path=/etc/config/config.yaml
        volumeMounts:
        - name: config
          mountPath: /etc/config
          readOnly: true
      volumes:
      - name: config
        configMap:
          name: config
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: prow
  name: tide
  labels:
    app: tide
spec:
  replicas: 1 # Do not scale up.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: tide
  template:
    metadata:
      labels:
        app: tide
    spec:
      serviceAccountName: "tide"
      containers:
      - name: tide
        image: us-docker.pkg.dev/k8s-infra-prow/images/tide:v20240802-66b115076
        args:
        - --dry-run=false
        - --config-path=/etc/config/config.yaml
        - --github-endpoint=http://ghproxy
        - --github-endpoint=https://api.github.com
        - --github-graphql-endpoint=http://ghproxy/graphql
        - --s3-credentials-file=/etc/s3-credentials/service-account.json
        - --status-path=s3://tide/tide-status
        - --history-uri=s3://tide/tide-history.json
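        # Both paths point at the "tide" bucket, which the MinIO init container
        # at the bottom of this file pre-creates as /data/tide.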
        - --github-app-id=$(GITHUB_APP_ID)
        - --github-app-private-key-path=/etc/github/cert
        env:
        - name: GITHUB_APP_ID
          valueFrom:
            secretKeyRef:
              name: github-token
              key: appid
        ports:
          - name: http
            containerPort: 8888
        volumeMounts:
        - name: github-token
          mountPath: /etc/github
          readOnly: true
        - name: config
          mountPath: /etc/config
          readOnly: true
        - name: s3-credentials
          mountPath: /etc/s3-credentials
          readOnly: true
      volumes:
      - name: github-token
        secret:
          secretName: github-token
      - name: config
        configMap:
          name: config
      - name: s3-credentials
        secret:
          secretName: s3-credentials
---
apiVersion: v1
kind: Service
metadata:
  namespace: prow
  name: tide
spec:
  selector:
    app: tide
  ports:
  - port: 80
    targetPort: 8888
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  namespace: prow
  name: prow
  annotations:
    # If you use cert-manager, change this to your cluster issuer. The
    # annotation has no effect when cert-manager is not installed.
    cert-manager.io/cluster-issuer: letsencrypt-staging
spec:
  defaultBackend:
    # Specify the default backend for `ingress-gce` (https://cloud.google.com/kubernetes-engine/docs/concepts/ingress#default_backend).
    service:
      name: deck
      port:
        number: 80
  rules:
  - host: $PROW_HOST
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: deck
            port:
              number: 80
      - path: /hook
        pathType: Prefix
        backend:
          service:
            name: hook
            port:
              number: 8888
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: statusreconciler
  namespace: prow
  labels:
    app: statusreconciler
spec:
  replicas: 1
  selector:
    matchLabels:
      app: statusreconciler
  template:
    metadata:
      labels:
        app: statusreconciler
    spec:
      serviceAccountName: statusreconciler
      terminationGracePeriodSeconds: 180
      containers:
      - name: statusreconciler
        image: us-docker.pkg.dev/k8s-infra-prow/images/status-reconciler:v20240802-66b115076
        args:
        - --dry-run=false
        - --continue-on-error=true
        - --plugin-config=/etc/plugins/plugins.yaml
        - --config-path=/etc/config/config.yaml
        - --github-endpoint=http://ghproxy
        - --github-endpoint=https://api.github.com
        - --s3-credentials-file=/etc/s3-credentials/service-account.json
        - --status-path=s3://status-reconciler/status-reconciler-status
        - --github-app-id=$(GITHUB_APP_ID)
        - --github-app-private-key-path=/etc/github/cert
        env:
        - name: GITHUB_APP_ID
          valueFrom:
            secretKeyRef:
              name: github-token
              key: appid
        volumeMounts:
        - name: github-token
          mountPath: /etc/github
          readOnly: true
        - name: config
          mountPath: /etc/config
          readOnly: true
        - name: plugins
          mountPath: /etc/plugins
          readOnly: true
        - name: s3-credentials
          mountPath: /etc/s3-credentials
          readOnly: true
      volumes:
      - name: github-token
        secret:
          secretName: github-token
      - name: config
        configMap:
          name: config
      - name: plugins
        configMap:
          name: plugins
      - name: s3-credentials
        secret:
          secretName: s3-credentials
---
apiVersion: v1
kind: Namespace
metadata:
  name: test-pods
---
kind: ServiceAccount
apiVersion: v1
metadata:
  namespace: prow
  name: "deck"
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "deck"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: "deck"
subjects:
- kind: ServiceAccount
  name: "deck"
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: test-pods
  name: "deck"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: "deck"
subjects:
- kind: ServiceAccount
  name: "deck"
  namespace: prow
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "deck"
rules:
  - apiGroups:
      - "prow.k8s.io"
    resources:
      - prowjobs
    verbs:
      - get
      - list
      - watch
      # Required when Deck runs with `--rerun-creates-job=true`.
      # **Warning:** Only enable this for non-public Deck instances; it allows
      # anyone with access to your Deck instance to create new ProwJobs.
      # - create
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: test-pods
  name: "deck"
rules:
  - apiGroups:
      - ""
    resources:
      - pods/log
    verbs:
      - get
---
kind: ServiceAccount
apiVersion: v1
metadata:
  namespace: prow
  name: "horologium"
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "horologium"
rules:
  - apiGroups:
      - "prow.k8s.io"
    resources:
      - prowjobs
    verbs:
      - create
      - list
      - watch
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "horologium"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: "horologium"
subjects:
- kind: ServiceAccount
  name: "horologium"
---
kind: ServiceAccount
apiVersion: v1
metadata:
  namespace: prow
  name: "sinker"
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "sinker"
rules:
  - apiGroups:
    - "prow.k8s.io"
    resources:
    - prowjobs
    verbs:
    - delete
    - list
    - watch
    - get
  - apiGroups:
    - coordination.k8s.io
    resources:
    - leases
    resourceNames:
    - prow-sinker-leaderlock
    verbs:
    - get
    - update
  - apiGroups:
    - coordination.k8s.io
    resources:
    - leases
    verbs:
    - create
  - apiGroups:
    - ""
    resources:
    - configmaps
    resourceNames:
    - prow-sinker-leaderlock
    verbs:
    - get
    - update
  - apiGroups:
    - ""
    resources:
    - configmaps
    - events
    verbs:
    - create
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: test-pods
  name: "sinker"
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - delete
      - list
      - watch
      - get
      - patch
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "sinker"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: "sinker"
subjects:
- kind: ServiceAccount
  name: "sinker"
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: test-pods
  name: "sinker"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: "sinker"
subjects:
- kind: ServiceAccount
  name: "sinker"
  namespace: prow
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: prow
  name: "hook"
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "hook"
rules:
  - apiGroups:
      - "prow.k8s.io"
    resources:
      - prowjobs
    verbs:
      - create
      - get
      - list
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
      - get
      - update
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "hook"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: "hook"
subjects:
- kind: ServiceAccount
  name: "hook"
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: prow
  name: "tide"
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "tide"
rules:
  - apiGroups:
      - "prow.k8s.io"
    resources:
      - prowjobs
    verbs:
      - create
      - list
      - get
      - watch
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "tide"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: "tide"
subjects:
- kind: ServiceAccount
  name: "tide"
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: prow
  name: "statusreconciler"
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "statusreconciler"
rules:
  - apiGroups:
      - "prow.k8s.io"
    resources:
      - prowjobs
    verbs:
      - create
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: "statusreconciler"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: "statusreconciler"
subjects:
- kind: ServiceAccount
  name: "statusreconciler"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  namespace: prow
  labels:
    app: ghproxy
  name: ghproxy
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: prow
  name: ghproxy
  labels:
    app: ghproxy
spec:
  selector:
    matchLabels:
      app: ghproxy
  strategy:
    type: Recreate
  # GHProxy does not support HA
  replicas: 1
  template:
    metadata:
      labels:
        app: ghproxy
    spec:
      containers:
      - name: ghproxy
        image: us-docker.pkg.dev/k8s-infra-prow/images/ghproxy:v20240802-66b115076
        args:
        - --cache-dir=/cache
        - --cache-sizeGB=99
        - --push-gateway=pushgateway
        - --serve-metrics=true
        ports:
        - containerPort: 8888
        volumeMounts:
        - name: cache
          mountPath: /cache
      volumes:
      - name: cache
        persistentVolumeClaim:
          claimName: ghproxy
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: ghproxy
  namespace: prow
  name: ghproxy
spec:
  ports:
  - name: main
    port: 80
    protocol: TCP
    targetPort: 8888
  - name: metrics
    port: 9090
  selector:
    app: ghproxy
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: prow
  name: prow-controller-manager
  labels:
    app: prow-controller-manager
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prow-controller-manager
  template:
    metadata:
      labels:
        app: prow-controller-manager
    spec:
      serviceAccountName: prow-controller-manager
      containers:
      - name: prow-controller-manager
        args:
        - --dry-run=false
        - --config-path=/etc/config/config.yaml
        - --github-endpoint=http://ghproxy
        - --github-endpoint=https://api.github.com
        - --enable-controller=plank
        - --github-app-id=$(GITHUB_APP_ID)
        - --github-app-private-key-path=/etc/github/cert
        env:
        - name: GITHUB_APP_ID
          valueFrom:
            secretKeyRef:
              name: github-token
              key: appid
        image: us-docker.pkg.dev/k8s-infra-prow/images/prow-controller-manager:v20240802-66b115076
        volumeMounts:
        - name: github-token
          mountPath: /etc/github
          readOnly: true
        - name: config
          mountPath: /etc/config
          readOnly: true
      volumes:
      - name: github-token
        secret:
          secretName: github-token
      - name: config
        configMap:
          name: config
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: prow
  name: prow-controller-manager
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: prow-controller-manager
rules:
  - apiGroups:
    - "prow.k8s.io"
    resources:
    - prowjobs
    verbs:
    - get
    - list
    - watch
    - update
    - patch
  - apiGroups:
    - coordination.k8s.io
    resources:
    - leases
    resourceNames:
    - prow-controller-manager-leader-lock
    verbs:
    - get
    - update
  - apiGroups:
    - coordination.k8s.io
    resources:
    - leases
    verbs:
    - create
  - apiGroups:
    - ""
    resources:
    - configmaps
    resourceNames:
    - prow-controller-manager-leader-lock
    verbs:
    - get
    - update
  - apiGroups:
    - ""
    resources:
    - configmaps
    - events
    verbs:
    - create
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: test-pods
  name: prow-controller-manager
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - watch
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: prow-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: prow-controller-manager
subjects:
- kind: ServiceAccount
  name: prow-controller-manager
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: test-pods
  name: prow-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: prow-controller-manager
subjects:
- kind: ServiceAccount
  name: prow-controller-manager
  namespace: prow
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: prow
  name: crier
  labels:
    app: crier
spec:
  replicas: 1
  selector:
    matchLabels:
      app: crier
  template:
    metadata:
      labels:
        app: crier
    spec:
      serviceAccountName: crier
      terminationGracePeriodSeconds: 30
      containers:
      - name: crier
        image: us-docker.pkg.dev/k8s-infra-prow/images/crier:v20240802-66b115076
        args:
        - --blob-storage-workers=10
        - --config-path=/etc/config/config.yaml
        - --s3-credentials-file=/etc/s3-credentials/service-account.json
        - --github-endpoint=http://ghproxy
        - --github-endpoint=https://api.github.com
        - --github-workers=10
        - --kubernetes-blob-storage-workers=10
        - --github-app-id=$(GITHUB_APP_ID)
        - --github-app-private-key-path=/etc/github/cert
        env:
        - name: GITHUB_APP_ID
          valueFrom:
            secretKeyRef:
              name: github-token
              key: appid
        volumeMounts:
        - name: config
          mountPath: /etc/config
          readOnly: true
        - name: github-token
          mountPath: /etc/github
          readOnly: true
        - name: s3-credentials
          mountPath: /etc/s3-credentials
          readOnly: true
      volumes:
      - name: config
        configMap:
          name: config
      - name: github-token
        secret:
          secretName: github-token
      - name: s3-credentials
        secret:
          secretName: s3-credentials
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: crier
  namespace: prow
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: prow
  name: crier
rules:
- apiGroups:
    - "prow.k8s.io"
  resources:
    - "prowjobs"
  verbs:
    - "get"
    - "watch"
    - "list"
    - "patch"
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: test-pods
  name: crier
rules:
- apiGroups:
    - ""
  resources:
    - "pods"
    - "events"
  verbs:
    - "get"
    - "list"
- apiGroups:
    - ""
  resources:
    - "pods"
  verbs:
    - "patch"
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: crier
  namespace: prow
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: crier
subjects:
- kind: ServiceAccount
  name: crier
  namespace: prow
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: crier
  namespace: test-pods
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: crier
subjects:
- kind: ServiceAccount
  name: crier
  namespace: prow
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: minio
  namespace: prow
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
---
apiVersion: v1
kind: Secret
metadata:
  namespace: prow
  name: s3-credentials
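# These credentials target the in-cluster MinIO instance defined at the end of
# this file; "endpoint" resolves to the "minio" Service in the prow namespace.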
stringData:
  service-account.json: |
    {
      "region": "minio",
      "access_key": "$MINIO_ROOT_USER",
      "endpoint": "minio.prow.svc.cluster.local",
      "insecure": true,
      "s3_force_path_style": true,
      "secret_key": "$MINIO_ROOT_PASSWORD"
    }
---
apiVersion: v1
kind: Secret
metadata:
  namespace: test-pods
  name: s3-credentials
stringData:
  service-account.json: |
    {
      "region": "minio",
      "access_key": "$MINIO_ROOT_USER",
      "endpoint": "minio.prow.svc.cluster.local",
      "insecure": true,
      "s3_force_path_style": true,
      "secret_key": "$MINIO_ROOT_PASSWORD"
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minio
  namespace: prow
spec:
  selector:
    matchLabels:
      app: minio
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: minio
    spec:
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: minio
      initContainers:
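      # The directories created here pre-create the MinIO buckets referenced in
      # the Prow config above (prow-logs, tide, status-reconciler).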
      - name: bucket-creator
        image: alpine
        command:
        - mkdir
        - -p
        - /data/prow-logs
        - /data/tide
        - /data/status-reconciler
        volumeMounts:
        - name: data
          mountPath: "/data"
      containers:
      - name: minio
        volumeMounts:
        - name: data
          mountPath: "/data"
        image: minio/minio:latest
        args:
        - server
        - /data
        env:
        - name: MINIO_ROOT_USER
          valueFrom:
            secretKeyRef:
              name: minio-root-creds
              key: user
        - name: MINIO_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              name: minio-root-creds
              key: password
        - name: MINIO_REGION_NAME
          value: minio
        ports:
        - containerPort: 9000
        readinessProbe:
          httpGet:
            path: /minio/health/ready
            port: 9000
          periodSeconds: 20
        livenessProbe:
          httpGet:
            path: /minio/health/live
            port: 9000
---
apiVersion: v1
kind: Service
metadata:
  name: minio
  namespace: prow
spec:
  type: ClusterIP
  ports:
  - port: 80
    targetPort: 9000
    protocol: TCP
  selector:
    app: minio