# values ref: https://github.com/yuvipanda/cryptnono/blob/main/cryptnono/values.yaml
cryptnono:
  enabled: true
  detectors:
    execwhacker:
      metrics:
        enabled: true
      # execwhacker needs access to containerd, and docker on build hosts, to look up the container.
      # Override if K8s uses a different path for containerd
      containerdHostPath: /run/containerd/containerd.sock
      dockerHostPath: /run/dind/docker.sock

imagePullSecrets:
tags: {}

etcJupyter:
  jupyter_notebook_config.json:
    NotebookApp:
      allow_origin: "*"
      tornado_settings:
        trust_xheaders: true
      # shut down the server after no activity
      shutdown_no_activity_timeout: 600

    # if a user leaves a notebook with a running kernel,
    # the effective idle timeout will typically be CULL_TIMEOUT + CULL_KERNEL_TIMEOUT,
    # as culling the kernel will register activity,
    # resetting the no_activity timer for the server as a whole
    MappingKernelManager:
      # shut down kernels after no activity
      cull_idle_timeout: 600
      # check for idle kernels this often
      cull_interval: 60
      # a kernel with open connections but no activity still counts as idle;
      # this is what allows us to shut down servers
      # when people leave a notebook open and wander off
      cull_connected: true

# values ref: https://github.com/jupyterhub/binderhub/blob/main/helm-chart/binderhub/values.yaml
# can't use binderhub.enabled due to validation errors
binderhubEnabled: true

binderhub:
  replicas: 2
  resources:
    requests:
      cpu: "0.25"
      memory: 1Gi
    limits:
      cpu: "2"
      memory: 1Gi
  hpa:
    enabled: false
    minReplicas: 1
    maxReplicas: 1
    targetCPU: 100 # this is in percent of requests.cpu
  networkPolicy:
    enabled: true
    egress:
      tcpPorts:
        - 80 # http
        - 443 # https
        - 9418 # git
        - 873 # rsync
        - 1094 # xroot
        - 1095 # xroot
        - 16286 # Wolfram Engine on-demand licensing
        - 4001 # IPFS
      cidr: 0.0.0.0/0
    ingress:
      bannedIps: []
  config:
    GitHubRepoProvider:
      # Add banned repositories to the list below
      # They should be strings that will match "^<org-name>/<repo-name>.*"
      banned_specs:
        # e.g. '^org/repo.*'
        - .*xmrig.*
        - ^a2nk/.*
        - ^imhajes/.*
        - ^ines/spacy-binder.*
        - ^soft4voip/rak.*
        - ^hmharshit/cn-ait.*
        - ^shishirchoudharygic/mltraining.*
        - ^hmharshit/mltraining.*
        - ^FDesnoyer/MathExp.*
        - ^GuitarsAI/.*
        # ferarussia is clearly a fake GitHub account created by GuitarsAI to get around a ban;
        # it was created the day GuitarsAI was blocked and does the same thing
        - ^ferarussia/.*
        - ^irw4ns1/.*
        # cryptocurrency mining bans
        - ^wyattsanchez54/.*
        - ^walterpowell201/.*
      high_quota_specs: []
      # - ^jupyterlab/.*
      # - ^jupyter/.*
      # - ^jupyterhub/.*
      # - ^jupyter-widgets/.*
      spec_config:
        - pattern: ^ipython/ipython-in-depth.*
          config:
            quota: 100
        - pattern: ^petlja/.*
          config:
            quota: 50
        # - pattern: ^github-owner/github-repo-prefix.*
        #   # YYYY-MM-DD of workshop
        #   config:
        #     quota: 123
        - pattern: ^MNGuenther/hackanexoplanet-env.*
          # 2023-04-18
          # https://github.com/jupyterhub/mybinder.org-deploy/issues/2534
          config:
            quota: 300
    GitRepoProvider:
      banned_specs:
        - ^(git|https?)%3A%2F%2Fgithub.com%2Fa2nk%2F.*
        - ^https%3A%2F%2Fbitbucket.org%2Fnikiubel%2Fnikiubel.bitbucket.io.git/.*
        - ^https%3A%2F%2Fjovian.ml%2Fapi%2Fgit%2F.*
        - ^https%3A%2F%2Fframagit.org%2FCecGhesq%2Flic_mdf_nsi_1.*
        - ^(git|https?)%3A%2F%2Fnotabug.org%2FulslcuRux3Y%2F.*
        - ^(git|https?)%3A%2F%2Fgitlab.com%2Fjasmt507%2F.*
        - ^(git|https?)%3A%2F%2Fgitlab.com%2Fh4j3s1978%2F.*
        - .*%2Fabooz.*
        - .*%2Firw4ns1.*
    GitLabRepoProvider:
      banned_specs:
        - ^h4j3s1978%2F.*
        - ^jasmt507%2F.*
        - .*%2Fabooz.*
      spec_config: []
    BinderHub:
      use_registry: true
      per_repo_quota: 100
      per_repo_quota_higher: 200
      cors_allow_origin: "*"
      banner_message: |
        🤍 Donate to mybinder.org!
        Thanks to OVH, GESIS Notebooks and Curvenote for supporting us! 🎉
        mybinder.org has updated the base image to Ubuntu 22.04! See the upgrade guide for details.
      about_message: |
        mybinder.org is public infrastructure operated by the Binder Project team.

        The Binder Project is a member of Project Jupyter, which is a fiscally sponsored project of NumFocus, a US 501(c)(3) non-profit.

        For abuse, please email binder-team@googlegroups.com; to report a security vulnerability, please see: Where can I report a security issue

        For more information about the Binder Project, see the About Binder page
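      # Note: BinderHub injects each extra_footer_scripts entry below into the
      # page footer; the numeric prefix keeps a stable order (entries appear to
      # be sorted by key).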
      extra_footer_scripts:
        01-matomo: |
          // Only load Matomo if DNT is not set.
          // This respects user preferences, and gives us a full score on uBlock Origin
          if (navigator.doNotTrack != "1" && // Most Firefox & Chrome
              window.doNotTrack != "1" && // IE & Safari
              navigator.msDoNotTrack != "1" // Old IE
          ) {
            console.log("Loading Matomo tracking, since Do Not Track is off");
            var _paq = _paq || [];
            /* tracker methods like "setCustomDimension" should be called before "trackPageView" */
            // this is Matomo's own respect-DoNotTrack;
            // should be redundant, but good to be extra explicit
            _paq.push(["setDoNotTrack", true]);
            // disable tracking cookies
            _paq.push(["disableCookies"]);
            _paq.push(['trackPageView']);
            (function() {
              var u="//" + window.location.hostname + "/matomo/";
              _paq.push(['setTrackerUrl', u+'piwik.php']);
              _paq.push(['setSiteId', '1']);
              var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
              g.type='text/javascript'; g.async=true; g.defer=true; g.src=u+'piwik.js';
              s.parentNode.insertBefore(g,s);
            })();
          }
    KubernetesBuildExecutor:
      build_image: quay.io/jupyterhub/repo2docker:2024.03.0
      memory_limit: "3G"
      memory_request: "1G"

  extraConfig:
    # Send events to StackDriver on Google Cloud.
    # This doesn't need any extra permissions, since the GKE nodes have
    # permission to write to StackDriver by default. We don't block access
    # to cloud metadata in the binderhub pod, so this should 'just work'.
    01-eventlog: |
      import os

      import google.cloud.logging
      import google.cloud.logging.handlers
      from traitlets.log import get_logger

      # importing google.cloud configures a root log handler,
      # which prevents tornado's pretty-logging
      import logging

      logging.getLogger().handlers = []

      class JSONCloudLoggingHandler(google.cloud.logging.handlers.CloudLoggingHandler):
          def emit(self, record):
              record.name = None
              super().emit(record)

      def _make_eventsink_handler(el):
          client = google.cloud.logging.Client()
          # These events are not parsed as JSON in StackDriver, so give it a different name
          # for now. Should be fixed in https://github.com/googleapis/google-cloud-python/pull/6293
          name = os.environ.get("EVENT_LOG_NAME") or "binderhub-events-text"
          get_logger().info("Sending event logs to %s/logs/%s", client.project, name)
          return [JSONCloudLoggingHandler(client, name=name)]

      c.EventLog.handlers_maker = _make_eventsink_handler

    # backport the event loop interval metric
    # from https://github.com/jupyterhub/jupyterhub/pull/4615
    02-event-loop-metric: |
      import time

      from prometheus_client import Histogram
      from tornado.ioloop import PeriodicCallback
      from traitlets.log import get_logger

      c = get_config()  # noqa

      EVENT_LOOP_INTERVAL_SECONDS = Histogram(
          'event_loop_interval_seconds',
          'Distribution of measured event loop intervals',
          namespace="jupyterhub",
          # Increase resolution to 5ms below 50ms,
          # because this is where we are most sensitive.
          # No need for buckets below 25ms, since we only measure every 20ms.
          buckets=[
              # 5ms resolution from 25-50ms
              25e-3,
              30e-3,
              35e-3,
              40e-3,
              45e-3,
              50e-3,
              # from here on, the default prometheus buckets
              75e-3,
              0.1,
              0.25,
              0.5,
              0.75,
              1,
              2.5,
              5,
              7.5,
              10,
              float("inf"),
          ],
      )

      class EventLoopMetric:
          event_loop_interval_resolution = 20e-3
          event_loop_interval_log_threshold = 1

          def _event_loop_tick(self):
              """Measure a single tick of the event loop

              This measures the time since the last tick
              """
              now = time.perf_counter()
              tick_duration = now - self._last_tick
              self._last_tick = now
              EVENT_LOOP_INTERVAL_SECONDS.observe(tick_duration)
              if tick_duration >= self.event_loop_interval_log_threshold:
                  # warn about slow ticks
                  self.log.warning("Event loop was unresponsive for %.2fs!", tick_duration)

          def start(self):
              self.log = get_logger()
              self.log.info("starting!")
              now = time.perf_counter()
              self._last_tick = self._last_tick_collect = now
              pc = PeriodicCallback(
                  self._event_loop_tick,
                  self.event_loop_interval_resolution * 1000,
              )
              pc.start()

      metric = EventLoopMetric()
      metric.start()

  registry:
    url: https://gcr.io

  service:
    type: ClusterIP

  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: nginx
    https:
      enabled: true
      type: kube-lego

  imageBuilderType: dind

  dind:
    resources:
      requests:
        cpu: "0.5"
        memory: 1Gi
      limits:
        cpu: "4"
        memory: 4Gi

  imageCleaner:
    enabled: true
    # when 80% of inodes are used,
    # cull images until only 40% are used
    imageGCThresholdHigh: 80
    imageGCThresholdLow: 40

  jupyterhub:
    cull:
      # cull every 11 minutes so it is out of phase
      # with the proxy check-routes interval of five minutes
      every: 660
      timeout: 600
      # maxAge is 6 hours: 6 * 3600 = 21600
      maxAge: 21600
    hub:
      networkPolicy:
        enabled: true
      resources:
        requests:
          cpu: "0.25"
          memory: 1Gi
        limits:
          cpu: "2"
          memory: 1Gi
      extraConfig:
        neverRestart: |
          c.KubeSpawner.extra_pod_config.update({'restart_policy': 'Never'})
        noPrivilegeEscalation: |
          c.KubeSpawner.allow_privilege_escalation = False
        noAuthMetrics: |
          c.JupyterHub.authenticate_prometheus = False
      config:
        BinderSpawner:
          cors_allow_origin: "*"
        JupyterHub:
          # only serve the hub's API, not the full UI
          hub_routespec: "/hub/api/"
        Proxy:
          # default-route to our nginx "Binder not found" service
          extra_routes:
            "/": "http://proxy-patches"
      service:
        annotations:
          prometheus.io/scrape: "true"
          prometheus.io/path: "/hub/metrics"
    proxy:
      service:
        type: ClusterIP
      chp:
        networkPolicy:
          enabled: true
        resources:
          requests:
            memory: 320Mi
            cpu: "0.1"
          limits:
            memory: 320Mi
            cpu: "0.5"
        # FIXME: move to errorTarget/defaultTarget when the hub chart dependency is bumped
        # to include https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/2079
        # this still works, though, as repeatedly specifying CLI flags overrides earlier values
        extraCommandLineFlags:
          # defaultTarget needs to wait for jupyterhub 1.4
          # https://github.com/jupyterhub/jupyterhub/pull/3373
          # - --default-target=http://$(PROXY_PATCHES_SERVICE_HOST):$(PROXY_PATCHES_SERVICE_PORT)
          - --error-target=http://$(PROXY_PATCHES_SERVICE_HOST):$(PROXY_PATCHES_SERVICE_PORT)/hub/error
          - --log-level=error
    ingress:
      enabled: true
      annotations:
        ingress.kubernetes.io/proxy-body-size: 64m
        kubernetes.io/ingress.class: nginx
        kubernetes.io/tls-acme: "true"
    scheduling:
      userScheduler:
        enabled: true
        replicas: 2
      podPriority:
        enabled: true
      userPlaceholder:
        enabled: true
        # replicas set in config/
    singleuser:
      # clear singleuser.defaultUrl config from the chart
      defaultUrl:
      cloudMetadata:
        # we do this in our own network policy
        blockWithIptables: false
      networkPolicy:
        enabled: true
        egress: []
        egressAllowRules:
          nonPrivateIPs: false
      memory:
        guarantee: 450M
        limit: 2G
      cpu:
        guarantee: 0.01
        limit: 1
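      # The extraVolumes/extraVolumeMounts below place configuration at
      # /etc/jupyter in user pods; the user-etc-jupyter ConfigMap is presumably
      # rendered elsewhere in this chart from the top-level etcJupyter values above.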
      storage:
        extraVolumes:
          - name: etc-jupyter
            configMap:
              name: user-etc-jupyter
          - name: etc-jupyter-templates
            configMap:
              name: user-etc-jupyter-templates
        extraVolumeMounts:
          - name: etc-jupyter
            mountPath: /etc/jupyter
          - name: etc-jupyter-templates
            mountPath: /etc/jupyter/templates
      initContainers:
        - name: tc-init
          image: jupyterhub/mybinder.org-tc-init:set-by-chartpress
          imagePullPolicy: IfNotPresent
          env:
            - name: WHITELIST_CIDR
              value: 10.0.0.0/8
            - name: EGRESS_BANDWIDTH
              value: 1mbit
          securityContext:
            # capabilities.add seems to be disabled
            # by the `runAsUser: 1000` in the pod-level securityContext,
            # unless we explicitly run as root
            runAsUser: 0
            capabilities:
              add:
                - NET_ADMIN

# values ref: https://github.com/kubernetes/ingress-nginx/blob/main/charts/ingress-nginx/values.yaml
ingress-nginx:
  enabled: true
  rbac:
    create: true
  defaultBackend:
    enabled: true
    minAvailable: 0
  controller:
    admissionWebhooks:
      enabled: false
    resources:
      requests:
        cpu: 100m
        memory: 150Mi
      limits:
        cpu: 800m
        memory: 500Mi
    tolerations:
      - key: "node.kubernetes.io/unschedulable"
        operator: "Exists"
        effect: "NoExecute"
        tolerationSeconds: 30
    affinity:
      podAntiAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: app.kubernetes.io/name
                    operator: In
                    values:
                      - ingress
                  - key: app.kubernetes.io/component
                    operator: In
                    values:
                      - controller
              topologyKey: kubernetes.io/hostname
    replicaCount: 3
    scope:
      enabled: true
    config:
      # Allow POSTs of up to 64MB, for large notebook support.
      proxy-body-size: 64m
    stats:
      enabled: true
    metrics:
      enabled: true
      service:
        annotations:
          prometheus.io/scrape: "true"
          prometheus.io/port: "10254"
    service:
      # Preserve client IPs
      externalTrafficPolicy: Local

redirector:
  enabled: false
  nodeSelector: {}
  redirects: []
  ingress:
    annotations:
      kubernetes.io/ingress.class: nginx
      kubernetes.io/tls-acme: "true"
    tls:
      secretName: kubelego-tls-redirector
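# Illustrative (commented-out) shape of a redirector.redirects entry, as a
# sketch only: the exact schema and the hostnames are assumptions, so check the
# redirector's templates before relying on it.
#
# redirector:
#   redirects:
#     - type: host
#       host:
#         from: beta.mybinder.org
#         to: mybinder.org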
static:
  ingress:
    annotations:
      kubernetes.io/ingress.class: nginx
      kubernetes.io/tls-acme: "true"
    tls:
      secretName: kubelego-tls-static

# values ref: https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
grafana:
  enabled: true
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: nginx
      kubernetes.io/tls-acme: "true"
  # deploymentStrategy.type is set to Recreate because our storage can only be
  # attached to one pod, so we can't have the two concurrent replicas that a
  # RollingUpdate would briefly create.
  deploymentStrategy:
    type: Recreate
  persistence:
    enabled: true
    size: 1Gi
    accessModes:
      - ReadWriteOnce
  grafana.ini:
    auth.anonymous:
      enabled: true
      org_name: Main Org.
      org_role: Viewer
    auth.basic:
      enabled: true
    smtp:
      enabled: true
    security:
      allow_embedding: true

# values ref: https://github.com/prometheus-community/helm-charts/blob/main/charts/prometheus/values.yaml
prometheus:
  enabled: true
  alertmanager:
    enabled: false
  prometheus-pushgateway:
    enabled: false
  server:
    podLabels:
      # needs access to the Hub API
      hub.jupyter.org/network-access-hub: "true"
    strategy:
      # The default of RollingUpdate fails because the attached storage can only
      # be mounted on one pod, so we need to use Recreate, which first shuts down
      # the old pod and then starts the new one during updates.
      type: Recreate
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: nginx
        kubernetes.io/tls-acme: "true"

  # make sure we collect metrics on pods by app/component at least
  kube-state-metrics:
    metricLabelsAllowlist:
      - pods=[app,component]
      - nodes=[*]

proxyPatches:
  enabled: true
  nodeSelector: {}

matomo:
  enabled: false
  nodeSelector: {}
  resources: {}
  replicas: 1
  service:
    type: ClusterIP
  db:
    username: matomo
    name: matomo
    tables_prefix: matomo_

gcsProxy:
  enabled: false
  replicas: 1
  nodeSelector: {}

analyticsPublisher:
  enabled: false
  events:
    logName: binderhub-events-text
  image:
    name: jupyterhub/mybinder.org-analytics-publisher
    tag: "set-by-chartpress"
  cloudCosts:
    # All billing info goes into the same bucket in prod, to which staging has access
    enabled: true
    sourceBucket: binder-billing
    fileName: cloud-costs.jsonl
    kind: csv
  nodeSelector: {}
  resources:
    requests:
      cpu: 10m
      memory: 150Mi
    limits:
      cpu: 100m
      memory: 300Mi

# this is defined in secrets/ for the OVH cluster
eventsArchiver:
  serviceAccountKey: ""

federationRedirect:
  enabled: false
  host: mybinder.org
  resources:
    requests:
      cpu: 10m
      memory: 50Mi
    limits:
      cpu: 100m
      memory: 250Mi
  image:
    name: jupyterhub/mybinder.org-federation-redirect
    tag: "set-by-chartpress"
  check:
    period: 15
    jitter: 0.1
    retries: 5
    timeout: 5
    failed_period: 90
  load_balancer: "rendezvous"
  pod_headroom: 10
  hosts: {}
  nodeSelector: {}

minesweeper:
  enabled: true
  image: jupyterhub/mybinder.org-minesweeper:set-by-chartpress

# cluster-autoscaler is responsible for understanding if k8s nodes need to be
# added or removed. A pending pod is a sign that another node should be added,
# and an underused node is a sign that a node should be removed.
#
# We always need a cluster-autoscaler, but we only deploy it ourselves if it's
# not provided as part of the k8s cluster.
#
# values ref: https://github.com/kubernetes/autoscaler/blob/master/charts/cluster-autoscaler/values.yaml
#
cluster-autoscaler:
  enabled: false

# Name:Priority pairs of priority classes to create
# https://kubernetes.io/blog/2023/01/12/protect-mission-critical-pods-priorityclass/
priorityClasses: {}

binderhub-container-registry-helper:
  enabled: false

awsEcrRegistryCleaner:
  enabled: false
  image: ghcr.io/manics/aws-ecr-registry-cleaner:0.0.1
  expiresAfterPullDays: 7
  # 12 hours
  loopDelay: 43200
  serviceAccount:
    name: binderhub-ecr-registry-cleaner
    annotations: {}
  priorityClassName: ""
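# Illustrative (commented-out) examples for two of the empty values above; the
# key names and values are assumptions for illustration, not tested config.
#
# priorityClasses takes Name:Priority pairs, e.g. a hypothetical essential class:
#
# priorityClasses:
#   mybinder-essential: 1000000
#
# federationRedirect.hosts maps member names to the endpoints the redirect
# service balances across, e.g. a hypothetical GKE member:
#
# federationRedirect:
#   hosts:
#     gke:
#       url: https://gke.mybinder.org
#       weight: 4
#       health: https://gke.mybinder.org/health
#       prime: true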