
When spinning up a cluster using the NATS Helm chart, I get the following errors at different levels.

P.S. I've put a lot of info below, so TL;DR:

"Warning FailedScheduling 99s (x4 over 4m4s) default-scheduler 0/1 nodes are available: 1 pod has unbound immediate PersistentVolumeClaims. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling."

Other Supplementary Info

  • statefulset/nats: Waiting for statefulset spec update to be observed...

The PVC for nats-2, stuck in Pending:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  creationTimestamp: "2022-07-08T04:29:45Z"
  finalizers:
  - kubernetes.io/pvc-protection
  labels:
    app.kubernetes.io/instance: nats
    app.kubernetes.io/name: nats
  name: nats-js-pvc-nats-2
  namespace: default
  resourceVersion: "3002438"
  uid: fe665266-be65-4b36-a3ef-5c353193f234
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
  storageClassName: do-block-storage
  volumeMode: Filesystem
status:
  phase: Pending
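The claim stays Pending against do-block-storage, so one thing I still need to confirm is that the provisioner behind that class is actually healthy (generic checks, not output I have captured yet):

# shows the provisioner name and the volumeBindingMode behind the class the chart requested
kubectl get storageclass do-block-storage -o yaml

# on DOKS the CSI driver pods normally live in kube-system; the grep pattern is just a guess at their names
kubectl get pods -n kube-system | grep -i csi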

The rendered nats.conf (from the nats-config ConfigMap):

# NATS Clients Port
port: 4222

# PID file shared with configuration reloader.
pid_file: "/var/run/nats/nats.pid"

###############
#             #
# Monitoring  #
#             #
###############
http: 8222
server_name:$SERVER_NAME
###################################
#                                 #
# NATS JetStream                  #
#                                 #
###################################
jetstream {
  max_mem: 1Gi
  store_dir: /data/

  max_file:2Gi
}
###################################
#                                 #
# NATS Full Mesh Clustering Setup #
#                                 #
###################################
cluster {
  port: 6222
  name: asg4-cluster

  routes = [
    nats://nats-0.nats.default.svc.cluster.local:6222,nats://nats-1.nats.default.svc.cluster.local:6222,nats://nats-2.nats.default.svc.cluster.local:6222,
    
  ]
  cluster_advertise: $CLUSTER_ADVERTISE

  connect_retries: 120
}
lame_duck_grace_period: 10s
lame_duck_duration: 30s
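To rule the volume out entirely, one experiment I'm considering (but have not run yet) is reinstalling the chart with JetStream file storage disabled, so the StatefulSet gets no volumeClaimTemplate and no PVC at all; the value paths come from the computed values further down, and I'm assuming the chart repo alias is nats:

# hypothetical test-only install: keep JetStream on memory storage, drop the file store / PVC
helm uninstall nats
helm install nats nats/nats \
  --set nats.jetstream.enabled=true \
  --set nats.jetstream.fileStorage.enabled=false

If the pods schedule with that override, the problem is isolated to provisioning of the do-block-storage volumes rather than anything in the NATS config above.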

StatefulSet pod spec (nats-2):

apiVersion: v1
kind: Pod
metadata:
  annotations:
    checksum/config: 67cdb8135e8f578ceb198e0575fd6080ca97b671a9a175897d7effb7b98c6e5a
    prometheus.io/path: /metrics
    prometheus.io/port: "7777"
    prometheus.io/scrape: "true"
  creationTimestamp: "2022-07-08T23:05:01Z"
  generateName: nats-
  labels:
    app.kubernetes.io/instance: nats
    app.kubernetes.io/name: nats
    controller-revision-hash: nats-59cb47bb5c
    statefulset.kubernetes.io/pod-name: nats-2
  name: nats-2
  namespace: default
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: StatefulSet
    name: nats
    uid: 60e7d9c0-186f-4e27-aae2-2d1e9c5e2440
  resourceVersion: "3049778"
  uid: e3f1cd98-b5e5-4866-ae3a-047788065d76
spec:
  containers:
  - command:
    - nats-server
    - --config
    - /etc/nats-config/nats.conf
    env:
    - name: POD_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.name
    - name: SERVER_NAME
      value: asg4-$(POD_NAME)
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: CLUSTER_ADVERTISE
      value: $(POD_NAME).nats.$(POD_NAMESPACE).svc.cluster.local
    image: nats
    imagePullPolicy: IfNotPresent
    lifecycle:
      preStop:
        exec:
          command:
          - nats-server
          - -sl=ldm=/var/run/nats/nats.pid
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /
        port: 8222
        scheme: HTTP
      initialDelaySeconds: 10
      periodSeconds: 30
      successThreshold: 1
      timeoutSeconds: 5
    name: nats
    ports:
    - containerPort: 4222
      name: client
      protocol: TCP
    - containerPort: 6222
      name: cluster
      protocol: TCP
    - containerPort: 8222
      name: monitor
      protocol: TCP
    - containerPort: 7777
      name: metrics
      protocol: TCP
    resources: {}
    startupProbe:
      failureThreshold: 30
      httpGet:
        path: /healthz
        port: 8222
        scheme: HTTP
      initialDelaySeconds: 10
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 5
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /etc/nats-config
      name: config-volume
    - mountPath: /var/run/nats
      name: pid
    - mountPath: /data/
      name: nats-js-pvc
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-ntbwp
      readOnly: true
  - command:
    - nats-server-config-reloader
    - -pid
    - /var/run/nats/nats.pid
    - -config
    - /etc/nats-config/nats.conf
    image: natsio/nats-server-config-reloader:0.7.0
    imagePullPolicy: IfNotPresent
    name: reloader
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /etc/nats-config
      name: config-volume
    - mountPath: /var/run/nats
      name: pid
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-ntbwp
      readOnly: true
  - args:
    - -connz
    - -routez
    - -subz
    - -varz
    - -prefix=nats
    - -use_internal_server_id
    - -jsz=all
    - http://localhost:8222/
    image: natsio/prometheus-nats-exporter:0.9.3
    imagePullPolicy: IfNotPresent
    name: metrics
    ports:
    - containerPort: 7777
      name: metrics
      protocol: TCP
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-ntbwp
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  hostname: nats-2
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: nats-service
  serviceAccountName: nats-service
  shareProcessNamespace: true
  subdomain: nats
  terminationGracePeriodSeconds: 60
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: nats-js-pvc
    persistentVolumeClaim:
      claimName: nats-js-pvc-nats-2
  - configMap:
      defaultMode: 420
      name: nats-config
    name: config-volume
  - emptyDir: {}
    name: pid
  - name: kube-api-access-ntbwp
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2022-07-08T23:05:01Z"
    message: '0/1 nodes are available: 1 pod has unbound immediate PersistentVolumeClaims.
      preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.'
    reason: Unschedulable
    status: "False"
    type: PodScheduled
  phase: Pending
  qosClass: BestEffort
Helm release output (values, hooks, and rendered manifest):

NAME: nats
LAST DEPLOYED: Fri Jul  8 16:05:00 2022
NAMESPACE: default
STATUS: deployed
REVISION: 1
USER-SUPPLIED VALUES:
null

COMPUTED VALUES:
additionalContainers: []
additionalVolumeMounts: []
additionalVolumes: []
affinity: {}
auth:
  enabled: false
  resolver:
    allowDelete: false
    interval: 2m
    operator: null
    store:
      dir: /accounts/jwt
      size: 1Gi
    systemAccount: null
    type: none
bootconfig:
  image: natsio/nats-boot-config:0.7.0
  pullPolicy: IfNotPresent
  securityContext: {}
cluster:
  enabled: true
  extraRoutes: []
  name: asg4-cluster
  noAdvertise: false
  replicas: 3
commonLabels: {}
exporter:
  enabled: true
  image: natsio/prometheus-nats-exporter:0.9.3
  portName: metrics
  pullPolicy: IfNotPresent
  resources: {}
  securityContext: {}
  serviceMonitor:
    annotations: {}
    enabled: false
    labels: {}
    path: /metrics
gateway:
  enabled: false
  name: default
  port: 7522
imagePullSecrets: []
k8sClusterDomain: cluster.local
leafnodes:
  enabled: false
  noAdvertise: false
  port: 7422
mqtt:
  ackWait: 1m
  enabled: false
  maxAckPending: 100
nameOverride: ""
namespaceOverride: ""
nats:
  advertise: true
  client:
    port: 4222
    portName: client
  configChecksumAnnotation: true
  connectRetries: 120
  externalAccess: false
  healthcheck:
    detectHealthz: true
    enableHealthz: true
    liveness:
      enabled: true
      failureThreshold: 3
      initialDelaySeconds: 10
      periodSeconds: 30
      successThreshold: 1
      terminationGracePeriodSeconds: null
      timeoutSeconds: 5
    readiness:
      enabled: false
      failureThreshold: 3
      initialDelaySeconds: 10
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 5
    startup:
      enabled: true
      failureThreshold: 30
      initialDelaySeconds: 10
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 5
  image: nats
  jetstream:
    domain: ""
    enabled: true
    fileStorage:
      accessModes:
      - ReadWriteOnce
      annotations: null
      enabled: true
      size: 2Gi
      storageClassName: do-block-storage
      storageDirectory: /data/
    memStorage:
      enabled: true
      size: 1Gi
    uniqueTag: ""
  limits:
    lameDuckDuration: 30s
    lameDuckGracePeriod: 10s
    maxConnections: null
    maxControlLine: null
    maxPayload: null
    maxPending: null
    maxPings: null
    maxSubscriptions: null
    pingInterval: null
    writeDeadline: null
  logging:
    connectErrorReports: false
    debug: null
    logtime: null
    reconnectErrorReports: false
    trace: null
  profiling:
    enabled: false
    port: 6000
  pullPolicy: IfNotPresent
  resources: {}
  securityContext: {}
  selectorLabels: {}
  serverNamePrefix: asg4-
  serverTags: null
  serviceAccount:
    annotations: {}
    create: true
    name: nats-service
  terminationGracePeriodSeconds: 60
natsbox:
  additionalLabels: {}
  affinity: {}
  enabled: false
  extraVolumeMounts: []
  extraVolumes: []
  image: natsio/nats-box:0.11.0
  imagePullSecrets: []
  nodeSelector: {}
  podAnnotations: {}
  podLabels: {}
  pullPolicy: IfNotPresent
  securityContext: {}
  tolerations: []
networkPolicy:
  allowExternal: true
  enabled: false
  extraEgress: []
  extraIngress: []
  ingressNSMatchLabels: {}
  ingressNSPodMatchLabels: {}
nodeSelector: {}
podAnnotations: {}
podDisruptionBudget:
  enabled: true
  maxUnavailable: 1
podManagementPolicy: Parallel
priorityClassName: null
reloader:
  enabled: true
  extraConfigs: []
  image: natsio/nats-server-config-reloader:0.7.0
  pullPolicy: IfNotPresent
  securityContext: {}
securityContext: {}
serviceAnnotations: {}
statefulSetAnnotations: {}
statefulSetPodLabels: {}
tolerations: []
topologyKeys: []
topologySpreadConstraints: []
useFQDN: true
websocket:
  allowedOrigins: []
  enabled: false
  noTLS: true
  port: 443
  sameOrigin: false

HOOKS:
---
# Source: nats/templates/tests/test-request-reply.yaml
apiVersion: v1
kind: Pod
metadata:
  name: "nats-test-request-reply"
  labels:
    chart: nats-0.17.1
    app: nats-test-request-reply
  annotations:
    "helm.sh/hook": test
spec:
  containers:
  - name: nats-box
    image: synadia/nats-box
    env:
    - name: NATS_HOST
      value: nats
    command:
    - /bin/sh
    - -ec
    - |
      nats reply -s nats://$NATS_HOST:4222 'name.>' --command "echo 1" &
    - |
      "&&"
    - |
      name=$(nats request -s nats://$NATS_HOST:4222 name.test '' 2>/dev/null)
    - |
      "&&"
    - |
      [ $name = test ]

  restartPolicy: Never
MANIFEST:
---
# Source: nats/templates/pdb.yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: nats
  namespace: default
  labels:
    helm.sh/chart: nats-0.17.1
    app.kubernetes.io/name: nats
    app.kubernetes.io/instance: nats
    app.kubernetes.io/version: "2.8.4"
    app.kubernetes.io/managed-by: Helm
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: nats
      app.kubernetes.io/instance: nats
---
# Source: nats/templates/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nats-service
  labels:
    helm.sh/chart: nats-0.17.1
    app.kubernetes.io/name: nats
    app.kubernetes.io/instance: nats
    app.kubernetes.io/version: "2.8.4"
    app.kubernetes.io/managed-by: Helm
---
# Source: nats/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nats-config
  namespace: default
  labels:
    helm.sh/chart: nats-0.17.1
    app.kubernetes.io/name: nats
    app.kubernetes.io/instance: nats
    app.kubernetes.io/version: "2.8.4"
    app.kubernetes.io/managed-by: Helm
data:
  nats.conf: |
    # NATS Clients Port
    port: 4222

    # PID file shared with configuration reloader.
    pid_file: "/var/run/nats/nats.pid"

    ###############
    #             #
    # Monitoring  #
    #             #
    ###############
    http: 8222
    server_name:$SERVER_NAME
    ###################################
    #                                 #
    # NATS JetStream                  #
    #                                 #
    ###################################
    jetstream {
      max_mem: 1Gi
      store_dir: /data/

      max_file:2Gi
    }
    ###################################
    #                                 #
    # NATS Full Mesh Clustering Setup #
    #                                 #
    ###################################
    cluster {
      port: 6222
      name: asg4-cluster

      routes = [
        nats://nats-0.nats.default.svc.cluster.local:6222,nats://nats-1.nats.default.svc.cluster.local:6222,nats://nats-2.nats.default.svc.cluster.local:6222,
        
      ]
      cluster_advertise: $CLUSTER_ADVERTISE

      connect_retries: 120
    }
    lame_duck_grace_period: 10s
    lame_duck_duration: 30s
---
# Source: nats/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: nats
  namespace: default
  labels:
    helm.sh/chart: nats-0.17.1
    app.kubernetes.io/name: nats
    app.kubernetes.io/instance: nats
    app.kubernetes.io/version: "2.8.4"
    app.kubernetes.io/managed-by: Helm
spec:
  selector:
    app.kubernetes.io/name: nats
    app.kubernetes.io/instance: nats
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
  - name: client
    port: 4222
    appProtocol: tcp
  - name: cluster
    port: 6222
    appProtocol: tcp
  - name: monitor
    port: 8222
    appProtocol: http
  - name: metrics
    port: 7777
    appProtocol: http
  - name: leafnodes
    port: 7422
    appProtocol: tcp
  - name: gateways
    port: 7522
    appProtocol: tcp
---
# Source: nats/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nats
  namespace: default
  labels:
    helm.sh/chart: nats-0.17.1
    app.kubernetes.io/name: nats
    app.kubernetes.io/instance: nats
    app.kubernetes.io/version: "2.8.4"
    app.kubernetes.io/managed-by: Helm
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: nats
      app.kubernetes.io/instance: nats
  replicas: 3
  serviceName: nats

  podManagementPolicy: Parallel

  template:
    metadata:
      annotations:
        prometheus.io/path: /metrics
        prometheus.io/port: "7777"
        prometheus.io/scrape: "true"
        checksum/config: 67cdb8135e8f578ceb198e0575fd6080ca97b671a9a175897d7effb7b98c6e5a
      labels:
        app.kubernetes.io/name: nats
        app.kubernetes.io/instance: nats
    spec:
      # Common volumes for the containers.
      volumes:
      - name: config-volume
        configMap:
          name: nats-config

      # Local volume shared with the reloader.
      - name: pid
        emptyDir: {}

      #################
      #               #
      #  TLS Volumes  #
      #               #
      #################

      serviceAccountName: nats-service

      # Required to be able to HUP signal and apply config
      # reload to the server without restarting the pod.
      shareProcessNamespace: true

      #################
      #               #
      #  NATS Server  #
      #               #
      #################
      terminationGracePeriodSeconds: 60
      containers:
      - name: nats
        image: nats
        imagePullPolicy: IfNotPresent
        resources:
          {}
        ports:
        - containerPort: 4222
          name: client
        - containerPort: 6222
          name: cluster
        - containerPort: 8222
          name: monitor
        - containerPort: 7777
          name: metrics

        command:
        - "nats-server"
        - "--config"
        - "/etc/nats-config/nats.conf"

        # Required to be able to define an environment variable
        # that refers to other environment variables.  This env var
        # is later used as part of the configuration file.
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: SERVER_NAME
          value: asg4-$(POD_NAME)
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: CLUSTER_ADVERTISE
          value: $(POD_NAME).nats.$(POD_NAMESPACE).svc.cluster.local
        volumeMounts:
        - name: config-volume
          mountPath: /etc/nats-config
        - name: pid
          mountPath: /var/run/nats
        - name: nats-js-pvc
          mountPath: /data/
        

        #######################
        #                     #
        # Healthcheck Probes  #
        #                     #
        #######################
        livenessProbe:
          httpGet:
            path: /
            port: 8222
          initialDelaySeconds: 10
          timeoutSeconds: 5
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 3
        startupProbe:
          httpGet:
            # for NATS server versions >=2.7.1, healthz will be enabled to allow for a grace period
            # in case of JetStream enabled deployments to form quorum and streams to catch up.
            path: /healthz
            port: 8222
          initialDelaySeconds: 10
          timeoutSeconds: 5
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 30

        # Gracefully stop NATS Server on pod deletion or image upgrade.
        #
        lifecycle:
          preStop:
            exec:
              # send the lame duck shutdown signal to trigger a graceful shutdown
              # nats-server will ignore the TERM signal it receives after this
              #
              command:
              - "nats-server"
              - "-sl=ldm=/var/run/nats/nats.pid"

      #################################
      #                               #
      #  NATS Configuration Reloader  #
      #                               #
      #################################
      - name: reloader
        image: natsio/nats-server-config-reloader:0.7.0
        imagePullPolicy: IfNotPresent
        resources:
          null
        command:
        - "nats-server-config-reloader"
        - "-pid"
        - "/var/run/nats/nats.pid"
        - "-config"
        - "/etc/nats-config/nats.conf"
        volumeMounts:
        - name: config-volume
          mountPath: /etc/nats-config
        - name: pid
          mountPath: /var/run/nats
        

      ##############################
      #                            #
      #  NATS Prometheus Exporter  #
      #                            #
      ##############################
      - name: metrics
        image: natsio/prometheus-nats-exporter:0.9.3
        imagePullPolicy: IfNotPresent
        resources:
          {}
        args:
        - -connz
        - -routez
        - -subz
        - -varz
        - -prefix=nats
        - -use_internal_server_id
        - -jsz=all
        - http://localhost:8222/
        ports:
        - containerPort: 7777
          name: metrics

  volumeClaimTemplates:
  #####################################
  #                                   #
  #  Jetstream New Persistent Volume  #
  #                                   #
  #####################################
    - metadata:
        name: nats-js-pvc
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 2Gi
        storageClassName: "do-block-storage"

NOTES:
You can find more information about running NATS on Kubernetes
in the NATS documentation website:

  https://docs.nats.io/nats-on-kubernetes/nats-kubernetes

Thanks for using NATS!

nats-0,1,2 PODS (describe output for nats-0 shown):

services> kubectl describe pods/nats-0
Name:           nats-0
Namespace:      default
Priority:       0
Node:           <none>
Labels:         app.kubernetes.io/instance=nats
                app.kubernetes.io/name=nats
                controller-revision-hash=nats-59cb47bb5c
                statefulset.kubernetes.io/pod-name=nats-0
Annotations:    checksum/config: 67cdb8135e8f578ceb198e0575fd6080ca97b671a9a175897d7effb7b98c6e5a
                prometheus.io/path: /metrics
                prometheus.io/port: 7777
                prometheus.io/scrape: true
Status:         Pending
IP:
IPs:            <none>
Controlled By:  StatefulSet/nats
Containers:
  nats:
    Image:       nats
    Ports:       4222/TCP, 6222/TCP, 8222/TCP, 7777/TCP
    Host Ports:  0/TCP, 0/TCP, 0/TCP, 0/TCP
    Command:
      nats-server
      --config
      /etc/nats-config/nats.conf
    Liveness:  http-get http://:8222/ delay=10s timeout=5s period=30s #success=1 #failure=3
    Startup:   http-get http://:8222/healthz delay=10s timeout=5s period=10s #success=1 #failure=30
    Environment:
      POD_NAME:           nats-0 (v1:metadata.name)
      SERVER_NAME:        asg4-$(POD_NAME)
      POD_NAMESPACE:      default (v1:metadata.namespace)
      CLUSTER_ADVERTISE:  $(POD_NAME).nats.$(POD_NAMESPACE).svc.cluster.local
    Mounts:
      /data/ from nats-js-pvc (rw)
      /etc/nats-config from config-volume (rw)
      /var/run/nats from pid (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kf94t (ro)
  reloader:
    Image:      natsio/nats-server-config-reloader:0.7.0
    Port:       <none>
    Host Port:  <none>
    Command:
      nats-server-config-reloader
      -pid
      /var/run/nats/nats.pid
      -config
      /etc/nats-config/nats.conf
    Environment:  <none>
    Mounts:
      /etc/nats-config from config-volume (rw)
      /var/run/nats from pid (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kf94t (ro)
  metrics:
    Image:      natsio/prometheus-nats-exporter:0.9.3
    Port:       7777/TCP
    Host Port:  0/TCP
    Args:
      -connz
      -routez
      -subz
      -varz
      -prefix=nats
      -use_internal_server_id
      -jsz=all
      http://localhost:8222/
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kf94t (ro)
Conditions:
  Type           Status
  PodScheduled   False
Volumes:
  nats-js-pvc:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)        
    ClaimName:  nats-js-pvc-nats-0
    ReadOnly:   false
  config-volume:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      nats-config
    Optional:  false
  pid:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  kube-api-access-kf94t:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason            Age                 From               Message
  ----     ------            ----                ----               -------
  Warning  FailedScheduling  99s (x4 over 4m4s)  default-scheduler  0/1 nodes are available: 1 pod has unbound immediate PersistentVolumeClaims. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.
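For completeness, the scheduler event above is reported per pod; the claim-side view (provisioning events, and whether any PersistentVolumes exist at all) is what I would attach next, using nothing chart-specific:

kubectl get events -n default --field-selector involvedObject.kind=PersistentVolumeClaim
kubectl get pv
kubectl get nodes -o wide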