I use a custom Helm chart to deploy my project, hosted on GitLab, to a Google Kubernetes Engine cluster. It works smoothly, but I have problems in the following two scenarios.
- The Helm chart doesn't upgrade the deployment on Kubernetes even though a new image has been built. My understanding is that it compares the SHA256 digest of the image deployed on Kubernetes with that of the new image from the build stage and, if they differ, starts a new pod with the new image and terminates the old one. But it doesn't do that. Initially I suspected a problem with the image pullPolicy, as it was set to IfNotPresent. I tried setting it to Always, but it still didn't work. (Below is how I compare the two digests.)
- When the image pull policy is set to Always and a pod restarts because of a failure or anything else, it fails with an ImagePullBackOff error. I checked the secrets present in the namespace on Kubernetes; the dockerconfigjson secret is there, but pulling still fails with an authorization error (see the error logs and the secret check below). It starts to work again when I deploy with a new CI/CD pipeline.
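This is roughly how I compare the digest that is actually running with the digest of the freshly built image (<namespace> and <pod-name> are placeholders; the image reference is the one from the error logs below):
# digest of the image the running pod was started from
kubectl -n <namespace> get pod <pod-name> -o jsonpath='{.status.containerStatuses[0].imageID}'
# digest of the newest image in the registry, after a fresh pull
docker pull gitlab.digital-worx.de:5050/asvin/asvin-frontend/master:latest
docker inspect --format '{{ index .RepoDigests 0 }}' gitlab.digital-worx.de:5050/asvin/asvin-frontend/master:latest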
error logs
Warning Failed 19m (x4 over 20m) kubelet Failed to pull image "gitlab.digital-worx.de:5050/asvin/asvin-frontend/master:latest": rpc error: code = Unknown desc = Error response from daemon: Get https://gitlab.digital-worx.de:5050/v2/asvin/asvin-frontend/master/manifests/latest: unauthorized: HTTP Basic: Access denied
Warning Failed 19m (x4 over 20m) kubelet Error: ErrImagePull
Warning Failed 25s (x87 over 20m) kubelet Error: ImagePullBackOff
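For reference, this is how I inspect the pull secret the pod is supposed to use (<namespace> and <secret-name> are placeholders; decoding just prints the registry auth JSON):
kubectl -n <namespace> get secret <secret-name> -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d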
deployment.yaml
{{- if not .Values.application.initializeCommand -}}
apiVersion: {{ default "extensions/v1beta1" .Values.deploymentApiVersion }}
kind: Deployment
metadata:
name: {{ template "name" . }}
annotations:
{{ if .Values.gitlab.app }}app.gitlab.com/app: {{ .Values.gitlab.app | quote }}{{ end }}
{{ if .Values.gitlab.env }}app.gitlab.com/env: {{ .Values.gitlab.env | quote }}{{ end }}
labels:
app: {{ template "name" . }}
track: "{{ .Values.application.track }}"
tier: "{{ .Values.application.tier }}"
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
release: {{ .Release.Name }}
service: {{ .Values.ranking.service.name }}
spec:
{{- if or .Values.enableSelector (eq (default "extensions/v1beta1" .Values.deploymentApiVersion) "apps/v1") }}
selector:
matchLabels:
app: {{ template "name" . }}
track: "{{ .Values.application.track }}"
tier: "{{ .Values.application.tier }}"
release: {{ .Release.Name }}
service: {{ .Values.ranking.service.name }}
{{- end }}
replicas: {{ .Values.replicaCount }}
{{- if .Values.strategyType }}
strategy:
type: {{ .Values.strategyType | quote }}
{{- end }}
template:
metadata:
annotations:
checksum/application-secrets: "{{ .Values.application.secretChecksum }}"
{{ if .Values.gitlab.app }}app.gitlab.com/app: {{ .Values.gitlab.app | quote }}{{ end }}
{{ if .Values.gitlab.env }}app.gitlab.com/env: {{ .Values.gitlab.env | quote }}{{ end }}
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
labels:
app: {{ template "name" . }}
track: "{{ .Values.application.track }}"
tier: "{{ .Values.application.tier }}"
release: {{ .Release.Name }}
service: {{ .Values.ranking.service.name }}
spec:
volumes:
{{- if .Values.ranking.configmap }}
{{- end }}
imagePullSecrets:
{{ toYaml .Values.ranking.image.secrets | indent 10 }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.ranking.image.repository }}:{{ .Values.ranking.image.tag }}"
imagePullPolicy: {{ .Values.ranking.image.pullPolicy }}
{{- if .Values.application.secretName }}
envFrom:
- secretRef:
name: {{ .Values.application.secretName }}
{{- end }}
env:
- name: INDEXER_URL
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-secret
key: INDEXER_URL
volumeMounts:
ports:
- name: "{{ .Values.ranking.service.name }}"
containerPort: {{ .Values.ranking.service.internalPort }}
livenessProbe:
{{- if eq .Values.livenessProbe.probeType "httpGet" }}
httpGet:
path: {{ .Values.livenessProbe.path }}
scheme: {{ .Values.livenessProbe.scheme }}
port: {{ .Values.ranking.service.internalPort }}
{{- else if eq .Values.livenessProbe.probeType "tcpSocket" }}
tcpSocket:
port: {{ .Values.ranking.service.internalPort }}
{{- else if eq .Values.livenessProbe.probeType "exec" }}
exec:
command:
{{ toYaml .Values.livenessProbe.command | indent 14 }}
{{- end }}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
readinessProbe:
{{- if eq .Values.readinessProbe.probeType "httpGet" }}
httpGet:
path: {{ .Values.readinessProbe.path }}
scheme: {{ .Values.readinessProbe.scheme }}
port: {{ .Values.ranking.service.internalPort }}
{{- else if eq .Values.readinessProbe.probeType "tcpSocket" }}
tcpSocket:
port: {{ .Values.ranking.service.internalPort }}
{{- else if eq .Values.readinessProbe.probeType "exec" }}
exec:
command:
{{ toYaml .Values.readinessProbe.command | indent 14 }}
{{- end }}
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
resources:
{{ toYaml .Values.resources | indent 12 }}
restartPolicy: Always
enableServiceLinks: false
status: {}
{{- end -}}
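As far as I understand, a Deployment only replaces its pods when something under spec.template changes, and with a fixed latest tag the rendered pod template is identical from one deploy to the next. Just to illustrate that mechanism (this is a sketch, not something my chart currently does), an annotation on the pod template that is re-rendered on every helm upgrade would force a rollout:
  template:
    metadata:
      annotations:
        # new random value on every upgrade, so the pod template
        # always differs and the old pods get replaced
        rollme: {{ randAlphaNum 5 | quote }}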
values.yaml
# Default values for chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
strategyType:
enableSelector:
deploymentApiVersion: apps/v1
ranking:
name: ranking
image:
repository: gitlab.iotcrawler.net:4567/ranking/ranking/master
tag: latest
pullPolicy: Always
secrets:
- name: gitlab-registry-demonstrator-murcia-parking-iotcrawler
service:
enabled: true
annotations: {}
name: ranking
type: ClusterIP
additionalHosts:
commonName:
externalPort: 3003
internalPort: 3003
production:
url: parking.ranking.iotcrawler.eu
staging:
url: staging.parking.ranking.iotcrawler.eu
configmap: true
podAnnotations: {}
application:
track: latest
tier: web
migrateCommand:
initializeCommand:
secretName:
secretChecksum:
hpa:
enabled: false
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
gitlab:
app:
env:
envName:
envURL:
ingress:
enabled: true
url:
tls:
enabled: true
secretName: ""
annotations:
kubernetes.io/tls-acme: "true"
kubernetes.io/ingress.class: "nginx"
modSecurity:
enabled: false
secRuleEngine: "DetectionOnly"
# secRules:
# - variable: ""
# operator: ""
# action: ""
prometheus:
metrics: false
livenessProbe:
path: "/"
initialDelaySeconds: 15
timeoutSeconds: 15
scheme: "HTTP"
probeType: "httpGet"
readinessProbe:
path: "/"
initialDelaySeconds: 5
timeoutSeconds: 3
scheme: "HTTP"
probeType: "httpGet"
postgresql:
enabled: true
managed: false
managedClassSelector:
# matchLabels:
# stack: gitlab (This is an example. The labels should match the labels on the CloudSQLInstanceClass)
resources:
# limits:
# cpu: 100m
# memory: 128Mi
requests:
# cpu: 100m
# memory: 128Mi
## Configure PodDisruptionBudget
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
#
podDisruptionBudget:
enabled: false
# minAvailable: 1
maxUnavailable: 1
## Configure NetworkPolicy
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
#
networkPolicy:
enabled: false
spec:
podSelector:
matchLabels: {}
ingress:
- from:
- podSelector:
matchLabels: {}
- namespaceSelector:
matchLabels:
app.gitlab.com/managed_by: gitlab
workers: {}
# worker:
# replicaCount: 1
# terminationGracePeriodSeconds: 60
# command:
# - /bin/herokuish
# - procfile
# - start
# - worker
# preStopCommand:
# - /bin/herokuish
# - procfile
# - start
# - stop_worker