3

I am deploying my Spring Boot application's Docker image on GCP using Helm charts. For environment-specific configuration I use a helm-override.yaml file. However, I noticed that the values configured in application-stage.properties are not being picked up by the application. I am attaching the Helm chart and build.gradle files below.

below is project structure

```
xyz <br>
        settings.gradle <br>
        build.gradle <br>
        config <br>
          prod
            application-prod.properties
          stage
            application.properties
        gradle/ <br>
            wrapper/ <br>
                gradle-wrapper.jar <br>
                gradle-wrapper.properties <br>
        src/ <br>
            main/ <br>
                java/ <br>
                resources/ <br>
                    application.properties <br>
        xyzcharts/ <br>
          values.yaml <br>
          config/ <br>
            stage/ <br>
              helm-override-stage.yaml <br>
          templates/ <br>
            configmap.yaml <br>
            cronjob.yaml <br>
            
          
```

build.gradle

plugins {
    id 'org.springframework.boot' version "${springBootVersion}"
    id 'io.spring.dependency-management' version '1.0.15.RELEASE'
    id 'java'
    id 'eclipse'
    id 'jacoco'
    id 'org.sonarqube' version "3.3"
    id 'com.google.cloud.tools.jib' version "${jibVersion}"
}

group = 'com.vsi.postgrestoattentive'

// Fail fast with a usage message when the mandatory -PbuildName property is absent.
// FIX: the closing parenthesis previously ended the GradleException call after the
// "<buildName>" line, leaving the last two concatenations (and a stray ';') outside
// the constructor call — a Groovy syntax error. The full message is now inside the call.
if (!project.hasProperty('buildName')) {
    throw new GradleException("Usage for CLI:"
        + System.getProperty("line.separator")
        + "gradlew <taskName> -Dorg.gradle.java.home=<java-home-dir> -PbuildName=<major>.<minor>.<buildNumber> -PgcpProject=<gcloudProject>"
        + System.getProperty("line.separator")
        + "<org.gradle.java.home> - OPTIONAL if available in PATH"
        + System.getProperty("line.separator")
        + "<buildName> - MANDATORY, example 0.1.23"
        + System.getProperty("line.separator")
        + "<gcpProject> - OPTIONAL, project name in GCP")
}

project.ext {
    buildName = project.property('buildName')
}

version = "${project.ext.buildName}"

sourceCompatibility = '1.8'

apply from: 'gradle/sonar.gradle'
apply from: 'gradle/tests.gradle'
apply from: 'gradle/image-build-gcp.gradle'

repositories {
    mavenCentral()
}

dependencies {
    // FIX: spring-boot-starter-web was declared twice with conflicting versions
    // (${springBootVersion} and a hard-coded 2.7.0); keep the property-driven one.
    implementation("org.springframework.boot:spring-boot-starter-web:${springBootVersion}")
    implementation("org.springframework.boot:spring-boot-starter-actuator:${springBootVersion}")
    implementation("org.springframework.boot:spring-boot-starter-data-jpa:${springBootVersion}")
    developmentOnly 'org.springframework.boot:spring-boot-devtools'
    testImplementation 'org.springframework.boot:spring-boot-starter-test'
    testImplementation 'org.springframework.integration:spring-integration-test'
    testImplementation 'org.springframework.batch:spring-batch-test:4.3.0'
    implementation 'org.postgresql:postgresql:42.1.4'
    implementation 'org.springframework.batch:spring-batch-core:4.1.1.RELEASE'
    implementation 'com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.14.1'
    implementation group: 'io.micrometer', name: 'micrometer-registry-datadog', version: '1.7.0'
    // NOTE(review): libraries-bom is usually imported via platform(...) rather than as a
    // plain implementation dependency — TODO confirm intended usage.
    implementation 'com.google.cloud:libraries-bom:26.3.0'
    implementation 'com.google.cloud:google-cloud-storage:2.16.0'
}

// Name the boot jar after the project, e.g. xyz.jar, so the jib image path is stable.
bootJar {
    archiveFileName = "${project.name}.${archiveExtension.get()}"
}

// Expose build metadata (version, time) via Spring Boot's BuildProperties.
springBoot {
    buildInfo()
}

test {
    finalizedBy jacocoTestReport
}

jacoco {
    toolVersion = "0.8.8"
}

jacocoTestReport {
    dependsOn test
}

//SMS2-28: Code to make build check code coverage ratio
project.tasks["bootJar"].dependsOn "jacocoTestReport","jacocoTestCoverageVerification"

cronjob.yaml

apiVersion: batch/v1
kind: CronJob
metadata:
  name: {{ include "xyz.fullname" . }}
  labels:
    {{- include "xyz.labels" . | nindent 4 }}
spec:
  schedule: "{{ .Values.schedule }}"
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 5
  failedJobsHistoryLimit: 2
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        # FIX: the pod-template metadata block was previously placed after
        # backoffLimit at the wrong indentation (a sibling of jobTemplate.spec
        # children), so podAnnotations/selectorLabels never landed on the pod.
        # It now lives under template:, and the {{ with }} / include actions use
        # "{{-" so no stray blank lines are emitted.
        metadata:
          {{- with .Values.podAnnotations }}
          annotations:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          labels:
            {{- include "xyz.selectorLabels" . | nindent 12 }}
        spec:
          restartPolicy: Never
          containers:
            - name: {{ .Chart.Name }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              env:
              - name: POSTGRES_DB_USER_NAME
                valueFrom:
                  secretKeyRef:
                    name: xyz-feed-secret
                    key: DB_USER_NAME
              - name: POSTGRES_DB_PASSWORD
                valueFrom:
                  secretKeyRef:
                    name: xyz-feed-secret
                    key: DB_PASSWORD
              - name: POSTGRES_DB_URL
                valueFrom:
                  secretKeyRef:
                    name: xyz-feed-secret
                    key: DB_URL
              - name: POSTGRES_TO_ATTENTIVE_TOKEN
                valueFrom:
                  secretKeyRef:
                    name: xyz-feed-secret
                    key: ATTENTIVE_TOKEN
              - name: POD_NAME
                valueFrom:
                  fieldRef:
                    fieldPath: metadata.name
              - name: DD_AGENT_HOST
                valueFrom:
                  fieldRef:
                    fieldPath: status.hostIP
              - name: DD_ENV
                value: {{ .Values.datadog.env }}
              - name: DD_SERVICE
                value: {{ include "xyz.name" . }}
              {{- /* NOTE(review): "xyz.AppVersion" must be defined in _helpers.tpl
                     (not shown); if it is not, use {{ .Chart.AppVersion }} instead
                     — TODO confirm. */}}
              - name: DD_VERSION
                value: {{ include "xyz.AppVersion" . }}
              - name: DD_LOGS_INJECTION
                value: "true"
              - name: DD_RUNTIME_METRICS_ENABLED
                value: "true"
              # Mounts the ConfigMap (application.properties + logback-spring.xml)
              # at /app/config so Spring Boot's default config search path finds it.
              volumeMounts:
                - mountPath: /app/config
                  name: logback
              ports:
                - name: http
                  containerPort: {{ .Values.service.port }}
                  protocol: TCP
          volumes:
            - configMap:
                name: {{ include "xyz.name" . }}
              name: logback

configmap.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "xyz.name" . }}
  labels:
    {{- include "xyz.labels" . | nindent 4 }}
data:
  # Renders the properties file referenced by .Values.application.configoveride
  # line-by-line into the ConfigMap under the key "application.properties".
  # NOTE(review): Helm's .Files can ONLY read files that live INSIDE the chart
  # directory (here: xyzcharts/). Per the project structure shown above, the
  # stage properties live under the project root's config/stage/, NOT under
  # xyzcharts/config/stage/, so .Files.Lines would render nothing — likely the
  # reason the stage values are not picked up. TODO confirm the file is copied
  # into the chart before `helm install/upgrade`.
  application.properties: |-
    {{- range .Files.Lines .Values.application.configoveride }}
    {{ . }}{{ end }}
  # Logback config rendered verbatim; mounted alongside application.properties
  # at /app/config by the CronJob pod spec.
  logback-spring.xml: |+
    <?xml version="1.0" encoding="UTF-8"?>
    <configuration>
        <include resource="org/springframework/boot/logging/logback/defaults.xml"/>
        <include resource="org/springframework/boot/logging/logback/console-appender.xml" />
        <include resource="org/springframework/cloud/gcp/logging/logback-json-appender.xml" />
        <property name="projectId" value="${projectId:-${GOOGLE_CLOUD_PROJECT}}"/>
        <appender name="CONSOLE_JSON" class="ch.qos.logback.core.ConsoleAppender">
            <encoder class="ch.qos.logback.core.encoder.LayoutWrappingEncoder">
                <layout class="org.springframework.cloud.gcp.logging.StackdriverJsonLayout">
                    <projectId>${projectId}</projectId>
                    <includeTraceId>true</includeTraceId>
                    <includeSpanId>true</includeSpanId>
                    <includeLevel>true</includeLevel>
                    <includeThreadName>true</includeThreadName>
                    <includeMDC>true</includeMDC>
                    <includeLoggerName>true</includeLoggerName>
                    <includeFormattedMessage>true</includeFormattedMessage>
                    <includeExceptionInMessage>false</includeExceptionInMessage>
                    <includeContextName>true</includeContextName>
                    <includeMessage>true</includeMessage>
                    <includeException>true</includeException>
                    <jsonFormatter
                       class="ch.qos.logback.contrib.jackson.JacksonJsonFormatter">
                    </jsonFormatter>
                </layout>
            </encoder>
        </appender>
        <root level="INFO">
            <appender-ref ref="CONSOLE_JSON"/>
        </root>
    </configuration>

values.yaml

# Default values for postgres_to_attentive_product_catalog.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

###SMS2-40 - replica count indicates the number of instances we need.
### - If we want 3 instances then we mention 3 - then 3 pods will be created on the server.
### - For the staging env we usually keep 1.
replicaCount: 1

image:
###SMS2-40 - Below is the image name which is created during the build --> GCP Build image.
### ---> We can also give local image details here.
### ---> We can create an image in a Docker repository and use that image URL here.
  repository: gcr.io/mgcp-1308657-vsi-operations/smscatalogfeed
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  
  name: "smscatalogfeed"

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

# Cron schedule for the CronJob (standard 5-field cron syntax, UTC by default).
schedule: "56 17 * * *"

###SMS2-40 - There are 2 ways we can serve our applications --> 1st -> LoadBalancer or 2nd -> NodePort
service:
  type: NodePort
  port: 8087
  liveness: /actuator/health/liveness
  readiness: /actuator/health/readiness
###service:
###  type: ClusterIP
###  port: 80

restartPolicy: "Never"

ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: {}

tolerations: []

affinity: {}
###SMS2-40 - The path below is read by configmap.yaml via .Files.Lines and is resolved
### RELATIVE TO THE CHART ROOT (xyzcharts/), not the project root — the file must
### exist inside the chart directory for Helm to see it.
application:
  configoveride: "config/application.properties"

helm-override-stage.yaml

replicaCount: 1
#SMS2-12 : mgcp-1308657-vsi-operations is our server/project in GCP
image:
  repository: gcr.io/mgcp-1308657-vsi-operations/smscatalogfeed
  tag: <IMAGE_TAG_PLACEHOLDER_TO_BE_REPLACED>
# NOTE(review): this path is resolved by .Files relative to the chart root
# (xyzcharts/). Per the project structure shown above, xyzcharts/config/stage/
# contains only helm-override-stage.yaml — no application-stage.properties —
# so Helm renders an empty application.properties. TODO: place the stage
# properties file at xyzcharts/config/stage/application-stage.properties.
application:
  configoveride: "config/stage/application-stage.properties"
datadog:
  enabled: true
  env: stage
Sanjay Naik
  • 264
  • 1
  • 4
  • 23

1 Answer

0

Looks like there are many problems with your project structure and your yaml configs:

  1. application-stage.properties file is missing;
  2. Other chart files like Chart.yaml, _helpers.tpl are not reflected in your project structure;
  3. secret.yaml is missing for your chart template, but used in your CronJob jobTemplate envs fetching with secretKeyRef;
  4. Your application-stage.properties file is only stored in a configmap.yaml;
  5. Your configmap.yaml data is defined with filename as key and file content as value, which is not suited for exposing as container environment variables.

As to why the values configured in application-stage.properties are not being picked up by the application: your CronJob container fetches its environment variables from a secret that is missing from the chart, and the application-stage.properties content exists only as a file inside a ConfigMap, not as key-value pairs exposed to the container.

The tutorial: spring boot application.properties in kubernetes may provide you more guidelines.

YwH
  • 1,050
  • 5
  • 11