
In our AWS EKS environment, I deployed the NGINX ingress controller through Helm, following the official install guide and adding a ConfigMap that enables the ModSecurity WAF on this ingress with the OWASP CRS v3.3.0 ruleset. It sits behind an AWS NLB.
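
For context, the chart was installed roughly like this (assuming the community ingress-nginx chart, which the values keys below correspond to; release and namespace names are illustrative):

helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \
  --namespace ingress-nginx --create-namespace \
  -f nginx-values.yaml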

It seems that requests made to the environment are now being processed with high latency, but this only happens on the first request from a given IP; after that, subsequent requests work fine.

nginx-values.yaml

---
controller:
  config:
    use-proxy-protocol: true
    enable-modsecurity: true
    ssl-protocols: "TLSv1.2 TLSv1.3"
    ssl-ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384"
  service:
    enableHttps: true
    enableHttp: false
    type: LoadBalancer
    annotations:
      service.beta.kubernetes.io/aws-load-balancer-type: external
      service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
      service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
      service.beta.kubernetes.io/aws-load-balancer-target-group-attributes: preserve_client_ip.enabled=true
      service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
      service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: "10"
      service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: "3"
      service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: "2"
      service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: "2"
      service.beta.kubernetes.io/load-balancer-source-ranges: ${source_range}

  metrics:
    enabled: true
  extraInitContainers:
    - name: init
      image: alpine:3
      command: ["/bin/sh","-c"]
      args: ["ls -tla /opt/modsecurity/var; chown -R 101:101 /opt/modsecurity/var; ls -tla /opt/modsecurity/var; touch /opt/modsecurity/var/log/debug.log; chown -R 101:101 /opt/modsecurity/var"]
      volumeMounts:
        - name: log
          mountPath: /opt/modsecurity/var/log

      securityContext:
        runAsGroup: 0
        runAsNonRoot: false
        runAsUser: 0
        privileged: true
  extraContainers:
    - name: promtail
      image: grafana/promtail
      args:
        - -config.file=/etc/config-waf/promtail.yaml
      volumeMounts:
        - name: config-map
          mountPath: /etc/config-waf
        - name: log
          mountPath: /opt/modsecurity/var/log
          
  resources:
    limits:
      cpu: 100m
      memory: 256Mi
    requests:
      cpu: 100m
      memory: 256Mi

  extraVolumeMounts:
    - name: config-map
      mountPath: /etc/nginx/modsecurity
    - name: log
      mountPath: /opt/modsecurity/var/log


  extraVolumes:
    - name: config-map
      configMap:
        name: waf-config
    - name: log
      emptyDir: {}
    - name: audit
      emptyDir: {}
  
  autoscaling:
    enabled: true
    minReplicas: 2
    maxReplicas: 10


  autoscalingTemplate:
    - type: Pods
      pods:
        metric:
          name: nginx_ingress_controller_nginx_process_requests_total
        target:
          type: AverageValue
          averageValue: 10000m

defaultBackend:
  enabled: true

waf.conf

SecRuleEngine On

SecRequestBodyAccess On

SecRule REQUEST_HEADERS:Content-Type "(?:application(?:/soap\+|/)|text/)xml" \
     "id:'200000',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML"

SecRule REQUEST_HEADERS:Content-Type "application/json" \
     "id:'200001',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=JSON"

SecRequestBodyLimit 13107200

SecRequestBodyNoFilesLimit 131072

SecRequestBodyLimitAction Reject

SecRule REQBODY_ERROR "!@eq 0" \
"id:'200002', phase:2,t:none,log,deny,status:400,msg:'Failed to parse request body.',logdata:'%{reqbody_error_msg}',severity:2"

SecRule MULTIPART_STRICT_ERROR "!@eq 0" \
"id:'200003',phase:2,t:none,log,deny,status:400, \
msg:'Multipart request body failed strict validation: \
PE %{REQBODY_PROCESSOR_ERROR}, \
BQ %{MULTIPART_BOUNDARY_QUOTED}, \
BW %{MULTIPART_BOUNDARY_WHITESPACE}, \
DB %{MULTIPART_DATA_BEFORE}, \
DA %{MULTIPART_DATA_AFTER}, \
HF %{MULTIPART_HEADER_FOLDING}, \
LF %{MULTIPART_LF_LINE}, \
SM %{MULTIPART_MISSING_SEMICOLON}, \
IQ %{MULTIPART_INVALID_QUOTING}, \
IP %{MULTIPART_INVALID_PART}, \
IH %{MULTIPART_INVALID_HEADER_FOLDING}, \
FL %{MULTIPART_FILE_LIMIT_EXCEEDED}'"

SecRule MULTIPART_UNMATCHED_BOUNDARY "@eq 1" \
    "id:'200004',phase:2,t:none,log,deny,msg:'Multipart parser detected a possible unmatched boundary.'"

SecPcreMatchLimit 1000

SecPcreMatchLimitRecursion 1000

SecRule TX:/^MSC_/ "!@streq 0" \
        "id:'200005',phase:2,t:none,deny,msg:'ModSecurity internal error flagged: %{MATCHED_VAR_NAME}'"

SecResponseBodyAccess On

SecResponseBodyMimeType text/plain text/html text/xml

SecResponseBodyLimit 524288

SecResponseBodyLimitAction ProcessPartial

SecTmpDir /tmp/

SecDataDir /tmp/

SecDebugLog /opt/modsecurity/var/log/debug.log

SecDebugLogLevel 3

SecAuditEngine Off

SecAuditLogRelevantStatus "^(?:5|4(?!04))"

SecAuditLogParts ABIJDEFHZ

SecAuditLogType Serial

SecAuditLog /opt/modsecurity/var/audit/modsec_audit.log

SecArgumentSeparator &

SecCookieFormat 0

SecUnicodeMapFile unicode.mapping 20127

SecStatusEngine On

crs-setup.conf

SecDefaultAction "phase:1,log,auditlog,pass,status:408"

SecDefaultAction "phase:2,log,auditlog,pass,status:408"

SecAction \
 "id:900000,\
  phase:1,\
  nolog,\
  pass,\
  t:none,\
  setvar:tx.paranoia_level=1"

SecAction \
 "id:900100,\
  phase:1,\
  nolog,\
  pass,\
  t:none,\
  setvar:tx.critical_anomaly_score=5,\
  setvar:tx.error_anomaly_score=4,\
  setvar:tx.warning_anomaly_score=3,\
  setvar:tx.notice_anomaly_score=2"

SecAction \
 "id:900110,\
  phase:1,\
  nolog,\
  pass,\
  t:none,\
  setvar:tx.inbound_anomaly_score_threshold=10000,\
  setvar:tx.outbound_anomaly_score_threshold=10000"

SecAction \
"id:900700,\
 phase:1,\
 nolog,\
 pass,\
 t:none,\
 setvar:'tx.dos_burst_time_slice=30',\
 setvar:'tx.dos_counter_threshold=250',\
 setvar:'tx.dos_block_timeout=300'"

SecAction \
"id:900960,\
 phase:1,\
 nolog,\
 pass,\
 t:none,\
 setvar:tx.do_reput_block=1"

SecAction \
"id:900970,\
 phase:1,\
 nolog,\
 pass,\
 t:none,\
 setvar:tx.reput_block_duration=300"

SecCollectionTimeout 600

SecAction \
 "id:900990,\
  phase:1,\
  nolog,\
  pass,\
  t:none,\
  setvar:tx.crs_setup_version=330"
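
The two files above (plus the promtail config) are packaged into the waf-config ConfigMap that the values file mounts at /etc/nginx/modsecurity, which is where the controller loads modsecurity.conf from when enable-modsecurity is on. A minimal sketch of how that ConfigMap could be created (key and file names are assumptions):

kubectl create configmap waf-config -n ingress-nginx \
  --from-file=modsecurity.conf=waf.conf \
  --from-file=crs-setup.conf=crs-setup.conf \
  --from-file=promtail.yaml=promtail.yaml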

Any thoughts on this?

1 Answer


What do you mean by 'high latency'? Is it affecting all requests or only specific ones? Have you tried disabling DoS protection in crs-setup.conf?
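
In CRS 3.x the DoS protection rules only run when the tx.dos_* variables are set, so commenting out that SecAction in crs-setup.conf (rule 900700 above) should be enough to turn it off, e.g.:

# SecAction \
# "id:900700,\
#  phase:1,\
#  nolog,\
#  pass,\
#  t:none,\
#  setvar:'tx.dos_burst_time_slice=30',\
#  setvar:'tx.dos_counter_threshold=250',\
#  setvar:'tx.dos_block_timeout=300'"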

azurit
  • I mean that some requests take about 5 minutes to be processed, or even time out. Anyway, I think we found the solution: we changed the worker node type in AWS from c5 to c5n (enhanced networking), and with this it seems that all requests are working as expected. – Rubenoh Jul 06 '21 at 08:55
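
For anyone hitting the same issue, switching the worker nodes to an enhanced-networking instance type can be done with an eksctl node group along these lines (cluster name, region and sizes are illustrative):

apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: my-cluster
  region: eu-west-1
managedNodeGroups:
  - name: ingress-nodes
    instanceType: c5n.xlarge   # c5n (enhanced networking) instead of c5
    desiredCapacity: 2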