
I am working on a dynamic Kubernetes informer to watch my Kubernetes cluster for events and to discover all Kubernetes components.

But when I try to access the cluster configuration via the InClusterConfig method, I get the following error:

// go run main.go
FATA[0000] could not get config                          error="unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined"
exit status 1

I found several similar issues on the Kubernetes repo on GitHub as well as on Stack Overflow, but couldn't find a solution or workaround. [kubernetes issue, kubernetes issue, stackoverflow similar question, stackoverflow similar question]

Below are the Go code and the go.mod file.

Go Code:

package main

import (
    "os"
    "os/signal"

    "github.com/sirupsen/logrus"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/dynamic/dynamicinformer"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    cfg, err := restConfig()
    if err != nil {
        logrus.WithError(err).Fatal("could not get config")
    }

    // Grab a dynamic interface that we can create informers from
    dc, err := dynamic.NewForConfig(cfg)
    if err != nil {
        logrus.WithError(err).Fatal("could not generate dynamic client for config")
    }

    // Create a factory object that we can say "hey, I need to watch this resource"
    // and it will give us back an informer for it
    f := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dc, 0, v1.NamespaceAll, nil)

    // Retrieve a "GroupVersionResource" type that we need when generating our informer from our dynamic factory
    gvr, _ := schema.ParseResourceArg("deployments.v1.apps")

    // Finally, create our informer for deployments!
    i := f.ForResource(*gvr)

    stopCh := make(chan struct{})
    go startWatching(stopCh, i.Informer())

    // signal.Notify requires a buffered channel, and os.Kill cannot be caught,
    // so use a buffer of 1 and register os.Interrupt only.
    sigCh := make(chan os.Signal, 1)
    signal.Notify(sigCh, os.Interrupt)

    <-sigCh
    close(stopCh)
}

func restConfig() (*rest.Config, error) {
    // Try the in-cluster config first; if the KUBECONFIG environment variable
    // is set, the config built from it takes precedence.
    kubeCfg, err := rest.InClusterConfig()
    if kubeconfig := os.Getenv("KUBECONFIG"); kubeconfig != "" {
        kubeCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
    }

    // Alternative tried earlier: load the kubeconfig from the default home
    // location via a command-line flag.
    //
    // var kubeconfig *string
    // if home := homedir.HomeDir(); home != "" {
    //     kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
    // } else {
    //     kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
    // }
    // flag.Parse()
    //
    // kubeCfg, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)

    if err != nil {
        return nil, err
    }

    return kubeCfg, nil
}

func startWatching(stopCh <-chan struct{}, s cache.SharedIndexInformer) {
    handlers := cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            u := obj.(*unstructured.Unstructured)

            logrus.WithFields(logrus.Fields{
                "name":      u.GetName(),
                "namespace": u.GetNamespace(),
                "labels":    u.GetLabels(),
            }).Info("received add event!")
        },
        UpdateFunc: func(oldObj, obj interface{}) {
            logrus.Info("received update event!")
        },
        DeleteFunc: func(obj interface{}) {
            logrus.Info("received delete event!")
        },
    }

    s.AddEventHandler(handlers)
    s.Run(stopCh)
}

go.mod file:

module discovery-test

go 1.15

require (
    github.com/googleapis/gnostic v0.5.3 // indirect
    github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
    github.com/imdario/mergo v0.3.11 // indirect
    github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
    github.com/sirupsen/logrus v1.7.0
    golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 // indirect
    golang.org/x/net v0.0.0-20201029055024-942e2f445f3c // indirect
    golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 // indirect
    golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
    k8s.io/apimachinery v0.17.0
    k8s.io/client-go v0.17.0
    k8s.io/klog v1.0.0 // indirect
    k8s.io/utils v0.0.0-20201027101359-01387209bb0d // indirect
)
  • This is because you are trying to get the cluster config from outside of the cluster. When you run `go run main.go`, you are running the program on your local machine (127.0.0.1), from where you cannot access the cluster. The program should work if you build a binary and a container image and deploy it through any K8s workload (Pod, Deployment, StatefulSet, etc.). In that case it will discover the service host and port and thus get the in-cluster config. – Shudipta Sharma Oct 31 '20 at 05:47
  • @ShudiptaSharma If I build a container image and deploy it as a Pod, how will I then watch the events via this dynamic informer? – Suryavanshi Virendrasingh Oct 31 '20 at 06:23
  • It seems you want to write a deployment controller. In that case, see the open-source sample-controller repo. – Shudipta Sharma Oct 31 '20 at 06:37
  • @ShudiptaSharma I have worked with controllers/operators, but I was planning to use this for discovering Kubernetes resources. Is there a better alternative for discovering Kubernetes resources than going with a controller? – Suryavanshi Virendrasingh Oct 31 '20 at 06:40
  • From my understanding, when we want to watch and act on add/update/delete events on some resources, we simply write a controller or an operator (if one is needed). In that case we define informers, listers, and kube/CRD clients and use them from the reconcile methods. Since you have a dynamic informer, you can use it for such a controller/operator. I think you already know what you have to do in that case. – Shudipta Sharma Oct 31 '20 at 13:29
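Following up on the discovery question in the last two comments: the discovery client can enumerate every resource the API server serves, and the dynamic informer factory can then register an informer for each of them. Below is a minimal sketch of that idea; the helper names informersForAllResources and hasVerbs are made up for illustration, and ServerPreferredResources may return partial results together with an error, which a production version should handle more carefully.

package main

import (
    "strings"

    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/dynamic/dynamicinformer"
    "k8s.io/client-go/rest"
)

// informersForAllResources (hypothetical helper) asks the API server which
// resources it serves and registers an informer for every resource that
// supports both list and watch.
func informersForAllResources(cfg *rest.Config, f dynamicinformer.DynamicSharedInformerFactory) ([]schema.GroupVersionResource, error) {
    dc, err := discovery.NewDiscoveryClientForConfig(cfg)
    if err != nil {
        return nil, err
    }

    // Note: ServerPreferredResources can return partial results alongside an
    // error; this sketch simply gives up on any error.
    lists, err := dc.ServerPreferredResources()
    if err != nil {
        return nil, err
    }

    var gvrs []schema.GroupVersionResource
    for _, list := range lists {
        gv, err := schema.ParseGroupVersion(list.GroupVersion)
        if err != nil {
            continue
        }
        for _, res := range list.APIResources {
            // Skip subresources (e.g. "deployments/status") and resources
            // that cannot be listed and watched.
            if strings.Contains(res.Name, "/") || !hasVerbs(res.Verbs, "list", "watch") {
                continue
            }
            gvr := gv.WithResource(res.Name)
            f.ForResource(gvr) // creates (or reuses) an informer for this resource
            gvrs = append(gvrs, gvr)
        }
    }
    return gvrs, nil
}

// hasVerbs reports whether verbs contains every verb in want.
func hasVerbs(verbs []string, want ...string) bool {
    for _, w := range want {
        found := false
        for _, v := range verbs {
            if v == w {
                found = true
                break
            }
        }
        if !found {
            return false
        }
    }
    return true
}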

1 Answer


First of all, thanks to @ShudiptaSharma. His comment helped me figure out that I was trying to get the cluster config from outside of the cluster, i.e. the program was running on my local machine (127.0.0.1), from where it cannot access the cluster.

Next, I looked into how to access the cluster from outside of it and found that InClusterConfig is meant for the in-cluster use case. When running outside the cluster, something like the following can be used:

//go run main.go
package main

import (
    "flag"
    "os"
    "os/signal"
    "path/filepath"

    "github.com/sirupsen/logrus"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/dynamic/dynamicinformer"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/util/homedir"
)

func main() {
    // Build the client config from a kubeconfig file: use -kubeconfig if
    // given, otherwise default to ~/.kube/config.
    var kubeconfig *string
    if home := homedir.HomeDir(); home != "" {
        kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
    } else {
        kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
    }
    flag.Parse()

    cfg, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
    if err != nil {
        logrus.WithError(err).Fatal("could not get config")
    }

    // Grab a dynamic interface that we can create informers from
    dc, err := dynamic.NewForConfig(cfg)
    if err != nil {
        logrus.WithError(err).Fatal("could not generate dynamic client for config")
    }

    // Create a factory object that we can say "hey, I need to watch this resource"
    // and it will give us back an informer for it
    f := dynamicinformer.NewFilteredDynamicSharedInformerFactory(dc, 0, v1.NamespaceAll, nil)

    // Retrieve a "GroupVersionResource" type that we need when generating our informer from our dynamic factory
    gvr, _ := schema.ParseResourceArg("deployments.v1.apps")

    // Finally, create our informer for deployments!
    i := f.ForResource(*gvr)

    stopCh := make(chan struct{})
    go startWatching(stopCh, i.Informer())

    // signal.Notify requires a buffered channel, and os.Kill cannot be caught,
    // so use a buffer of 1 and register os.Interrupt only.
    sigCh := make(chan os.Signal, 1)
    signal.Notify(sigCh, os.Interrupt)

    <-sigCh
    close(stopCh)
}

func startWatching(stopCh <-chan struct{}, s cache.SharedIndexInformer) {
    handlers := cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            u := obj.(*unstructured.Unstructured)

            logrus.WithFields(logrus.Fields{
                "name":      u.GetName(),
                "namespace": u.GetNamespace(),
                "labels":    u.GetLabels(),
            }).Info("received add event!")
        },
        UpdateFunc: func(oldObj, obj interface{}) {
            logrus.Info("received update event!")
        },
        DeleteFunc: func(obj interface{}) {
            logrus.Info("received delete event!")
        },
    }

    s.AddEventHandler(handlers)
    s.Run(stopCh)
}
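
For completeness, the two approaches can be combined so that the same binary works both inside and outside the cluster: try InClusterConfig first and fall back to a kubeconfig file when it fails. Below is a minimal sketch of that pattern (the function name restConfig mirrors the question's code; the fallback order is the part that changes):

package main

import (
    "os"
    "path/filepath"

    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/util/homedir"
)

// restConfig tries the in-cluster configuration first (inside a Pod,
// KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT are injected
// automatically) and falls back to a kubeconfig file for local development.
func restConfig() (*rest.Config, error) {
    if cfg, err := rest.InClusterConfig(); err == nil {
        return cfg, nil
    }

    // Outside the cluster: honor $KUBECONFIG, then fall back to ~/.kube/config.
    kubeconfig := os.Getenv("KUBECONFIG")
    if kubeconfig == "" {
        if home := homedir.HomeDir(); home != "" {
            kubeconfig = filepath.Join(home, ".kube", "config")
        }
    }
    return clientcmd.BuildConfigFromFlags("", kubeconfig)
}

This way `go run main.go` picks up the local kubeconfig during development, while the same binary deployed as a Pod uses the in-cluster service account without any flags.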