pulumi/examples

`kubernetes.NewProvider` cannot parse a well-formatted kubeconfig string for GKE

honne23 opened this issue · 1 comment

What happened?

I'm trying to create a GKE cluster and then generate a kubeconfig for it, so that I can manage the applications on that cluster from Pulumi. However, Pulumi refuses to parse the kubeconfig string, even though it matches the example here.

The users[0].user.exec entry seems to be causing the problem. I copied the string directly from the example repo and made sure to use spaces for indentation rather than tabs, yet I still get the following error:

error: rpc error: code = Unknown desc = failed to parse kubeconfig: yaml: line 19: found character that cannot start any token.

I have also run the string through various YAML validators, and none of them reported an error.
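For what it's worth, go-yaml typically emits "found character that cannot start any token" when a tab or another control character appears where indentation is expected, and online validators that normalize whitespace won't necessarily catch that. One sanity check is to scan the rendered string before the provider parses it; below is a minimal sketch (debugKubeconfig is a name I made up, it is not part of the example). The function that actually renders the kubeconfig follows after it.

package compute

import (
	"fmt"
	"strings"

	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// debugKubeconfig prints every line of the rendered kubeconfig that
// contains a tab, since go-yaml rejects tabs used as indentation with
// exactly the "cannot start any token" error. The kubeconfig itself is
// passed through unchanged.
func debugKubeconfig(kubeconfig pulumi.StringOutput) pulumi.StringOutput {
	return kubeconfig.ApplyT(func(cfg string) string {
		for i, line := range strings.Split(cfg, "\n") {
			if strings.ContainsRune(line, '\t') {
				fmt.Printf("kubeconfig line %d contains a tab: %q\n", i+1, line)
			}
		}
		return cfg
	}).(pulumi.StringOutput)
}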

func generateKubeconfig(clusterEndpoint pulumi.StringOutput, clusterName pulumi.StringOutput,
	clusterMasterAuth container.ClusterMasterAuthOutput) pulumi.StringOutput {
	context := pulumi.Sprintf("demo_%s", clusterName)

	return pulumi.Sprintf(`apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: %s
    server: https://%s
  name: %s
contexts:
- context:
    cluster: %s
    user: %s
  name: %s
current-context: %s
kind: Config
preferences: {}
users:
- name: %s
  user:
    exec:
      apiVersion: "client.authentication.k8s.io/v1beta1"
      command: "gke-gcloud-auth-plugin"
      installHint: "Install gke-gcloud-auth-plugin for use with kubectl by following https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke"
      provideClusterInfo: true
`,
		clusterMasterAuth.ClusterCaCertificate().Elem(),
		clusterEndpoint, context, context, context, context, context, context)
}
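As an aside, one way to rule out whitespace problems in the raw string literal entirely would be to build the kubeconfig as a data structure and let a YAML encoder serialize it, for example with gopkg.in/yaml.v3 (that dependency is my suggestion; the example doesn't use it, and the installHint is abbreviated). A sketch producing the same config as the template above:

package compute

import (
	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/container"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	yaml "gopkg.in/yaml.v3"
)

// generateKubeconfigYAML builds the same kubeconfig as the Sprintf template,
// but from a map, so the encoder guarantees well-formed indentation.
func generateKubeconfigYAML(clusterEndpoint pulumi.StringOutput, clusterName pulumi.StringOutput,
	clusterMasterAuth container.ClusterMasterAuthOutput) pulumi.StringOutput {
	return pulumi.All(clusterEndpoint, clusterName, clusterMasterAuth.ClusterCaCertificate().Elem()).ApplyT(
		func(args []interface{}) (string, error) {
			endpoint := args[0].(string)
			context := "demo_" + args[1].(string)
			caCert := args[2].(string)
			cfg := map[string]interface{}{
				"apiVersion":      "v1",
				"kind":            "Config",
				"current-context": context,
				"preferences":     map[string]interface{}{},
				"clusters": []interface{}{map[string]interface{}{
					"name": context,
					"cluster": map[string]interface{}{
						"certificate-authority-data": caCert,
						"server":                     "https://" + endpoint,
					},
				}},
				"contexts": []interface{}{map[string]interface{}{
					"name": context,
					"context": map[string]interface{}{
						"cluster": context,
						"user":    context,
					},
				}},
				"users": []interface{}{map[string]interface{}{
					"name": context,
					"user": map[string]interface{}{
						"exec": map[string]interface{}{
							"apiVersion":         "client.authentication.k8s.io/v1beta1",
							"command":            "gke-gcloud-auth-plugin",
							"installHint":        "Install gke-gcloud-auth-plugin for use with kubectl",
							"provideClusterInfo": true,
						},
					},
				}},
			}
			out, err := yaml.Marshal(cfg)
			return string(out), err
		}).(pulumi.StringOutput)
}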

Example

I have provided my code for completeness; however, the example in the repo here doesn't work either.

package compute

import (
	"fmt"

	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/container"
	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/serviceaccount"
	"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	networking "github.com/solitude/solitude-infra/pkg/networking/gcp"
)

type GCPClusterConfig struct {
	NodesPerZone int
	ProjectName  string
	Region       string
	SA           *serviceaccount.Account
}

type GCPCluster struct {
	ComputeNetwork *container.Cluster
}

func BuildCluster(ctx *pulumi.Context, config *GCPClusterConfig, network *networking.GCPNetwork) (*kubernetes.Provider, error) {

	// Create a new GKE cluster
	gkeCluster, err := container.NewCluster(ctx, "gke-cluster", &container.ClusterArgs{
		DeletionProtection: pulumi.Bool(false),
		AddonsConfig: &container.ClusterAddonsConfigArgs{
			DnsCacheConfig: &container.ClusterAddonsConfigDnsCacheConfigArgs{
				Enabled: pulumi.Bool(true),
			},
		},
		BinaryAuthorization: &container.ClusterBinaryAuthorizationArgs{
			EvaluationMode: pulumi.String("PROJECT_SINGLETON_POLICY_ENFORCE"),
		},
		DatapathProvider: pulumi.String("ADVANCED_DATAPATH"),
		Description:      pulumi.String("A GKE cluster"),
		InitialNodeCount: pulumi.Int(1),
		IpAllocationPolicy: &container.ClusterIpAllocationPolicyArgs{
			ClusterIpv4CidrBlock:  pulumi.String("/14"),
			ServicesIpv4CidrBlock: pulumi.String("/20"),
		},
		NodeConfig: &container.ClusterNodeConfigArgs{
			DiskSizeGb: pulumi.Int(20),
		},
		Location: pulumi.String(config.Region),
		MasterAuthorizedNetworksConfig: &container.ClusterMasterAuthorizedNetworksConfigArgs{
			CidrBlocks: container.ClusterMasterAuthorizedNetworksConfigCidrBlockArray{
				&container.ClusterMasterAuthorizedNetworksConfigCidrBlockArgs{
					CidrBlock:   pulumi.String("0.0.0.0/0"),
					DisplayName: pulumi.String("All networks"),
				},
			},
		},
		Network:        network.ComputeNetwork.Name,
		NetworkingMode: pulumi.String("VPC_NATIVE"),
		PrivateClusterConfig: &container.ClusterPrivateClusterConfigArgs{
			EnablePrivateNodes:    pulumi.Bool(true),
			EnablePrivateEndpoint: pulumi.Bool(false),
			MasterIpv4CidrBlock:   pulumi.String("10.100.0.0/28"),
		},
		RemoveDefaultNodePool: pulumi.Bool(true),
		ReleaseChannel: &container.ClusterReleaseChannelArgs{
			Channel: pulumi.String("STABLE"),
		},
		Subnetwork: network.ComputeSubnetwork.Name,
		WorkloadIdentityConfig: &container.ClusterWorkloadIdentityConfigArgs{
			WorkloadPool: pulumi.String(fmt.Sprintf("%v.svc.id.goog", config.ProjectName)),
		},
	}, pulumi.IgnoreChanges([]string{"nodeConfig"}))
	if err != nil {
		return nil, err
	}
	// Create a new node pool
	nodePool, err := container.NewNodePool(ctx, "gke-nodepool", &container.NodePoolArgs{
		Cluster:   gkeCluster.ID(),
		NodeCount: pulumi.Int(config.NodesPerZone),
		NodeConfig: &container.NodePoolNodeConfigArgs{
			DiskSizeGb: pulumi.Int(20),
			OauthScopes: pulumi.StringArray{
				pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
			},
			ServiceAccount: config.SA.Email,
		},
	})
	if err != nil {
		return nil, err
	}

	// Build Kubeconfig for accessing the cluster
	clusterKubeconfig := generateKubeconfig(gkeCluster.Endpoint, gkeCluster.Name, gkeCluster.MasterAuth)

	k8sProvider, err := kubernetes.NewProvider(ctx, "k8sprovider", &kubernetes.ProviderArgs{
		Kubeconfig: clusterKubeconfig,
	}, pulumi.DependsOn([]pulumi.Resource{nodePool}))
	if err != nil {
		return nil, err
	}
	// Export some values for use elsewhere
	ctx.Export("networkName", network.ComputeNetwork.Name)
	ctx.Export("networkId", network.ComputeNetwork.ID())
	ctx.Export("clusterName", gkeCluster.Name)
	ctx.Export("clusterId", gkeCluster.ID())
	ctx.Export("kubeconfig", clusterKubeconfig)
	return k8sProvider, nil
}

func generateKubeconfig(clusterEndpoint pulumi.StringOutput, clusterName pulumi.StringOutput,
	clusterMasterAuth container.ClusterMasterAuthOutput) pulumi.StringOutput {
	context := pulumi.Sprintf("demo_%s", clusterName)

	return pulumi.Sprintf(`apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: %s
    server: https://%s
  name: %s
contexts:
- context:
    cluster: %s
    user: %s
  name: %s
current-context: %s
kind: Config
preferences: {}
users:
- name: %s
  user:
    exec:
      apiVersion: "client.authentication.k8s.io/v1beta1"
      command: "gke-gcloud-auth-plugin"
      installHint: "Install gke-gcloud-auth-plugin for use with kubectl by following https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke"
      provideClusterInfo: true
`,
		clusterMasterAuth.ClusterCaCertificate().Elem(),
		clusterEndpoint, context, context, context, context, context, context)
}
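For completeness, this is how I intend to consume the returned provider once NewProvider stops failing. A minimal sketch with a made-up namespace name; pulumi.Provider pins the resource to the new GKE cluster rather than whatever kubeconfig the local environment happens to have:

package compute

import (
	"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes"
	corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1"
	metav1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// DeployApps is an illustrative consumer of the provider returned by
// BuildCluster: it creates a Namespace on the GKE cluster.
func DeployApps(ctx *pulumi.Context, k8sProvider *kubernetes.Provider) error {
	_, err := corev1.NewNamespace(ctx, "apps", &corev1.NamespaceArgs{
		Metadata: &metav1.ObjectMetaArgs{
			Name: pulumi.String("apps"),
		},
	}, pulumi.Provider(k8sProvider))
	return err
}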

Output of pulumi about

CLI          
Version      3.95.0
Go Version   go1.21.4
Go Compiler  gc

Plugins
NAME        VERSION
gcp         7.0.0
go          unknown
kubernetes  4.5.5

Host     
OS       darwin
Version  12.3.1
Arch     arm64

This project is written in go: executable='/opt/homebrew/bin/go' version='go version go1.21.4 darwin/arm64'

Current Stack: solitude/gcp-k8s/dev

TYPE                                URN
pulumi:pulumi:Stack                 urn:pulumi:dev::gcp-k8s::pulumi:pulumi:Stack::gcp-k8s-dev
pulumi:providers:gcp                urn:pulumi:dev::gcp-k8s::pulumi:providers:gcp::default
gcp:serviceaccount/account:Account  urn:pulumi:dev::gcp-k8s::gcp:serviceaccount/account:Account::gke-nodepool-sa
gcp:compute/network:Network         urn:pulumi:dev::gcp-k8s::gcp:compute/network:Network::gke-network
gcp:compute/subnetwork:Subnetwork   urn:pulumi:dev::gcp-k8s::gcp:compute/subnetwork:Subnetwork::gke-subnet
gcp:container/cluster:Cluster       urn:pulumi:dev::gcp-k8s::gcp:container/cluster:Cluster::gke-cluster
gcp:container/nodePool:NodePool     urn:pulumi:dev::gcp-k8s::gcp:container/nodePool:NodePool::gke-nodepool
pulumi:providers:kubernetes         urn:pulumi:dev::gcp-k8s::pulumi:providers:kubernetes::k8sprovider


Found no pending operations associated with solitude/dev

Backend        
Name           pulumi.com
URL            https://app.pulumi.com/hon_ne
User           hon_ne
Organizations  hon_ne, solitude
Token type     personal

Dependencies:
NAME                                        VERSION
github.com/pulumi/pulumi-gcp/sdk/v7         7.0.0
github.com/pulumi/pulumi-kubernetes/sdk/v4  4.5.5
github.com/pulumi/pulumi/sdk/v3             3.92.0

Pulumi locates its logs in /var/folders/j6/bcp9t5h92yl5kgy94k4rbkpc0000gn/T/ by default

Additional context

No response

Contributing

Vote on this issue by adding a 👍 reaction.
To contribute a fix for this issue, leave a comment (and link to your pull request, if you've opened one already).

Can't say for sure that this is what you're running into, but I just found that re-initing the k8s provider was the culprit for me. If you created the cluster or the kubeconfig in a bad state initially, it could not be updated without fully deleting the k8s provider.
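If it helps, the way I forced that was to remove the provider from state and let the next update recreate it; something along these lines, with the URN adjusted to your stack (yours appears in the pulumi about output above), and --target-dependents added if other resources still reference the provider:

pulumi state delete 'urn:pulumi:dev::gcp-k8s::pulumi:providers:kubernetes::k8sprovider'
pulumi up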