Lately I’ve been using multiple notes to keep track of all the kubectl commands I’ve come across while troubleshooting vSphere with Tanzu. The idea behind this post is to collect them into a single reference kubectl cheat sheet for vSphere with Tanzu.
Login
LOGIN TO A SUPERVISOR CLUSTER
Command:
kubectl vsphere login --server IP/FQDN -u USERNAME --insecure-skip-tls-verify
Example:
kubectl vsphere login --server kube.gs.labs -u administrator@vsphere.local --insecure-skip-tls-verify
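The plugin can also read the password from the KUBECTL_VSPHERE_PASSWORD environment variable, which skips the interactive prompt and is handy for scripting (the value below is a placeholder):
export KUBECTL_VSPHERE_PASSWORD='YOUR-PASSWORD'
kubectl vsphere login --server kube.gs.labs -u administrator@vsphere.local --insecure-skip-tls-verify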
LOGIN TO A GUEST CLUSTER
Command:
kubectl vsphere login --server IP/FQDN --insecure-skip-tls-verify --tanzu-kubernetes-cluster-name GuestClusterName --tanzu-kubernetes-cluster-namespace SupervisorClusterNamespace --vsphere-username USERNAME
Example:
kubectl vsphere login --server kube.gs.labs --insecure-skip-tls-verify --tanzu-kubernetes-cluster-name demo-calico-tkc --tanzu-kubernetes-cluster-namespace gs-dev --vsphere-username administrator@vsphere.local
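To end a session, the plugin also provides a logout command:
kubectl vsphere logout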
Contexts
DISPLAY THE CURRENT CONTEXT
Command:
kubectl config current-context
Example:
root@debian:~# kubectl config current-context
demo-calico-tkc
DISPLAY ALL CONTEXTS
Command:
kubectl config get-contexts
Example:
root@debian:~# kubectl config get-contexts
CURRENT   NAME              CLUSTER        AUTHINFO                                       NAMESPACE
*         demo-calico-tkc   172.16.0.133   wcp:172.16.0.133:administrator@vsphere.local
          gs-dev            172.16.0.129   wcp:172.16.0.129:administrator@vsphere.local   gs-dev
SWITCH TO A SPECIFIC CONTEXT/GUEST CLUSTER NAMESPACE
Command:
kubectl config use-context CONTEXT/NAMESPACE
Example:
root@debian:~# kubectl config use-context gs-dev
Switched to context "gs-dev".
root@debian:~# kubectl config get-contexts
CURRENT   NAME              CLUSTER        AUTHINFO                                       NAMESPACE
          172.16.0.129      172.16.0.129   wcp:172.16.0.129:administrator@vsphere.local
          demo-calico-tkc   172.16.0.133   wcp:172.16.0.133:administrator@vsphere.local
*         gs-dev            172.16.0.129   wcp:172.16.0.129:administrator@vsphere.local   gs-dev
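To change only the default namespace of the current context, rather than switching contexts, kubectl's standard set-context flag can be used; a quick sketch:
kubectl config set-context --current --namespace=gs-dev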
Clusters
DISPLAY ALL CLUSTERS
Command:
kubectl config get-clusters
Example:
root@debian:~# kubectl config get-clusters
NAME
172.16.0.133
supervisor-172.16.0.129
supervisor-kube.gs.labs
172.16.0.129
SWITCH TO A SPECIFIC CLUSTER
Command:
kubectl config set-cluster CLUSTERNAME
Example:
root@debian:~# kubectl config set-cluster supervisor-kube.gs.labs
Cluster "supervisor-kube.gs.labs" set.
LIST EVERYTHING
List all pods, services, deployments, replicasets, statefulsets, jobs, cronjobs, etc. across all namespaces. Command:
kubectl get all -A
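Despite its name, get all only covers a fixed set of workload resources. To enumerate every resource type the cluster can actually list (and query them one by one if needed), something like this works:
kubectl api-resources --verbs=list -o name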
Self-Service Namespace
Create Namespace in the Supervisor Cluster
Command
kubectl create namespace NAME
Example (k is an alias for kubectl, i.e. alias k=kubectl, used throughout the examples below)
root@debian:~# k create namespace jahnin-test
namespace/jahnin-test created
root@debian:~# k get ns
NAME STATUS AGE
default Active 67d
gs-dev Active 67d
jahnin-test Active 25s
kube-node-lease Active 67d
kube-public Active 67d
kube-system Active 67d
Describe the newly created Namespace
Command
kubectl describe namespace NAME
Example
root@debian:~# k describe ns jahnin-test
Name:         jahnin-test
Labels:       vSphereClusterID=domain-c8
Annotations:  ls_id-0: ef278926-c8f1-42f5-8f7f-ad928b4d5a5c
              ncp/extpoolid: domain-c8:dad7e875-3357-449b-809a-bf1783e3430d-ippool-172-16-0-161-172-16-0-190
              ncp/router_id: t1_44090d24-628f-4bce-ac7b-db19aabf1a74_rtr
              ncp/snat_ip: 172.16.0.164
              ncp/subnet-0: 10.244.0.80/28
              vmware-system-namespace-owner-count: 1
              vmware-system-resource-pool: resgroup-4768
              vmware-system-resource-pool-cpu-limit: 0.4160
              vmware-system-resource-pool-memory-limit: 1024Mi
              vmware-system-self-service-namespace: true
              vmware-system-vm-folder: group-v4769
Status:       Active

Resource Quotas
  Name:              jahnin-test
  Resource           Used  Hard
  --------           ---   ---
  requests.storage   0     1Gi

  Name:     jahnin-test-storagequota
  Resource                                                            Used  Hard
  --------                                                            ---   ---
  k8s-storage-profile.storageclass.storage.k8s.io/requests.storage   0     9223372036854775807
List the nodes while in the self-service namespace
Command (nodes are cluster-scoped, so the -n flag is accepted but has no effect on the output)
kubectl get nodes -n NAMESPACE
Example
root@debian:~# k get nodes -n jahnin-test
NAME STATUS ROLES AGE VERSION
422f57695844de7ecea0159de09280b0 Ready master 67d v1.19.1+wcp.3
422f78932163b6dc93e693a87e83d890 Ready master 67d v1.19.1+wcp.3
422fbf91f6516c9943a9b0bcbf1d432a Ready master 67d v1.19.1+wcp.3
esx01.gs.labs Ready agent 67d v1.19.1-sph-b0161d9
esx02 Ready agent 67d v1.19.1-sph-b0161d9
esx03 Ready agent 67d v1.19.1-sph-b0161d9
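Once testing is done, the self-service namespace can be removed again:
kubectl delete namespace jahnin-test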
Guest Clusters
Create a guest cluster with the Calico CNI: 1 control plane node and 2 worker nodes
YAML
apiVersion: run.tanzu.vmware.com/v1alpha1
kind: TanzuKubernetesCluster
metadata:
  name: demo-calico-tkc
  namespace: gs-dev
spec:
  topology:
    controlPlane:
      count: 1
      class: best-effort-small
      storageClass: k8s-storage-profile
    workers:
      count: 2
      class: best-effort-small
      storageClass: k8s-storage-profile
  distribution:
    version: v1.19
  settings:
    network:
      cni:
        name: calico
      services:
        cidrBlocks: ["172.16.0.192/27"]
      pods:
        cidrBlocks: ["192.168.200.0/24"]
Command
kubectl apply -f YAML.FILE
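Provisioning takes a few minutes; progress can be watched from the supervisor namespace. The full resource name is tanzukubernetesclusters (most builds also register the short name tkc):
kubectl get tanzukubernetesclusters -n gs-dev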
Create a guest cluster with the Antrea CNI: 1 control plane node and 2 worker nodes
YAML
apiVersion: run.tanzu.vmware.com/v1alpha1
kind: TanzuKubernetesCluster
metadata:
  name: demo-antrea-tkc
  namespace: gs-dev
spec:
  topology:
    controlPlane:
      count: 1
      class: best-effort-small
      storageClass: k8s-storage-profile
    workers:
      count: 2
      class: best-effort-small
      storageClass: k8s-storage-profile
  distribution:
    version: v1.19
  settings:
    network:
      cni:
        name: antrea
      services:
        cidrBlocks: ["172.16.0.192/27"]
      pods:
        cidrBlocks: ["192.168.200.0/24"]
Command
kubectl apply -f YAML.FILE
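If a cluster gets stuck while creating, describing it surfaces the reconciliation events and conditions:
kubectl describe tanzukubernetescluster demo-antrea-tkc -n gs-dev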
VM Service
List VM images
Command
kubectl get virtualmachineimages
Example
root@debian:~# k get virtualmachineimages
NAME                                            VERSION                          OSTYPE                FORMAT   AGE
photon-3-k8s-v1.19.7---vmware.1-tkg.1.fc82c41   v1.19.7+vmware.1-tkg.2.f52f85a   vmwarePhoton64Guest   ovf      43d
slaxie                                                                           ubuntu64Guest         ovf      43d
slaxy                                                                            ubuntu64Guest         ovf      43d
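More detail on a given image (OS information and, for Tanzu Kubernetes node images, version metadata) is available with a plain describe:
kubectl describe virtualmachineimage photon-3-k8s-v1.19.7---vmware.1-tkg.1.fc82c41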
List VM Classes
Command
kubectl get virtualmachineclasses
Example
root@debian:~# k get virtualmachineclasses
NAME                  CPU   MEMORY   AGE
best-effort-2xlarge   8     64Gi     67d
best-effort-4xlarge   16    128Gi    67d
best-effort-8xlarge   32    128Gi    67d
best-effort-large     4     16Gi     67d
best-effort-medium    2     8Gi      67d
best-effort-small     2     4Gi      67d
best-effort-xlarge    4     32Gi     67d
best-effort-xsmall    2     2Gi      67d
guaranteed-2xlarge    8     64Gi     67d
guaranteed-4xlarge    16    128Gi    67d
guaranteed-8xlarge    32    128Gi    67d
guaranteed-large      4     16Gi     67d
guaranteed-medium     2     8Gi      67d
guaranteed-small      2     4Gi      67d
guaranteed-xlarge     4     32Gi     67d
guaranteed-xsmall     2     2Gi      67d
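The full definition behind a class (CPU and memory reservations, etc.) can be inspected like any other API object, for example:
kubectl get virtualmachineclass best-effort-small -o yaml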
Deploy a VM in the Supervisor Cluster
YAML
apiVersion: vmoperator.vmware.com/v1alpha1
kind: VirtualMachine
metadata:
  name: vmsvc-slax-vm
  namespace: gs-dev
spec:
  imageName: slaxie
  className: best-effort-small
  powerState: poweredOn
  storageClass: k8s-storage-profile
  networkInterfaces:
  - networkType: nsx-t
Command
kubectl apply -f YAML.FILE
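After applying, the VM object and its power state can be verified from the same namespace:
kubectl get virtualmachines -n gs-dev
kubectl describe virtualmachine vmsvc-slax-vm -n gs-dev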
Storage
Create a persistent volume claim
YAML
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: testvolume
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: k8s-storage-profile
  resources:
    requests:
      storage: 2Gi
Output
root@debian:~# k apply -f pvc.yml
persistentvolumeclaim/testvolume created
root@debian:~# k get pvc
NAME                  STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS          AGE
testvolume            Bound    pvc-0b6d9e8c-57dd-49af-83ae-808260d85e81   2Gi        RWO            k8s-storage-profile   22s
wp-gslabs-wordpress   Bound    pvc-50463820-817e-4400-a3f7-6b99da3b066c   10Gi       RWO            k8s-storage-profile   43d
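To actually consume the claim, reference it from a pod spec. A minimal sketch (pod name and image are arbitrary placeholders):
apiVersion: v1
kind: Pod
metadata:
  name: pvc-test-pod
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: testvolume   # the PVC created above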
Troubleshooting
Connect to the Supervisor Cluster as an Administrator
- SSH to vCenter Server and run the script
/usr/lib/vmware-wcp/decryptK8Pwd.py
root@vcenter [ / ]# /usr/lib/vmware-wcp/decryptK8Pwd.py
Read key from file
Connected to PSQL
Cluster: domain-c8:dad7e875-3357-449b-809a-bf1783e3430d
IP: 172.16.0.201
PWD: zflRFNeKdd7F7RJJ1sawcmO+WsnQPRgfzkIfU9
------------------------------------------------------------
- SSH to the Supervisor Control Plane node using the above IP and credentials
root@vcenter [ / ]# ssh root@172.16.0.201
FIPS mode initialized
Password:
Last login: Tue Sep 21 05:24:43 2021 from 172.16.1.20
12:56:14 up 67 days, 9:25, 1 user, load average: 1.35, 1.42, 1.55
49 Security notice(s)
Run 'tdnf updateinfo info' to see the details.
- Note the access to the additional system namespaces
root@422fbf91f6516c9943a9b0bcbf1d432a [ ~ ]# alias k=kubectl
root@422fbf91f6516c9943a9b0bcbf1d432a [ ~ ]# k get ns -A
NAME STATUS AGE
default Active 67d
gs-dev Active 67d
jahnin-test Active 49m
kube-node-lease Active 67d
kube-public Active 67d
kube-system Active 67d
svc-tmc-c8 Active 67d
vmware-system-appplatform-operator-system Active 67d
vmware-system-capw Active 67d
vmware-system-cert-manager Active 67d
vmware-system-csi Active 67d
vmware-system-kubeimage Active 67d
vmware-system-license-operator Active 67d
vmware-system-logging Active 67d
vmware-system-nsop Active 67d
vmware-system-nsx Active 67d
vmware-system-registry Active 67d
vmware-system-registry-2147105364 Active 46d
vmware-system-tkg Active 67d
vmware-system-ucs Active 67d
vmware-system-vmop Active 67d
Connect to a guest cluster as the administrator
Decode the guest cluster's admin kubeconfig from the secret stored in its supervisor namespace
Command
kubectl get secret TKGS-CLUSTER-NAME-kubeconfig -o jsonpath='{.data.value}' | base64 -d > tkc-admin-config
Example
root@debian:~# k get secret demo-calico-tkc-kubeconfig -o jsonpath='{.data.value}' | base64 -d > demo-calico-tkc-kubeconfig
root@debian:~# cat demo-calico-tkc-kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJT<TRUNCATED>
    server: https://172.16.0.133:6443
  name: demo-calico-tkc
contexts:
- context:
    cluster: demo-calico-tkc
    user: demo-calico-tkc-admin
  name: demo-calico-tkc-admin@demo-calico-tkc
current-context: demo-calico-tkc-admin@demo-calico-tkc
kind: Config
preferences: {}
users:
- name: demo-calico-tkc-admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZ<TRUNCATED>
Use the decoded config file to connect to the guest cluster as an administrator and list nodes
Command
kubectl --kubeconfig tkc-admin-config get nodes
Example
root@debian:~# k --kubeconfig demo-calico-tkc-kubeconfig get nodes
NAME STATUS ROLES AGE VERSION
demo-calico-tkc-control-plane-w9j97 Ready master 15d v1.19.7+vmware.1
demo-calico-tkc-workers-qxfj8-796cd94d78-g4wj9 Ready <none> 15d v1.19.7+vmware.1
demo-calico-tkc-workers-qxfj8-796cd94d78-w8v8g Ready <none> 15d v1.19.7+vmware.1
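To avoid repeating --kubeconfig on every call, either export the standard KUBECONFIG variable or define an alias (the same trick the Antrea examples below use):
export KUBECONFIG=$PWD/demo-calico-tkc-kubeconfig
alias kf="kubectl --kubeconfig=demo-calico-tkc-kubeconfig"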
CNI
Antrea
Running antctl get
Command
kubectl exec -it antrea-agent-POD -n kube-system -c antrea-agent -- bash
Example (kf is an alias for kubectl with the guest cluster's kubeconfig; its definition is shown in the Antrea agent logs section below)
root@debian:~# kf exec -it antrea-agent-rbf6l -n kube-system -c antrea-agent -- bash
root [ / ]# antctl get
Get the status or resource of a topic

Usage:
  antctl get [command]

Available Commands:
  addressgroup    Print address groups
  agentinfo       Print agent's basic information
  appliedtogroup  Print appliedto groups
  networkpolicy   Print NetworkPolicies
  ovsflows        Dump OVS flows
  podinterface    Print Pod's network interface information

Flags:
  -h, --help   help for get

Global Flags:
  -k, --kubeconfig string   absolute path to the kubeconfig file
  -s, --server string       address and port of the API server, taking precedence over the default endpoint and the one set in kubeconfig
  -t, --timeout duration    time limit of the execution of the command
  -v, --verbose             enable verbose output

Use "antctl get [command] --help" for more information about a command.
Review Antrea agent logs
Command
kubectl logs antrea-agent-POD -c antrea-agent -n kube-system
Example
root@debian:~# kubectl get secret antrea-tkc-kubeconfig -o jsonpath='{.data.value}' | base64 -d > antrea-tkc-kubeconfig
root@debian:~# alias kf="kubectl --kubeconfig=antrea-tkc-kubeconfig"
root@debian:~# kf logs antrea-agent-42ft4 -c antrea-agent -n kube-system
...
I1028 15:21:18.334228 1 ovs_client.go:67] Connecting to OVSDB at address /var/run/openvswitch/db.sock
I1028 15:21:18.334503 1 agent.go:205] Setting up node network
I1028 15:21:18.347790 1 agent.go:603] Setting Node MTU=1450
Review Antrea controller logs
Command
kubectl logs antrea-controller-POD -n kube-system
Example
root@debian:~# kf logs antrea-controller-6d498b5b54-xvqv6 -n kube-system
I1028 15:09:37.654909 1 log_file.go:99] Set log file max size to 104857600
I1028 15:09:37.655497 1 controller.go:82] Starting Antrea Controller (version v0.11.3-unknown)
I1028 15:09:37.655521 1 client.go:34] No kubeconfig file was specified. Falling back to in-cluster config
I1028 15:09:38.274936 1 prometheus.go:73] Initializing prometheus metrics
I1028 15:09:38.275390 1 log_file.go:127] Starting log file monitoring. Maximum log file number is 4
I1028 15:09:38.275779 1 controller.go:60] Starting Antrea Controller Monitor
I1028 15:09:38.275868 1 controller.go:62] Waiting for node synced for Controller Monitor
I1028 15:09:38.275981 1 networkpolicy_controller.go:1074] Starting NetworkPolicy controller
I1028 15:09:38.276060 1 networkpolicy_controller.go:1077] Waiting for caches to sync for NetworkPolicy controller
I1028 15:09:38.276236 1 cacert_controller.go:223] Syncing CA certificate with ConfigMap
I1028 15:09:38.292085 1 cacert_controller.go:204] Syncing CA certificate with APIServices
I1028 15:09:38.311406 1 cacert_controller.go:263] Starting CACertController