Whether you are working on scaling, performance, or high availability, it can be useful to see exactly which Kubernetes worker node each pod is being scheduled onto.
Pods as distributed across worker nodes
# List each pod in the namespace alongside the worker node it was scheduled onto.
#
# NOTE: set the namespace as a separate statement. Writing
# 'ns=default kubectl … -n $ns' only places ns in kubectl's environment,
# while $ns is expanded by the *current* shell beforehand — so -n would
# receive an empty (or stale) value, not "default".
ns=default
kubectl get pods -n "$ns" -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName
Pods as distributed across zones (GKE specific)
If you wanted to take it one step further and verify the pods were being distributed to GKE worker nodes in different zones, then you would first create an associative array for resolving node->zone.
The node label “topology.kubernetes.io/zone” is a standard well-known Kubernetes label populated by the cloud provider. This recipe was written and verified against GKE worker nodes; it should also work on EKS (Amazon) or AKS (Azure) clusters whose nodes carry the same label, but that has not been verified here.
# Build an associative array (bash 4+) mapping GCP node name -> zone,
# read from the "topology.kubernetes.io/zone" node label.
# A while-read loop over process substitution replaces the original
# IFS-tweaked for-loop over $(kubectl …): it avoids word-splitting/glob
# surprises and the three awk forks per line, and runs in the current
# shell so the array survives the loop.
unset node_to_zone
declare -A node_to_zone

while read -r node zone; do
  node_to_zone["$node"]="$zone"
done < <(kubectl get nodes --no-headers \
  -o=custom-columns=NAME:.metadata.name,ZONE:".metadata.labels.topology\.kubernetes\.io/zone")

# Show the node->zone mappings.
# (Message says "zone", not "region", since the label value is a zone,
# e.g. us-central1-a.)
for nodekey in "${!node_to_zone[@]}"; do
  echo "$nodekey is in zone ${node_to_zone[$nodekey]}"
done

# Show all pods, augmented with node->zone resolution.
# Default the namespace so this snippet also works standalone.
ns="${ns:-default}"
while read -r pod node; do
  zone="${node_to_zone[$node]}"
  echo "$pod $node $zone"
done < <(kubectl get pods -n "$ns" --no-headers \
  -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName)
REFERENCES