Use the same API version for the HPA controller and the deployment (#807)

This should fix our deployment problem. Tested on alpha.

Also updated the deployment script to replace the service object.
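
For context, an HPA's spec.scaleTargetRef names its target by apiVersion, kind, and name, and that apiVersion has to be one the cluster still serves for Deployments; if the HPA keeps pointing at a deprecated group/version (e.g. extensions/v1beta1) while the Deployment manifest has moved to apps/v1, the autoscaler can fail to resolve its scale target. The manifests themselves are not shown in this diff, so the matching pair below is only a sketch with hypothetical names:

apiVersion: apps/v1                        # the Deployment's API version
kind: Deployment
metadata:
  name: proxy                              # hypothetical name
spec:
  replicas: 2
  selector:
    matchLabels:
      app: proxy
  template:
    metadata:
      labels:
        app: proxy
    spec:
      containers:
      - name: proxy
        image: gcr.io/GCP_PROJECT/proxy:latest   # GCP_PROJECT is substituted by the deploy script
---
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: proxy-hpa                          # hypothetical name
spec:
  scaleTargetRef:
    apiVersion: apps/v1                    # same API version as the Deployment above
    kind: Deployment
    name: proxy
  minReplicas: 2
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80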
Author: Lai Jiang, 2020-09-16 09:15:35 -04:00 (committed by GitHub)
parent 5765be52d2
commit b6ed1982c3
3 changed files with 5 additions and 3 deletions


@@ -31,14 +31,16 @@ do
   echo "Updating cluster ${parts[0]} in zone ${parts[1]}..."
   gcloud container clusters get-credentials "${parts[0]}" \
     --project "${project}" --zone "${parts[1]}"
-  # Kills all running pods, new pods created will be pulling the new image.
   sed s/GCP_PROJECT/${project}/g "./kubernetes/proxy-deployment-${environment}.yaml" | \
     kubectl replace -f -
+  kubectl replace -f "./kubernetes/proxy-service.yaml" --force
   # Alpha does not have canary
   if [[ ${environment} != "alpha" ]]; then
     sed s/GCP_PROJECT/${project}/g "./kubernetes/proxy-deployment-${environment}-canary.yaml" | \
       kubectl replace -f -
+    kubectl replace -f "./kubernetes/proxy-service-canary.yaml" --force
   fi
+  # Kills all running pods, new pods created will be pulling the new image.
   kubectl delete pods --all
 done < <(gcloud container clusters list --project ${project} | grep proxy-cluster)
 kubectl config use-context "$current_context"
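
A note on the added --force flags: kubectl replace --force deletes the existing object and recreates it from the manifest, which is what lets the script push Service changes that a plain replace would reject, since some Service fields (notably spec.clusterIP) are immutable once set. Roughly, using the same manifest path as the script:

# Plain replace: rejected if the manifest changes an immutable field.
kubectl replace -f ./kubernetes/proxy-service.yaml

# Forced replace: delete the existing Service, then create it from the manifest.
# The Service (and its cluster IP) is briefly absent between the two steps.
kubectl replace -f ./kubernetes/proxy-service.yaml --force

The trailing kubectl delete pods --all then restarts every pod in the namespace, so the replacement pods pull the newly pushed image (assuming a moving tag or imagePullPolicy: Always).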