mirror of
https://github.com/Azure/k8s-deploy.git
synced 2026-03-03 16:22:18 +08:00
Swapped out release-pr.yml and tag-and-release.yml to use reusable github workflows
This commit is contained in:
parent
ca8d2604ac
commit
4810ff9a3e
50
.github/workflows/release-pr.yml
vendored
50
.github/workflows/release-pr.yml
vendored
@ -1,4 +1,4 @@
|
||||
name: "Create release PR"
|
||||
name: Create release PR
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
@ -8,49 +8,7 @@ on:
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
createPullRequest:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
release-pr:
|
||||
uses: OliverMKing/javascript-release-workflow/.github/workflows/release-pr.yml@main
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Check if remote branch exists
|
||||
env:
|
||||
BRANCH: releases/${{ github.event.inputs.release }}
|
||||
run: |
|
||||
echo "##[set-output name=exists;]$(echo $(if [[ -z $(git ls-remote --heads origin ${BRANCH}) ]]; then echo false; else echo true; fi;))"
|
||||
id: extract-branch-status
|
||||
# these two only need to occur if the branch exists
|
||||
- name: Checkout proper branch
|
||||
if: ${{ steps.extract-branch-status.outputs.exists == 'true' }}
|
||||
env:
|
||||
BRANCH: releases/${{ github.event.inputs.release }}
|
||||
run: git checkout ${BRANCH}
|
||||
- name: Reset promotion branch
|
||||
if: ${{ steps.extract-branch-status.outputs.exists == 'true' }}
|
||||
run: |
|
||||
git fetch origin main:main
|
||||
git reset --hard main
|
||||
- name: Install packages
|
||||
run: |
|
||||
rm -rf node_modules/
|
||||
npm install --no-bin-links
|
||||
npm run build
|
||||
- name: Remove node_modules from gitignore
|
||||
run: |
|
||||
sed -i '/node_modules/d' ./.gitignore
|
||||
- name: Create branch
|
||||
uses: peterjgrainger/action-create-branch@v2.0.1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
branch: releases/${{ github.event.inputs.release }}
|
||||
- name: Create pull request
|
||||
uses: peter-evans/create-pull-request@v3
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
commit-message: Add node modules and new code for release
|
||||
title: ${{ github.event.inputs.release }} new release
|
||||
base: releases/${{ github.event.inputs.release }}
|
||||
branch: create-release
|
||||
delete-branch: true
|
||||
release: ${{ github.event.inputs.release }}
|
||||
73
.github/workflows/tag-and-release.yml
vendored
73
.github/workflows/tag-and-release.yml
vendored
@ -1,4 +1,4 @@
|
||||
name: "Tag and create release draft"
|
||||
name: Tag and create release draft
|
||||
|
||||
on:
|
||||
push:
|
||||
@ -6,72 +6,5 @@ on:
|
||||
- releases/*
|
||||
|
||||
jobs:
|
||||
gh_tagged_release:
|
||||
runs-on: "ubuntu-latest"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Test release
|
||||
run: |
|
||||
sudo npm install n
|
||||
sudo n latest
|
||||
npm test
|
||||
- name: Get branch ending
|
||||
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF##*/} | sed 's:.*/::')"
|
||||
id: extract-branch
|
||||
- name: Get tags
|
||||
run: |
|
||||
echo "##[set-output name=tags;]$(echo $(git tag))"
|
||||
id: extract-tags
|
||||
- name: Get latest tag
|
||||
uses: actions/github-script@v5
|
||||
env:
|
||||
TAGS: ${{ steps.extract-tags.outputs.tags }}
|
||||
BRANCH: ${{ steps.extract-branch.outputs.branch }}
|
||||
with:
|
||||
script: |
|
||||
const tags = process.env["TAGS"]
|
||||
.split(" ")
|
||||
.map((x) => x.trim());
|
||||
const branch = process.env["BRANCH"];
|
||||
const splitTag = (x) =>
|
||||
x
|
||||
.substring(branch.length + 1)
|
||||
.split(".")
|
||||
.map((x) => Number(x));
|
||||
function compareTags(nums1, nums2, position = 0) {
|
||||
if (nums1.length < position && nums2.length < position) return nums2;
|
||||
const num1 = splitTag(nums1)[position] || 0;
|
||||
const num2 = splitTag(nums2)[position] || 0;
|
||||
if (num1 === num2) return compareTags(nums1, nums2, position + 1);
|
||||
else if (num1 > num2) return nums1;
|
||||
else return nums2;
|
||||
}
|
||||
const branchTags = tags.filter((tag) => tag.startsWith(branch));
|
||||
if (branchTags.length < 1) return branch + ".-1"
|
||||
return branchTags.reduce((prev, curr) => compareTags(prev, curr));
|
||||
result-encoding: string
|
||||
id: get-latest-tag
|
||||
- name: Get new tag
|
||||
uses: actions/github-script@v5
|
||||
env:
|
||||
PREV: ${{ steps.get-latest-tag.outputs.result }}
|
||||
with:
|
||||
script: |
|
||||
let version = process.env["PREV"]
|
||||
if (!version.includes(".")) version += ".0"; // case of v1 or v2
|
||||
const prefix = /^([a-zA-Z]+)/.exec(version)[0];
|
||||
const numbers = version.substring(prefix.length);
|
||||
let split = numbers.split(".");
|
||||
split[split.length - 1] = parseInt(split[split.length - 1]) + 1;
|
||||
return prefix + split.join(".");
|
||||
result-encoding: string
|
||||
id: get-new-tag
|
||||
- uses: "marvinpinto/action-automatic-releases@v1.2.1"
|
||||
with:
|
||||
title: ${{ steps.get-new-tag.outputs.result }} release
|
||||
automatic_release_tag: ${{ steps.get-new-tag.outputs.result }}
|
||||
repo_token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
draft: true
|
||||
tag-and-release:
|
||||
uses: OliverMKing/javascript-release-workflow/.github/workflows/tag-and-release.yml@main
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@ -2,3 +2,4 @@ node_modules
|
||||
|
||||
.DS_Store
|
||||
.idea
|
||||
lib/
|
||||
@ -1,63 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.deploy = void 0;
|
||||
const core = require("@actions/core");
|
||||
const models = require("../types/kubernetesTypes");
|
||||
const KubernetesConstants = require("../types/kubernetesTypes");
|
||||
const manifestUpdateUtils_1 = require("../utilities/manifestUpdateUtils");
|
||||
const blueGreenHelper_1 = require("../strategyHelpers/blueGreen/blueGreenHelper");
|
||||
const deploymentHelper_1 = require("../strategyHelpers/deploymentHelper");
|
||||
const deploymentStrategy_1 = require("../types/deploymentStrategy");
|
||||
const trafficSplitMethod_1 = require("../types/trafficSplitMethod");
|
||||
const routeStrategy_1 = require("../types/routeStrategy");
|
||||
function deploy(kubectl, manifestFilePaths, deploymentStrategy) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
// update manifests
|
||||
const inputManifestFiles = manifestUpdateUtils_1.updateManifestFiles(manifestFilePaths);
|
||||
core.debug("Input manifest files: " + inputManifestFiles);
|
||||
// deploy manifests
|
||||
core.info("Deploying manifests");
|
||||
const trafficSplitMethod = trafficSplitMethod_1.parseTrafficSplitMethod(core.getInput("traffic-split-method", { required: true }));
|
||||
const deployedManifestFiles = yield deploymentHelper_1.deployManifests(inputManifestFiles, deploymentStrategy, kubectl, trafficSplitMethod);
|
||||
core.debug("Deployed manifest files: " + deployedManifestFiles);
|
||||
// check manifest stability
|
||||
core.info("Checking manifest stability");
|
||||
const resourceTypes = manifestUpdateUtils_1.getResources(deployedManifestFiles, models.DEPLOYMENT_TYPES.concat([
|
||||
KubernetesConstants.DiscoveryAndLoadBalancerResource.SERVICE,
|
||||
]));
|
||||
yield deploymentHelper_1.checkManifestStability(kubectl, resourceTypes);
|
||||
if (deploymentStrategy == deploymentStrategy_1.DeploymentStrategy.BLUE_GREEN) {
|
||||
core.info("Routing blue green");
|
||||
const routeStrategy = routeStrategy_1.parseRouteStrategy(core.getInput("route-method", { required: true }));
|
||||
yield blueGreenHelper_1.routeBlueGreen(kubectl, inputManifestFiles, routeStrategy);
|
||||
}
|
||||
// print ingresses
|
||||
core.info("Printing ingresses");
|
||||
const ingressResources = manifestUpdateUtils_1.getResources(deployedManifestFiles, [
|
||||
KubernetesConstants.DiscoveryAndLoadBalancerResource.INGRESS,
|
||||
]);
|
||||
for (const ingressResource of ingressResources) {
|
||||
yield kubectl.getResource(KubernetesConstants.DiscoveryAndLoadBalancerResource.INGRESS, ingressResource.name);
|
||||
}
|
||||
// annotate resources
|
||||
core.info("Annotating resources");
|
||||
let allPods;
|
||||
try {
|
||||
allPods = JSON.parse((yield kubectl.getAllPods()).stdout);
|
||||
}
|
||||
catch (e) {
|
||||
core.debug("Unable to parse pods: " + e);
|
||||
}
|
||||
yield deploymentHelper_1.annotateAndLabelResources(deployedManifestFiles, kubectl, resourceTypes, allPods);
|
||||
});
|
||||
}
|
||||
exports.deploy = deploy;
|
||||
@ -1,109 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.promote = void 0;
|
||||
const core = require("@actions/core");
|
||||
const deploy = require("./deploy");
|
||||
const canaryDeploymentHelper = require("../strategyHelpers/canary/canaryHelper");
|
||||
const SMICanaryDeploymentHelper = require("../strategyHelpers/canary/smiCanaryHelper");
|
||||
const manifestUpdateUtils_1 = require("../utilities/manifestUpdateUtils");
|
||||
const models = require("../types/kubernetesTypes");
|
||||
const KubernetesManifestUtility = require("../utilities/manifestStabilityUtils");
|
||||
const blueGreenHelper_1 = require("../strategyHelpers/blueGreen/blueGreenHelper");
|
||||
const serviceBlueGreenHelper_1 = require("../strategyHelpers/blueGreen/serviceBlueGreenHelper");
|
||||
const ingressBlueGreenHelper_1 = require("../strategyHelpers/blueGreen/ingressBlueGreenHelper");
|
||||
const smiBlueGreenHelper_1 = require("../strategyHelpers/blueGreen/smiBlueGreenHelper");
|
||||
const deploymentStrategy_1 = require("../types/deploymentStrategy");
|
||||
const trafficSplitMethod_1 = require("../types/trafficSplitMethod");
|
||||
const routeStrategy_1 = require("../types/routeStrategy");
|
||||
function promote(kubectl, manifests, deploymentStrategy) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
switch (deploymentStrategy) {
|
||||
case deploymentStrategy_1.DeploymentStrategy.CANARY:
|
||||
yield promoteCanary(kubectl, manifests);
|
||||
break;
|
||||
case deploymentStrategy_1.DeploymentStrategy.BLUE_GREEN:
|
||||
yield promoteBlueGreen(kubectl, manifests);
|
||||
break;
|
||||
default:
|
||||
throw Error("Invalid promote deployment strategy");
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.promote = promote;
|
||||
function promoteCanary(kubectl, manifests) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
let includeServices = false;
|
||||
const trafficSplitMethod = trafficSplitMethod_1.parseTrafficSplitMethod(core.getInput("traffic-split-method", { required: true }));
|
||||
if (trafficSplitMethod == trafficSplitMethod_1.TrafficSplitMethod.SMI) {
|
||||
includeServices = true;
|
||||
// In case of SMI traffic split strategy when deployment is promoted, first we will redirect traffic to
|
||||
// canary deployment, then update stable deployment and then redirect traffic to stable deployment
|
||||
core.info("Redirecting traffic to canary deployment");
|
||||
yield SMICanaryDeploymentHelper.redirectTrafficToCanaryDeployment(kubectl, manifests);
|
||||
core.info("Deploying input manifests with SMI canary strategy");
|
||||
yield deploy.deploy(kubectl, manifests, deploymentStrategy_1.DeploymentStrategy.CANARY);
|
||||
core.info("Redirecting traffic to stable deployment");
|
||||
yield SMICanaryDeploymentHelper.redirectTrafficToStableDeployment(kubectl, manifests);
|
||||
}
|
||||
else {
|
||||
core.info("Deploying input manifests");
|
||||
yield deploy.deploy(kubectl, manifests, deploymentStrategy_1.DeploymentStrategy.CANARY);
|
||||
}
|
||||
core.info("Deleting canary and baseline workloads");
|
||||
try {
|
||||
yield canaryDeploymentHelper.deleteCanaryDeployment(kubectl, manifests, includeServices);
|
||||
}
|
||||
catch (ex) {
|
||||
core.warning("Exception occurred while deleting canary and baseline workloads: " + ex);
|
||||
}
|
||||
});
|
||||
}
|
||||
function promoteBlueGreen(kubectl, manifests) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
// update container images and pull secrets
|
||||
const inputManifestFiles = manifestUpdateUtils_1.updateManifestFiles(manifests);
|
||||
const manifestObjects = blueGreenHelper_1.getManifestObjects(inputManifestFiles);
|
||||
const routeStrategy = routeStrategy_1.parseRouteStrategy(core.getInput("route-method", { required: true }));
|
||||
core.info("Deleting old deployment and making new one");
|
||||
let result;
|
||||
if (routeStrategy == routeStrategy_1.RouteStrategy.INGRESS) {
|
||||
result = yield ingressBlueGreenHelper_1.promoteBlueGreenIngress(kubectl, manifestObjects);
|
||||
}
|
||||
else if (routeStrategy == routeStrategy_1.RouteStrategy.SMI) {
|
||||
result = yield smiBlueGreenHelper_1.promoteBlueGreenSMI(kubectl, manifestObjects);
|
||||
}
|
||||
else {
|
||||
result = yield serviceBlueGreenHelper_1.promoteBlueGreenService(kubectl, manifestObjects);
|
||||
}
|
||||
// checking stability of newly created deployments
|
||||
core.info("Checking manifest stability");
|
||||
const deployedManifestFiles = result.newFilePaths;
|
||||
const resources = manifestUpdateUtils_1.getResources(deployedManifestFiles, models.DEPLOYMENT_TYPES.concat([
|
||||
models.DiscoveryAndLoadBalancerResource.SERVICE,
|
||||
]));
|
||||
yield KubernetesManifestUtility.checkManifestStability(kubectl, resources);
|
||||
core.info("Routing to new deployments and deleting old workloads and services");
|
||||
if (routeStrategy == routeStrategy_1.RouteStrategy.INGRESS) {
|
||||
yield ingressBlueGreenHelper_1.routeBlueGreenIngress(kubectl, null, manifestObjects.serviceNameMap, manifestObjects.ingressEntityList);
|
||||
yield blueGreenHelper_1.deleteWorkloadsAndServicesWithLabel(kubectl, blueGreenHelper_1.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList, manifestObjects.serviceEntityList);
|
||||
}
|
||||
else if (routeStrategy == routeStrategy_1.RouteStrategy.SMI) {
|
||||
yield smiBlueGreenHelper_1.routeBlueGreenSMI(kubectl, blueGreenHelper_1.NONE_LABEL_VALUE, manifestObjects.serviceEntityList);
|
||||
yield blueGreenHelper_1.deleteWorkloadsWithLabel(kubectl, blueGreenHelper_1.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList);
|
||||
yield smiBlueGreenHelper_1.cleanupSMI(kubectl, manifestObjects.serviceEntityList);
|
||||
}
|
||||
else {
|
||||
yield serviceBlueGreenHelper_1.routeBlueGreenService(kubectl, blueGreenHelper_1.NONE_LABEL_VALUE, manifestObjects.serviceEntityList);
|
||||
yield blueGreenHelper_1.deleteWorkloadsWithLabel(kubectl, blueGreenHelper_1.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList);
|
||||
}
|
||||
});
|
||||
}
|
||||
@ -1,64 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.reject = void 0;
|
||||
const core = require("@actions/core");
|
||||
const canaryDeploymentHelper = require("../strategyHelpers/canary/canaryHelper");
|
||||
const SMICanaryDeploymentHelper = require("../strategyHelpers/canary/smiCanaryHelper");
|
||||
const serviceBlueGreenHelper_1 = require("../strategyHelpers/blueGreen/serviceBlueGreenHelper");
|
||||
const ingressBlueGreenHelper_1 = require("../strategyHelpers/blueGreen/ingressBlueGreenHelper");
|
||||
const smiBlueGreenHelper_1 = require("../strategyHelpers/blueGreen/smiBlueGreenHelper");
|
||||
const deploymentStrategy_1 = require("../types/deploymentStrategy");
|
||||
const trafficSplitMethod_1 = require("../types/trafficSplitMethod");
|
||||
const routeStrategy_1 = require("../types/routeStrategy");
|
||||
function reject(kubectl, manifests, deploymentStrategy) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
switch (deploymentStrategy) {
|
||||
case deploymentStrategy_1.DeploymentStrategy.CANARY:
|
||||
yield rejectCanary(kubectl, manifests);
|
||||
break;
|
||||
case deploymentStrategy_1.DeploymentStrategy.BLUE_GREEN:
|
||||
yield rejectBlueGreen(kubectl, manifests);
|
||||
break;
|
||||
default:
|
||||
throw "Invalid delete deployment strategy";
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.reject = reject;
|
||||
function rejectCanary(kubectl, manifests) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
let includeServices = false;
|
||||
const trafficSplitMethod = trafficSplitMethod_1.parseTrafficSplitMethod(core.getInput("traffic-split-method", { required: true }));
|
||||
if (trafficSplitMethod == trafficSplitMethod_1.TrafficSplitMethod.SMI) {
|
||||
core.info("Rejecting deployment with SMI canary strategy");
|
||||
includeServices = true;
|
||||
yield SMICanaryDeploymentHelper.redirectTrafficToStableDeployment(kubectl, manifests);
|
||||
}
|
||||
core.info("Deleting baseline and canary workloads");
|
||||
yield canaryDeploymentHelper.deleteCanaryDeployment(kubectl, manifests, includeServices);
|
||||
});
|
||||
}
|
||||
function rejectBlueGreen(kubectl, manifests) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
core.info("Rejecting deployment with blue green strategy");
|
||||
const routeStrategy = routeStrategy_1.parseRouteStrategy(core.getInput("route-method", { required: true }));
|
||||
if (routeStrategy == routeStrategy_1.RouteStrategy.INGRESS) {
|
||||
yield ingressBlueGreenHelper_1.rejectBlueGreenIngress(kubectl, manifests);
|
||||
}
|
||||
else if (routeStrategy == routeStrategy_1.RouteStrategy.SMI) {
|
||||
yield smiBlueGreenHelper_1.rejectBlueGreenSMI(kubectl, manifests);
|
||||
}
|
||||
else {
|
||||
yield serviceBlueGreenHelper_1.rejectBlueGreenService(kubectl, manifests);
|
||||
}
|
||||
});
|
||||
}
|
||||
@ -1,73 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getWorkflowAnnotationKeyLabel = exports.getWorkflowAnnotationsJson = exports.WORKLOAD_TYPES_WITH_ROLLOUT_STATUS = exports.WORKLOAD_TYPES = exports.DEPLOYMENT_TYPES = exports.ServiceTypes = exports.DiscoveryAndLoadBalancerResource = exports.KubernetesWorkload = void 0;
|
||||
class KubernetesWorkload {
|
||||
}
|
||||
exports.KubernetesWorkload = KubernetesWorkload;
|
||||
KubernetesWorkload.POD = "Pod";
|
||||
KubernetesWorkload.REPLICASET = "Replicaset";
|
||||
KubernetesWorkload.DEPLOYMENT = "Deployment";
|
||||
KubernetesWorkload.STATEFUL_SET = "StatefulSet";
|
||||
KubernetesWorkload.DAEMON_SET = "DaemonSet";
|
||||
KubernetesWorkload.JOB = "job";
|
||||
KubernetesWorkload.CRON_JOB = "cronjob";
|
||||
class DiscoveryAndLoadBalancerResource {
|
||||
}
|
||||
exports.DiscoveryAndLoadBalancerResource = DiscoveryAndLoadBalancerResource;
|
||||
DiscoveryAndLoadBalancerResource.SERVICE = "service";
|
||||
DiscoveryAndLoadBalancerResource.INGRESS = "ingress";
|
||||
class ServiceTypes {
|
||||
}
|
||||
exports.ServiceTypes = ServiceTypes;
|
||||
ServiceTypes.LOAD_BALANCER = "LoadBalancer";
|
||||
ServiceTypes.NODE_PORT = "NodePort";
|
||||
ServiceTypes.CLUSTER_IP = "ClusterIP";
|
||||
exports.DEPLOYMENT_TYPES = [
|
||||
"deployment",
|
||||
"replicaset",
|
||||
"daemonset",
|
||||
"pod",
|
||||
"statefulset",
|
||||
];
|
||||
exports.WORKLOAD_TYPES = [
|
||||
"deployment",
|
||||
"replicaset",
|
||||
"daemonset",
|
||||
"pod",
|
||||
"statefulset",
|
||||
"job",
|
||||
"cronjob",
|
||||
];
|
||||
exports.WORKLOAD_TYPES_WITH_ROLLOUT_STATUS = [
|
||||
"deployment",
|
||||
"daemonset",
|
||||
"statefulset",
|
||||
];
|
||||
function getWorkflowAnnotationsJson(lastSuccessRunSha, workflowFilePath, deploymentConfig) {
|
||||
let annotationObject = {};
|
||||
annotationObject["run"] = process.env.GITHUB_RUN_ID;
|
||||
annotationObject["repository"] = process.env.GITHUB_REPOSITORY;
|
||||
annotationObject["workflow"] = process.env.GITHUB_WORKFLOW;
|
||||
annotationObject["workflowFileName"] = workflowFilePath.replace(".github/workflows/", "");
|
||||
annotationObject["jobName"] = process.env.GITHUB_JOB;
|
||||
annotationObject["createdBy"] = process.env.GITHUB_ACTOR;
|
||||
annotationObject["runUri"] = `https://github.com/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID}`;
|
||||
annotationObject["commit"] = process.env.GITHUB_SHA;
|
||||
annotationObject["lastSuccessRunCommit"] = lastSuccessRunSha;
|
||||
annotationObject["branch"] = process.env.GITHUB_REF;
|
||||
annotationObject["deployTimestamp"] = Date.now();
|
||||
annotationObject["dockerfilePaths"] = deploymentConfig.dockerfilePaths;
|
||||
annotationObject["manifestsPaths"] = deploymentConfig.manifestFilePaths;
|
||||
annotationObject["helmChartPaths"] = deploymentConfig.helmChartFilePaths;
|
||||
annotationObject["provider"] = "GitHub";
|
||||
return JSON.stringify(annotationObject);
|
||||
}
|
||||
exports.getWorkflowAnnotationsJson = getWorkflowAnnotationsJson;
|
||||
function getWorkflowAnnotationKeyLabel(workflowFilePath) {
|
||||
const hashKey = require("crypto")
|
||||
.createHash("MD5")
|
||||
.update(`${process.env.GITHUB_REPOSITORY}/${workflowFilePath}`)
|
||||
.digest("hex");
|
||||
return `githubWorkflow_${hashKey}`;
|
||||
}
|
||||
exports.getWorkflowAnnotationKeyLabel = getWorkflowAnnotationKeyLabel;
|
||||
@ -1,50 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.deploy = void 0;
|
||||
const core = require("@actions/core");
|
||||
const KubernetesObjectUtility = require("../utilities/resource-object-utility");
|
||||
const models = require("../constants");
|
||||
const KubernetesConstants = require("../constants");
|
||||
const manifest_utilities_1 = require("../utilities/manifest-utilities");
|
||||
const blue_green_helper_1 = require("../utilities/strategy-helpers/blue-green-helper");
|
||||
const deployment_helper_1 = require("../utilities/strategy-helpers/deployment-helper");
|
||||
function deploy(manifestFilePaths, deploymentStrategy, kubectl) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
const inputManifestFiles = manifest_utilities_1.updateManifestFiles(manifestFilePaths);
|
||||
// deployment
|
||||
const deployedManifestFiles = deployment_helper_1.deployManifests(inputManifestFiles, deploymentStrategy, kubectl);
|
||||
// check manifest stability
|
||||
const resourceTypes = KubernetesObjectUtility.getResources(deployedManifestFiles, models.DEPLOYMENT_TYPES.concat([
|
||||
KubernetesConstants.DiscoveryAndLoadBalancerResource.SERVICE,
|
||||
]));
|
||||
yield deployment_helper_1.checkManifestStability(kubectl, resourceTypes);
|
||||
// route blue-green deployments
|
||||
if (blue_green_helper_1.isBlueGreenDeploymentStrategy()) {
|
||||
yield blue_green_helper_1.routeBlueGreen(kubectl, inputManifestFiles);
|
||||
}
|
||||
// print ingress resources
|
||||
const ingressResources = KubernetesObjectUtility.getResources(deployedManifestFiles, [KubernetesConstants.DiscoveryAndLoadBalancerResource.INGRESS]);
|
||||
ingressResources.forEach((ingressResource) => {
|
||||
kubectl.getResource(KubernetesConstants.DiscoveryAndLoadBalancerResource.INGRESS, ingressResource.name);
|
||||
});
|
||||
// annotate resources
|
||||
let allPods;
|
||||
try {
|
||||
allPods = JSON.parse(kubectl.getAllPods().stdout);
|
||||
}
|
||||
catch (e) {
|
||||
core.debug("Unable to parse pods; Error: " + e);
|
||||
}
|
||||
deployment_helper_1.annotateAndLabelResources(deployedManifestFiles, kubectl, resourceTypes, allPods);
|
||||
});
|
||||
}
|
||||
exports.deploy = deploy;
|
||||
@ -1,31 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.DockerExec = void 0;
|
||||
const tool_runner_1 = require("./utilities/tool-runner");
|
||||
class DockerExec {
|
||||
constructor(dockerPath) {
|
||||
this.dockerPath = dockerPath;
|
||||
}
|
||||
;
|
||||
pull(image, args, silent) {
|
||||
args = ['pull', image, ...args];
|
||||
let result = this.execute(args, silent);
|
||||
if (result.stderr != '' && result.code != 0) {
|
||||
throw new Error(`docker images pull failed with: ${result.error}`);
|
||||
}
|
||||
}
|
||||
inspect(image, args, silent) {
|
||||
args = ['inspect', image, ...args];
|
||||
let result = this.execute(args, silent);
|
||||
if (result.stderr != '' && result.code != 0) {
|
||||
throw new Error(`docker inspect call failed with: ${result.error}`);
|
||||
}
|
||||
return result.stdout;
|
||||
}
|
||||
execute(args, silent) {
|
||||
const command = new tool_runner_1.ToolRunner(this.dockerPath);
|
||||
command.arg(args);
|
||||
return command.execSync({ silent: !!silent });
|
||||
}
|
||||
}
|
||||
exports.DockerExec = DockerExec;
|
||||
@ -1,47 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.GitHubClient = void 0;
|
||||
const core = require("@actions/core");
|
||||
const httpClient_1 = require("./utilities/httpClient");
|
||||
const core_1 = require("@octokit/core");
|
||||
const plugin_retry_1 = require("@octokit/plugin-retry");
|
||||
const RetryOctokit = core_1.Octokit.plugin(plugin_retry_1.retry);
|
||||
const RETRY_COUNT = 5;
|
||||
class GitHubClient {
|
||||
constructor(repository, token) {
|
||||
this.repository = repository;
|
||||
this.token = token;
|
||||
}
|
||||
getWorkflows() {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
const octokit = new RetryOctokit({
|
||||
auth: this.token,
|
||||
request: { retries: RETRY_COUNT },
|
||||
});
|
||||
core.debug(`Getting workflows for repo: ${this.repository}`);
|
||||
return Promise.resolve(yield octokit.request(`GET /repos/${this.repository}/actions/workflows`));
|
||||
const getWorkflowFileNameUrl = `https://api.github.com`;
|
||||
const webRequest = new httpClient_1.WebRequest();
|
||||
webRequest.method = "GET";
|
||||
webRequest.uri = getWorkflowFileNameUrl;
|
||||
webRequest.headers = {
|
||||
Authorization: `Bearer ${this.token}`,
|
||||
};
|
||||
const response = yield httpClient_1.sendRequest(webRequest);
|
||||
return Promise.resolve(response);
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.GitHubClient = GitHubClient;
|
||||
const token = "";
|
||||
const client = new GitHubClient("k8s-bake", token);
|
||||
console.log(client.getWorkflows());
|
||||
@ -1,64 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.githubToken = exports.forceDeployment = exports.args = exports.baselineAndCanaryReplicas = exports.versionSwitchBuffer = exports.routeMethod = exports.trafficSplitMethod = exports.deploymentStrategy = exports.canaryPercentage = exports.manifests = exports.imagePullSecrets = exports.containers = exports.namespace = void 0;
|
||||
const core = require("@actions/core");
|
||||
// delete this later (refactor into actions)
|
||||
exports.namespace = core.getInput("namespace");
|
||||
exports.containers = core.getInput("images").split("\n");
|
||||
exports.imagePullSecrets = core
|
||||
.getInput("imagepullsecrets")
|
||||
.split("\n")
|
||||
.filter((secret) => secret.trim().length > 0);
|
||||
exports.manifests = core
|
||||
.getInput("manifests")
|
||||
.split(/[\n,;]+/)
|
||||
.filter((manifest) => manifest.trim().length > 0);
|
||||
exports.canaryPercentage = core.getInput("percentage");
|
||||
exports.deploymentStrategy = core.getInput("strategy");
|
||||
exports.trafficSplitMethod = core.getInput("traffic-split-method");
|
||||
exports.routeMethod = core.getInput("route-method");
|
||||
exports.versionSwitchBuffer = core.getInput("version-switch-buffer");
|
||||
exports.baselineAndCanaryReplicas = core.getInput("baseline-and-canary-replicas");
|
||||
exports.args = core.getInput("arguments");
|
||||
exports.forceDeployment = core.getInput("force").toLowerCase() == "true";
|
||||
exports.githubToken = core.getInput("token");
|
||||
if (!exports.namespace) {
|
||||
core.debug('Namespace was not supplied; using "default" namespace instead.');
|
||||
exports.namespace = "default";
|
||||
}
|
||||
if (!exports.githubToken) {
|
||||
core.error("'token' input is not supplied. Set it to a PAT/GITHUB_TOKEN");
|
||||
}
|
||||
try {
|
||||
const pe = parseInt(exports.canaryPercentage);
|
||||
if (pe < 0 || pe > 100) {
|
||||
core.setFailed("A valid percentage value is between 0 and 100");
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
catch (ex) {
|
||||
core.setFailed("Enter a valid 'percentage' integer value ");
|
||||
process.exit(1);
|
||||
}
|
||||
try {
|
||||
const pe = parseInt(exports.baselineAndCanaryReplicas);
|
||||
if (pe < 0 || pe > 100) {
|
||||
core.setFailed("A valid baseline-and-canary-replicas value is between 0 and 100");
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
catch (ex) {
|
||||
core.setFailed("Enter a valid 'baseline-and-canary-replicas' integer value");
|
||||
process.exit(1);
|
||||
}
|
||||
try {
|
||||
const pe = parseInt(exports.versionSwitchBuffer);
|
||||
if (pe < 0 || pe > 300) {
|
||||
core.setFailed("Invalid buffer time, valid version-switch-buffer is a value more than or equal to 0 and lesser than or equal 300");
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
catch (ex) {
|
||||
core.setFailed("Enter a valid 'version-switch-buffer' integer value");
|
||||
process.exit(1);
|
||||
}
|
||||
@ -1,123 +0,0 @@
|
||||
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Kubectl = void 0;
const tool_runner_1 = require("./utilities/tool-runner");
/**
 * Thin synchronous wrapper around the `kubectl` CLI. Every method shells out
 * via ToolRunner and returns its exec result. `--namespace <ns>` (and, when
 * configured, `--insecure-skip-tls-verify`) is appended to every invocation.
 */
class Kubectl {
    /**
     * @param kubectlPath - path to the kubectl binary to invoke
     * @param namespace - namespace appended to every command (falls back to "default")
     * @param ignoreSSLErrors - when truthy, pass --insecure-skip-tls-verify on every call
     */
    constructor(kubectlPath, namespace, ignoreSSLErrors) {
        this.kubectlPath = kubectlPath;
        this.ignoreSSLErrors = !!ignoreSSLErrors;
        if (!!namespace) {
            this.namespace = namespace;
        } else {
            this.namespace = "default";
        }
    }
    // `kubectl apply -f <paths>`; --force recreates conflicting resources.
    apply(configurationPaths, force) {
        let applyArgs = ["apply", "-f", this.createInlineArray(configurationPaths)];
        if (!!force) {
            console.log(
                "force flag is on, deployment will continue even if previous deployment already exists"
            );
            applyArgs.push("--force");
        }
        return this.execute(applyArgs);
    }
    // `kubectl describe <type> <name>`; silent suppresses command echo/output.
    describe(resourceType, resourceName, silent) {
        return this.execute(["describe", resourceType, resourceName], silent);
    }
    /**
     * Parses `kubectl describe deployment` output for the "NewReplicaSet:" line
     * and returns the replica-set name on it (empty string if not found).
     * The substr(14) skips the "NewReplicaSet:" prefix before trimming.
     */
    getNewReplicaSet(deployment) {
        let newReplicaSet = "";
        const result = this.describe("deployment", deployment, true);
        if (result && result.stdout) {
            const stdout = result.stdout.split("\n");
            stdout.forEach((line) => {
                if (!!line && line.toLowerCase().indexOf("newreplicaset") > -1) {
                    newReplicaSet = line.substr(14).trim().split(" ")[0];
                }
            });
        }
        return newReplicaSet;
    }
    // `kubectl annotate <type> <name> <annotation> --overwrite`.
    annotate(resourceType, resourceName, annotation) {
        let args = ["annotate", resourceType, resourceName];
        args.push(annotation);
        args.push(`--overwrite`);
        return this.execute(args);
    }
    // Annotate every resource defined in the given manifest file(s).
    annotateFiles(files, annotation) {
        let args = ["annotate"];
        args = args.concat(["-f", this.createInlineArray(files)]);
        args.push(annotation);
        args.push(`--overwrite`);
        return this.execute(args);
    }
    // Apply the given labels to every resource defined in the manifest file(s).
    labelFiles(files, labels) {
        let args = ["label"];
        args = args.concat(["-f", this.createInlineArray(files)]);
        args = args.concat(labels);
        args.push(`--overwrite`);
        return this.execute(args);
    }
    // All pods in the namespace, as parsed-ready JSON on stdout.
    getAllPods() {
        return this.execute(["get", "pods", "-o", "json"], true);
    }
    getClusterInfo() {
        return this.execute(["cluster-info"], true);
    }
    // Blocks until the rollout of <type>/<name> completes (kubectl waits).
    checkRolloutStatus(resourceType, name) {
        return this.execute(["rollout", "status", resourceType + "/" + name]);
    }
    // `kubectl get <type>/<name> -o json`.
    getResource(resourceType, name) {
        return this.execute(["get", resourceType + "/" + name, "-o", "json"]);
    }
    /**
     * Extracts {type, name} pairs from `kubectl apply` stdout, keeping only
     * resource types whose name starts with one of filterResourceTypes.
     * NOTE(review): JSON.parse on words[1] presumably strips the quotes kubectl
     * puts around resource names (e.g. deployment.apps "foo") — verify against
     * actual kubectl output format.
     */
    getResources(applyOutput, filterResourceTypes) {
        const outputLines = applyOutput.split("\n");
        const results = [];
        outputLines.forEach((line) => {
            const words = line.split(" ");
            if (words.length > 2) {
                const resourceType = words[0].trim();
                const resourceName = JSON.parse(words[1].trim());
                if (
                    filterResourceTypes.filter(
                        (type) =>
                            !!type &&
                            resourceType.toLowerCase().startsWith(type.toLowerCase())
                    ).length > 0
                ) {
                    results.push({
                        type: resourceType,
                        name: resourceName,
                    });
                }
            }
        });
        return results;
    }
    /**
     * Runs an arbitrary kubectl subcommand. Note that `args` is passed as a
     * single argv element, not split on whitespace.
     * @throws Error("NullCommandForKubectl") when customCommand is falsy.
     */
    executeCommand(customCommand, args) {
        if (!customCommand) throw new Error("NullCommandForKubectl");
        return args
            ? this.execute([customCommand, args])
            : this.execute([customCommand]);
    }
    // `kubectl delete ...` — accepts a single string or an argv array.
    delete(args) {
        if (typeof args === "string") return this.execute(["delete", args]);
        else return this.execute(["delete"].concat(args));
    }
    // Central exec path: appends TLS-skip flag and namespace, runs synchronously.
    execute(args, silent) {
        if (this.ignoreSSLErrors) {
            args.push("--insecure-skip-tls-verify");
        }
        args = args.concat(["--namespace", this.namespace]);
        const command = new tool_runner_1.ToolRunner(this.kubectlPath);
        command.arg(args);
        return command.execSync({ silent: !!silent });
    }
    // Joins multiple file paths into kubectl's comma-separated -f syntax.
    createInlineArray(str) {
        if (typeof str === "string") {
            return str;
        }
        return str.join(",");
    }
}
exports.Kubectl = Kubectl;
|
||||
58
lib/run.js
58
lib/run.js
@ -1,58 +0,0 @@
|
||||
"use strict";
// TypeScript down-level helper for async/await (compiler-generated).
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.run = void 0;
const core = require("@actions/core");
const kubectl_1 = require("./types/kubectl");
const deploy_1 = require("./actions/deploy");
const promote_1 = require("./actions/promote");
const reject_1 = require("./actions/reject");
const action_1 = require("./types/action");
const deploymentStrategy_1 = require("./types/deploymentStrategy");
/**
 * Entry point for the action: parses the `action`, `strategy`, and
 * `manifests` inputs, builds a Kubectl wrapper scoped to the requested
 * namespace, and dispatches to deploy/promote/reject.
 */
function run() {
    return __awaiter(this, void 0, void 0, function* () {
        // verify kubeconfig is set
        if (!process.env["KUBECONFIG"])
            core.warning("KUBECONFIG env is not explicitly set. Ensure cluster context is set by using k8s-set-context action.");
        // get inputs
        const action = action_1.parseAction(core.getInput("action", { required: true }));
        const strategy = deploymentStrategy_1.parseDeploymentStrategy(core.getInput("strategy"));
        const manifestsInput = core.getInput("manifests", { required: true });
        const manifestFilePaths = manifestsInput
            .split(/[\n,;]+/) // split into each individual manifest
            .map((manifest) => manifest.trim()) // remove surrounding whitespace
            .filter((manifest) => manifest.length > 0); // remove any blanks
        // create kubectl
        const kubectlPath = yield kubectl_1.getKubectlPath();
        const namespace = core.getInput("namespace") || "default";
        const kubectl = new kubectl_1.Kubectl(kubectlPath, namespace, true);
        // run action
        switch (action) {
            case action_1.Action.DEPLOY: {
                yield deploy_1.deploy(kubectl, manifestFilePaths, strategy);
                break;
            }
            case action_1.Action.PROMOTE: {
                yield promote_1.promote(kubectl, manifestFilePaths, strategy);
                break;
            }
            case action_1.Action.REJECT: {
                yield reject_1.reject(kubectl, manifestFilePaths, strategy);
                break;
            }
            default: {
                throw Error('Not a valid action. The allowed actions are "deploy", "promote", and "reject".');
            }
        }
    });
}
exports.run = run;
// Kick off the action; any rejection marks the workflow step as failed.
run().catch(core.setFailed);
|
||||
@ -1,279 +0,0 @@
|
||||
"use strict";
// TypeScript down-level helper for async/await (compiler-generated).
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.fetchResource = exports.isServiceSelectorSubsetOfMatchLabel = exports.getServiceSelector = exports.getDeploymentMatchLabels = exports.getBlueGreenResourceName = exports.addBlueGreenLabelsAndAnnotations = exports.getNewBlueGreenObject = exports.createWorkloadsWithLabel = exports.isServiceRouted = exports.getManifestObjects = exports.deleteObjects = exports.deleteWorkloadsAndServicesWithLabel = exports.deleteWorkloadsWithLabel = exports.routeBlueGreen = exports.STABLE_SUFFIX = exports.GREEN_SUFFIX = exports.BLUE_GREEN_VERSION_LABEL = exports.NONE_LABEL_VALUE = exports.GREEN_LABEL_VALUE = void 0;
const core = require("@actions/core");
const fs = require("fs");
const yaml = require("js-yaml");
const kubernetesTypes_1 = require("../../types/kubernetesTypes");
const fileHelper = require("../../utilities/fileUtils");
const serviceBlueGreenHelper_1 = require("./serviceBlueGreenHelper");
const ingressBlueGreenHelper_1 = require("./ingressBlueGreenHelper");
const smiBlueGreenHelper_1 = require("./smiBlueGreenHelper");
const manifestUpdateUtils_1 = require("../../utilities/manifestUpdateUtils");
const manifestSpecLabelUtils_1 = require("../../utilities/manifestSpecLabelUtils");
const kubectlUtils_1 = require("../../utilities/kubectlUtils");
const timeUtils_1 = require("../../utilities/timeUtils");
const routeStrategy_1 = require("../../types/routeStrategy");
// Label value marking the new ("green") side of a blue/green rollout.
exports.GREEN_LABEL_VALUE = "green";
// Label value used when promoting: marks the stable (non-green) side.
exports.NONE_LABEL_VALUE = "None";
// Label key used to tag every blue/green-managed resource with its color.
exports.BLUE_GREEN_VERSION_LABEL = "k8s.deploy.color";
// Name suffixes appended to resources for their green / stable copies.
exports.GREEN_SUFFIX = "-green";
exports.STABLE_SUFFIX = "-stable";
|
||||
/**
 * Waits out the configured version-switch buffer, then routes traffic to the
 * green deployments using the requested strategy (ingress, SMI, or plain
 * service selector).
 *
 * NOTE(review): the buffer is multiplied by 60,000 (minutes), and the log
 * messages say "minute(s)", but the input validation elsewhere caps it at
 * 300 — confirm whether the intended unit is seconds or minutes.
 */
function routeBlueGreen(kubectl, inputManifestFiles, routeStrategy) {
    return __awaiter(this, void 0, void 0, function* () {
        // sleep for buffer time
        const bufferTime = parseInt(core.getInput("version-switch-buffer") || "0");
        if (bufferTime < 0 || bufferTime > 300)
            throw Error("Version switch buffer must be between 0 and 300 (inclusive)");
        const startSleepDate = new Date();
        core.info(`Starting buffer time of ${bufferTime} minute(s) at ${startSleepDate.toISOString()}`);
        yield timeUtils_1.sleep(bufferTime * 1000 * 60);
        const endSleepDate = new Date();
        core.info(`Stopping buffer time of ${bufferTime} minute(s) at ${endSleepDate.toISOString()}`);
        const manifestObjects = getManifestObjects(inputManifestFiles);
        core.debug("Manifest objects: " + JSON.stringify(manifestObjects));
        // route to new deployments
        if (routeStrategy == routeStrategy_1.RouteStrategy.INGRESS) {
            yield ingressBlueGreenHelper_1.routeBlueGreenIngress(kubectl, exports.GREEN_LABEL_VALUE, manifestObjects.serviceNameMap, manifestObjects.ingressEntityList);
        }
        else if (routeStrategy == routeStrategy_1.RouteStrategy.SMI) {
            yield smiBlueGreenHelper_1.routeBlueGreenSMI(kubectl, exports.GREEN_LABEL_VALUE, manifestObjects.serviceEntityList);
        }
        else {
            yield serviceBlueGreenHelper_1.routeBlueGreenService(kubectl, exports.GREEN_LABEL_VALUE, manifestObjects.serviceEntityList);
        }
    });
}
exports.routeBlueGreen = routeBlueGreen;
|
||||
/**
 * Deletes the workloads in deploymentEntityList. When deleteLabel is
 * NONE_LABEL_VALUE the stable (unsuffixed) copies are deleted; otherwise the
 * green ("-green" suffixed) copies are.
 */
function deleteWorkloadsWithLabel(kubectl, deleteLabel, deploymentEntityList) {
    return __awaiter(this, void 0, void 0, function* () {
        const resourcesToDelete = [];
        deploymentEntityList.forEach((inputObject) => {
            const name = inputObject.metadata.name;
            const kind = inputObject.kind;
            if (deleteLabel === exports.NONE_LABEL_VALUE) {
                // delete stable deployments
                const resourceToDelete = { name, kind };
                resourcesToDelete.push(resourceToDelete);
            }
            else {
                // delete new green deployments
                const resourceToDelete = {
                    name: getBlueGreenResourceName(name, exports.GREEN_SUFFIX),
                    kind: kind,
                };
                resourcesToDelete.push(resourceToDelete);
            }
        });
        yield deleteObjects(kubectl, resourcesToDelete);
    });
}
exports.deleteWorkloadsWithLabel = deleteWorkloadsWithLabel;
/**
 * Same as deleteWorkloadsWithLabel, but deletes the matching services as
 * well (stable copies when deleteLabel is NONE_LABEL_VALUE, green copies
 * otherwise).
 */
function deleteWorkloadsAndServicesWithLabel(kubectl, deleteLabel, deploymentEntityList, serviceEntityList) {
    return __awaiter(this, void 0, void 0, function* () {
        // need to delete services and deployments
        const deletionEntitiesList = deploymentEntityList.concat(serviceEntityList);
        const resourcesToDelete = [];
        deletionEntitiesList.forEach((inputObject) => {
            const name = inputObject.metadata.name;
            const kind = inputObject.kind;
            if (deleteLabel === exports.NONE_LABEL_VALUE) {
                // delete stable objects
                const resourceToDelete = { name, kind };
                resourcesToDelete.push(resourceToDelete);
            }
            else {
                // delete green labels
                const resourceToDelete = {
                    name: getBlueGreenResourceName(name, exports.GREEN_SUFFIX),
                    kind: kind,
                };
                resourcesToDelete.push(resourceToDelete);
            }
        });
        yield deleteObjects(kubectl, resourcesToDelete);
    });
}
exports.deleteWorkloadsAndServicesWithLabel = deleteWorkloadsAndServicesWithLabel;
/**
 * Deletes each {kind, name} entry via `kubectl delete`, sequentially.
 * Failures are deliberately swallowed: the resource may simply not exist
 * (e.g. rejecting a rollout that never created its green copies).
 */
function deleteObjects(kubectl, deleteList) {
    return __awaiter(this, void 0, void 0, function* () {
        // delete services and deployments
        for (const delObject of deleteList) {
            try {
                const result = yield kubectl.delete([delObject.kind, delObject.name]);
                kubectlUtils_1.checkForErrors([result]);
            }
            catch (ex) {
                // Ignore failures of delete if it doesn't exist
            }
        }
    });
}
exports.deleteObjects = deleteObjects;
|
||||
// other common functions
/**
 * Reads and parses every manifest file, classifying each Kubernetes object:
 * deployments, routed services (services whose selector matches one of the
 * parsed deployments), unrouted services, ingresses, and everything else.
 * Also builds a map from each routed service name to its "-green" name.
 *
 * NOTE(review): isServiceRouted is checked against only the deployments
 * parsed *so far*, so a service defined before its deployment (in file or
 * document order) would be classified as unrouted — confirm whether manifest
 * ordering is guaranteed by callers.
 */
function getManifestObjects(filePaths) {
    const deploymentEntityList = [];
    const routedServiceEntityList = [];
    const unroutedServiceEntityList = [];
    const ingressEntityList = [];
    const otherEntitiesList = [];
    const serviceNameMap = new Map();
    filePaths.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath).toString();
        yaml.safeLoadAll(fileContents, (inputObject) => {
            if (!!inputObject) {
                const kind = inputObject.kind;
                const name = inputObject.metadata.name;
                if (kubernetesTypes_1.isDeploymentEntity(kind)) {
                    deploymentEntityList.push(inputObject);
                }
                else if (kubernetesTypes_1.isServiceEntity(kind)) {
                    if (isServiceRouted(inputObject, deploymentEntityList)) {
                        routedServiceEntityList.push(inputObject);
                        serviceNameMap.set(name, getBlueGreenResourceName(name, exports.GREEN_SUFFIX));
                    }
                    else {
                        unroutedServiceEntityList.push(inputObject);
                    }
                }
                else if (kubernetesTypes_1.isIngressEntity(kind)) {
                    ingressEntityList.push(inputObject);
                }
                else {
                    otherEntitiesList.push(inputObject);
                }
            }
        });
    });
    return {
        serviceEntityList: routedServiceEntityList,
        serviceNameMap: serviceNameMap,
        unroutedServiceEntityList: unroutedServiceEntityList,
        deploymentEntityList: deploymentEntityList,
        ingressEntityList: ingressEntityList,
        otherObjects: otherEntitiesList,
    };
}
exports.getManifestObjects = getManifestObjects;
/**
 * True when the service's selector matches (is a subset of) the matchLabels
 * of at least one deployment in the given list.
 */
function isServiceRouted(serviceObject, deploymentEntityList) {
    let shouldBeRouted = false;
    const serviceSelector = getServiceSelector(serviceObject);
    if (serviceSelector) {
        if (deploymentEntityList.some((depObject) => {
            // finding if there is a deployment in the given manifests the service targets
            const matchLabels = getDeploymentMatchLabels(depObject);
            return (matchLabels &&
                isServiceSelectorSubsetOfMatchLabel(serviceSelector, matchLabels));
        })) {
            shouldBeRouted = true;
        }
    }
    return shouldBeRouted;
}
exports.isServiceRouted = isServiceRouted;
|
||||
/**
 * Creates a blue/green copy of every workload (renamed with "-green" when
 * nextLabel is the green label, left as-is otherwise), writes the copies to
 * temp manifest files, and applies them.
 *
 * @returns {{result, newFilePaths}} the kubectl apply result and the paths
 *   of the generated manifest files.
 */
function createWorkloadsWithLabel(kubectl, deploymentObjectList, nextLabel) {
    return __awaiter(this, void 0, void 0, function* () {
        const newObjectsList = [];
        deploymentObjectList.forEach((inputObject) => {
            // creating deployment with label
            const newBlueGreenObject = getNewBlueGreenObject(inputObject, nextLabel);
            core.debug("New blue-green object is: " + JSON.stringify(newBlueGreenObject));
            newObjectsList.push(newBlueGreenObject);
        });
        const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
        const result = yield kubectl.apply(manifestFiles);
        return { result: result, newFilePaths: manifestFiles };
    });
}
exports.createWorkloadsWithLabel = createWorkloadsWithLabel;
/**
 * Deep-copies inputObject, renames it with the "-green" suffix when the
 * green label value is given, and stamps the blue/green labels/annotations.
 * The input object is never mutated.
 */
function getNewBlueGreenObject(inputObject, labelValue) {
    const newObject = JSON.parse(JSON.stringify(inputObject));
    // Updating name only if label is green label is given
    if (labelValue === exports.GREEN_LABEL_VALUE) {
        newObject.metadata.name = getBlueGreenResourceName(inputObject.metadata.name, exports.GREEN_SUFFIX);
    }
    // Adding labels and annotations
    addBlueGreenLabelsAndAnnotations(newObject, labelValue);
    return newObject;
}
exports.getNewBlueGreenObject = getNewBlueGreenObject;
|
||||
/**
 * Mutates inputObject in place: stamps the k8s.deploy.color label (set to
 * labelValue) onto the object's labels, its selector labels, and — for
 * workloads — its pod-template spec labels.
 */
function addBlueGreenLabelsAndAnnotations(inputObject, labelValue) {
    //creating the k8s.deploy.color label
    const newLabels = new Map();
    newLabels[exports.BLUE_GREEN_VERSION_LABEL] = labelValue;
    // updating object labels and selector labels
    manifestUpdateUtils_1.updateObjectLabels(inputObject, newLabels, false);
    manifestUpdateUtils_1.updateSelectorLabels(inputObject, newLabels, false);
    // updating spec (pod template) labels for anything that is NOT a Service
    // (the original comment said "if it is a service", which contradicted the
    // negated check below)
    if (!kubernetesTypes_1.isServiceEntity(inputObject.kind)) {
        manifestSpecLabelUtils_1.updateSpecLabels(inputObject, newLabels, false);
    }
}
exports.addBlueGreenLabelsAndAnnotations = addBlueGreenLabelsAndAnnotations;
|
||||
/**
 * Builds the blue/green variant name for a resource by appending the given
 * suffix (e.g. "-green" or "-stable") to its base name.
 */
function getBlueGreenResourceName(name, suffix) {
    return name.concat(suffix);
}
|
||||
exports.getBlueGreenResourceName = getBlueGreenResourceName;
|
||||
/**
 * Returns the labels a service selector should be matched against: for a
 * bare Pod, its metadata labels; for any other workload, its
 * spec.selector.matchLabels. Returns undefined when neither is present.
 * (The _a.._d temporaries are compiler-generated optional-chaining guards.)
 */
function getDeploymentMatchLabels(deploymentObject) {
    var _a, _b, _c, _d;
    if (((_a = deploymentObject === null || deploymentObject === void 0 ? void 0 : deploymentObject.kind) === null || _a === void 0 ? void 0 : _a.toUpperCase()) ==
        kubernetesTypes_1.KubernetesWorkload.POD.toUpperCase() && ((_b = deploymentObject === null || deploymentObject === void 0 ? void 0 : deploymentObject.metadata) === null || _b === void 0 ? void 0 : _b.labels)) {
        return deploymentObject.metadata.labels;
    }
    else if ((_d = (_c = deploymentObject === null || deploymentObject === void 0 ? void 0 : deploymentObject.spec) === null || _c === void 0 ? void 0 : _c.selector) === null || _d === void 0 ? void 0 : _d.matchLabels) {
        return deploymentObject.spec.selector.matchLabels;
    }
}
exports.getDeploymentMatchLabels = getDeploymentMatchLabels;
/**
 * Returns the service's spec.selector, or undefined when the service (or
 * its spec/selector) is missing.
 */
function getServiceSelector(serviceObject) {
    var _a;
    if ((_a = serviceObject === null || serviceObject === void 0 ? void 0 : serviceObject.spec) === null || _a === void 0 ? void 0 : _a.selector) {
        return serviceObject.spec.selector;
    }
}
exports.getServiceSelector = getServiceSelector;
|
||||
/**
 * True when every key/value pair in the service selector is present with the
 * same value in the deployment's matchLabels (i.e. the selector is a subset
 * of the matchLabels). Both objects are flattened into Maps via a
 * JSON.parse reviver walk; the root "" key produced by the reviver is
 * skipped by the `!!key` guard.
 *
 * BUG FIX: the mismatch test was previously parenthesized as
 *   (!has(key) || get(key)) != value
 * instead of
 *   (!has(key) || get(key) != value)
 * so a key missing from matchLabels was compared as `true != value`, which
 * is loosely FALSE for values like "1" — such selectors wrongly matched.
 */
function isServiceSelectorSubsetOfMatchLabel(serviceSelector, matchLabels) {
    const serviceSelectorMap = new Map();
    const matchLabelsMap = new Map();
    JSON.parse(JSON.stringify(serviceSelector), (key, value) => {
        serviceSelectorMap.set(key, value);
    });
    JSON.parse(JSON.stringify(matchLabels), (key, value) => {
        matchLabelsMap.set(key, value);
    });
    let isMatch = true;
    serviceSelectorMap.forEach((value, key) => {
        // a selector entry fails when the key is absent or its value differs
        if (!!key && (!matchLabelsMap.has(key) || matchLabelsMap.get(key) != value))
            isMatch = false;
    });
    return isMatch;
}
|
||||
exports.isServiceSelectorSubsetOfMatchLabel = isServiceSelectorSubsetOfMatchLabel;
|
||||
/**
 * Fetches <kind>/<name> from the cluster as a parsed object with
 * cluster-specific details stripped.
 *
 * Returns null when kubectl errors (non-empty stderr or no result), and
 * resolves with undefined when stdout is empty or the cleanup step throws
 * (the parse failure is only logged at debug level).
 */
function fetchResource(kubectl, kind, name) {
    return __awaiter(this, void 0, void 0, function* () {
        const result = yield kubectl.getResource(kind, name);
        if (result == null || !!result.stderr) {
            return null;
        }
        if (!!result.stdout) {
            const resource = JSON.parse(result.stdout);
            try {
                manifestUpdateUtils_1.UnsetClusterSpecificDetails(resource);
                return resource;
            }
            catch (ex) {
                core.debug(`Exception occurred while Parsing ${resource} in Json object: ${ex}`);
            }
        }
    });
}
exports.fetchResource = fetchResource;
|
||||
@ -1,149 +0,0 @@
|
||||
"use strict";
// TypeScript down-level helper for async/await (compiler-generated).
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.updateIngressBackend = exports.getUpdatedBlueGreenIngress = exports.validateIngressesState = exports.routeBlueGreenIngress = exports.rejectBlueGreenIngress = exports.promoteBlueGreenIngress = exports.deployBlueGreenIngress = void 0;
const fileHelper = require("../../utilities/fileUtils");
const blueGreenHelper_1 = require("./blueGreenHelper");
const core = require("@actions/core");
// Upper-cased key name used to locate ingress `backend` sections when
// walking manifest objects (compared against key.toUpperCase()).
const BACKEND = "BACKEND";
|
||||
/**
 * Ingress-strategy blue/green deploy: applies green copies of all workloads,
 * then green copies of the routed services plus all other/unrouted objects.
 *
 * @returns the {result, newFilePaths} from creating the green workloads, for
 *   rollout-stability checks by the caller.
 */
function deployBlueGreenIngress(kubectl, filePaths) {
    return __awaiter(this, void 0, void 0, function* () {
        // get all kubernetes objects defined in manifest files
        const manifestObjects = blueGreenHelper_1.getManifestObjects(filePaths);
        // create deployments with green label value
        // BUG FIX: createWorkloadsWithLabel is async and was not yielded, so
        // the green services below could be applied before (or concurrently
        // with) the green deployments; await it for deterministic ordering,
        // matching deployBlueGreenService.
        const result = yield blueGreenHelper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blueGreenHelper_1.GREEN_LABEL_VALUE);
        // create new services and other objects
        let newObjectsList = [];
        manifestObjects.serviceEntityList.forEach((inputObject) => {
            const newBlueGreenObject = blueGreenHelper_1.getNewBlueGreenObject(inputObject, blueGreenHelper_1.GREEN_LABEL_VALUE);
            newObjectsList.push(newBlueGreenObject);
        });
        newObjectsList = newObjectsList
            .concat(manifestObjects.otherObjects)
            .concat(manifestObjects.unroutedServiceEntityList);
        const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
        yield kubectl.apply(manifestFiles);
        return result;
    });
}
exports.deployBlueGreenIngress = deployBlueGreenIngress;
|
||||
/**
 * Ingress-strategy promote: verifies the ingresses currently target the
 * green services, then re-creates the workloads and routed services under
 * their stable (unsuffixed) names.
 *
 * @returns the {result, newFilePaths} from creating the stable workloads.
 * @throws Error when the ingresses are not in a promotable state.
 */
function promoteBlueGreenIngress(kubectl, manifestObjects) {
    return __awaiter(this, void 0, void 0, function* () {
        //checking if anything to promote
        if (!validateIngressesState(kubectl, manifestObjects.ingressEntityList, manifestObjects.serviceNameMap)) {
            // throw an Error (not a bare string) so callers get a stack trace
            throw new Error("Ingress not in promote state");
        }
        // create stable deployments with new configuration
        // BUG FIX: createWorkloadsWithLabel is async and was not yielded, so
        // the stable services below could race the stable deployments; await it.
        const result = yield blueGreenHelper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blueGreenHelper_1.NONE_LABEL_VALUE);
        // create stable services with new configuration
        const newObjectsList = [];
        manifestObjects.serviceEntityList.forEach((inputObject) => {
            const newBlueGreenObject = blueGreenHelper_1.getNewBlueGreenObject(inputObject, blueGreenHelper_1.NONE_LABEL_VALUE);
            newObjectsList.push(newBlueGreenObject);
        });
        const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
        yield kubectl.apply(manifestFiles);
        return result;
    });
}
exports.promoteBlueGreenIngress = promoteBlueGreenIngress;
|
||||
/**
 * Ingress-strategy reject: points the ingresses back at the stable services,
 * then deletes the green workloads and green services.
 */
function rejectBlueGreenIngress(kubectl, filePaths) {
    return __awaiter(this, void 0, void 0, function* () {
        // get all kubernetes objects defined in manifest files
        const manifestObjects = blueGreenHelper_1.getManifestObjects(filePaths);
        // route ingress to stables services
        yield routeBlueGreenIngress(kubectl, null, manifestObjects.serviceNameMap, manifestObjects.ingressEntityList);
        // delete green services and deployments
        yield blueGreenHelper_1.deleteWorkloadsAndServicesWithLabel(kubectl, blueGreenHelper_1.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList, manifestObjects.serviceEntityList);
    });
}
exports.rejectBlueGreenIngress = rejectBlueGreenIngress;
/**
 * Applies ingress manifests routed for the requested side. With a falsy
 * nextLabel the original (stable-pointing) ingresses are re-applied;
 * otherwise routed ingresses are rewritten to target the green services.
 *
 * NOTE(review): the else-branch passes GREEN_LABEL_VALUE rather than
 * nextLabel — equivalent as long as the only non-null value callers pass is
 * the green label; confirm before adding new label values.
 */
function routeBlueGreenIngress(kubectl, nextLabel, serviceNameMap, ingressEntityList) {
    return __awaiter(this, void 0, void 0, function* () {
        let newObjectsList = [];
        if (!nextLabel) {
            newObjectsList = ingressEntityList.filter((ingress) => isIngressRouted(ingress, serviceNameMap));
        }
        else {
            ingressEntityList.forEach((inputObject) => {
                if (isIngressRouted(inputObject, serviceNameMap)) {
                    const newBlueGreenIngressObject = getUpdatedBlueGreenIngress(inputObject, serviceNameMap, blueGreenHelper_1.GREEN_LABEL_VALUE);
                    newObjectsList.push(newBlueGreenIngressObject);
                }
                else {
                    newObjectsList.push(inputObject);
                }
            });
        }
        core.debug("New objects: " + JSON.stringify(newObjectsList));
        const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
        yield kubectl.apply(manifestFiles);
    });
}
exports.routeBlueGreenIngress = routeBlueGreenIngress;
|
||||
/**
 * Intended to return true only when every routed ingress currently carries
 * the green color label in the cluster.
 *
 * FIXME(review): the forEach callback is async, but Array.prototype.forEach
 * does not await its callbacks — this function returns synchronously before
 * any fetchResource resolves, so the returned flag is effectively always its
 * initial value (true). Fixing it requires a for...of with await and making
 * the function (and its callers) async — an interface change, so it is only
 * flagged here.
 */
function validateIngressesState(kubectl, ingressEntityList, serviceNameMap) {
    let areIngressesTargetingNewServices = true;
    ingressEntityList.forEach((inputObject) => __awaiter(this, void 0, void 0, function* () {
        var _a;
        if (isIngressRouted(inputObject, serviceNameMap)) {
            //querying existing ingress
            const existingIngress = yield blueGreenHelper_1.fetchResource(kubectl, inputObject.kind, inputObject.metadata.name);
            if (!!existingIngress) {
                const currentLabel = (_a = existingIngress === null || existingIngress === void 0 ? void 0 : existingIngress.metadata) === null || _a === void 0 ? void 0 : _a.labels[blueGreenHelper_1.BLUE_GREEN_VERSION_LABEL];
                // if not green label, then wrong configuration
                if (currentLabel != blueGreenHelper_1.GREEN_LABEL_VALUE)
                    areIngressesTargetingNewServices = false;
            }
            else {
                // no ingress at all, so nothing to promote
                areIngressesTargetingNewServices = false;
            }
        }
    }));
    return areIngressesTargetingNewServices;
}
exports.validateIngressesState = validateIngressesState;
|
||||
/**
 * True when the ingress references at least one service present in
 * serviceNameMap (i.e. a service provided in the given manifests). The
 * JSON round-trip with a reviver serves as a generic deep walk over every
 * key/value pair of the ingress object.
 */
function isIngressRouted(ingressObject, serviceNameMap) {
    let routed = false;
    const inspect = (key, value) => {
        if (key === "serviceName" && serviceNameMap.has(value)) {
            routed = true;
        }
        return value;
    };
    JSON.parse(JSON.stringify(ingressObject), inspect);
    return routed;
}
|
||||
/**
 * Returns a deep copy of the ingress stamped with blue/green labels and with
 * every backend serviceName rewritten to its green name. With a falsy type
 * the ingress is returned unchanged (stable routing).
 */
function getUpdatedBlueGreenIngress(inputObject, serviceNameMap, type) {
    if (!type) {
        return inputObject;
    }
    const newObject = JSON.parse(JSON.stringify(inputObject));
    // add green labels and values
    blueGreenHelper_1.addBlueGreenLabelsAndAnnotations(newObject, type);
    // update ingress labels
    return updateIngressBackend(newObject, serviceNameMap);
}
exports.getUpdatedBlueGreenIngress = getUpdatedBlueGreenIngress;
|
||||
/**
 * Rewrites every ingress `backend` section so its serviceName points at the
 * blue/green (suffixed) service — but only for services present in
 * serviceNameMap. The object is walked via a JSON.parse reviver; the
 * rewritten copy is returned.
 */
function updateIngressBackend(inputObject, serviceNameMap) {
    const rewriteBackend = (key, value) => {
        if (key.toUpperCase() === BACKEND) {
            const { serviceName } = value;
            if (serviceNameMap.has(serviceName)) {
                // swap in the blue/green service name only when the service is
                // provided in the given manifests
                value.serviceName = serviceNameMap.get(serviceName);
            }
        }
        return value;
    };
    const updated = JSON.parse(JSON.stringify(inputObject), rewriteBackend);
    return updated;
}
exports.updateIngressBackend = updateIngressBackend;
|
||||
@ -1,103 +0,0 @@
|
||||
"use strict";
// TypeScript down-level helper for async/await (compiler-generated).
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.getServiceSpecLabel = exports.validateServicesState = exports.routeBlueGreenService = exports.rejectBlueGreenService = exports.promoteBlueGreenService = exports.deployBlueGreenService = void 0;
const fileHelper = require("../../utilities/fileUtils");
const blueGreenHelper_1 = require("./blueGreenHelper");
|
||||
/**
 * Service-strategy blue/green deploy: applies green copies of all workloads,
 * then (without modification) all other objects, ingresses, and unrouted
 * services. Routed services are NOT applied here — they are flipped later by
 * routeBlueGreenService.
 *
 * @returns the {result, newFilePaths} from creating the green workloads.
 */
function deployBlueGreenService(kubectl, filePaths) {
    return __awaiter(this, void 0, void 0, function* () {
        const manifestObjects = blueGreenHelper_1.getManifestObjects(filePaths);
        // create deployments with green label value
        const result = yield blueGreenHelper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blueGreenHelper_1.GREEN_LABEL_VALUE);
        // create other non deployment and non service entities
        const newObjectsList = manifestObjects.otherObjects
            .concat(manifestObjects.ingressEntityList)
            .concat(manifestObjects.unroutedServiceEntityList);
        const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
        if (manifestFiles.length > 0)
            yield kubectl.apply(manifestFiles);
        // returning deployment details to check for rollout stability
        return result;
    });
}
exports.deployBlueGreenService = deployBlueGreenService;
|
||||
/**
 * Service-strategy promote: verifies the routed services currently target
 * the green deployments, then re-creates the workloads under their stable
 * (unsuffixed) configuration.
 *
 * @returns the {result, newFilePaths} from creating the stable workloads.
 * @throws Error when the services are not in a promotable state.
 */
function promoteBlueGreenService(kubectl, manifestObjects) {
    return __awaiter(this, void 0, void 0, function* () {
        // checking if services are in the right state ie. targeting green deployments
        if (!(yield validateServicesState(kubectl, manifestObjects.serviceEntityList))) {
            // throw an Error (not a bare string) so callers get a stack trace;
            // also fixes the "Not inP promote state" typo in the message
            throw new Error("Not in promote state");
        }
        // creating stable deployments with new configurations
        return yield blueGreenHelper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blueGreenHelper_1.NONE_LABEL_VALUE);
    });
}
exports.promoteBlueGreenService = promoteBlueGreenService;
|
||||
// Rolls a blue/green service rollout back: points the services at the
// stable workloads again, then removes the green-labeled deployments.
function rejectBlueGreenService(kubectl, filePaths) {
    return __awaiter(this, void 0, void 0, function* () {
        // Parse every kubernetes object from the manifest files.
        const parsedObjects = blueGreenHelper_1.getManifestObjects(filePaths);
        // Re-route services back to the stable (none-labeled) workloads.
        yield routeBlueGreenService(kubectl, blueGreenHelper_1.NONE_LABEL_VALUE, parsedObjects.serviceEntityList);
        // Remove the rejected green workloads.
        yield blueGreenHelper_1.deleteWorkloadsWithLabel(kubectl, blueGreenHelper_1.GREEN_LABEL_VALUE, parsedObjects.deploymentEntityList);
    });
}
exports.rejectBlueGreenService = rejectBlueGreenService;
|
||||
// Points every service in the list at the workloads labeled with nextLabel
// by re-applying the services with updated blue/green selectors.
function routeBlueGreenService(kubectl, nextLabel, serviceEntityList) {
    return __awaiter(this, void 0, void 0, function* () {
        const updatedServices = serviceEntityList.map((serviceObject) => getUpdatedBlueGreenService(serviceObject, nextLabel));
        // Apply the re-labeled services so traffic shifts to nextLabel.
        const manifestFiles = fileHelper.writeObjectsToFile(updatedServices);
        yield kubectl.apply(manifestFiles);
    });
}
exports.routeBlueGreenService = routeBlueGreenService;
|
||||
// Returns a deep copy of the given service with blue/green labels and
// annotations set to the provided label value; the input is not mutated.
function getUpdatedBlueGreenService(inputObject, labelValue) {
    const serviceCopy = JSON.parse(JSON.stringify(inputObject));
    blueGreenHelper_1.addBlueGreenLabelsAndAnnotations(serviceCopy, labelValue);
    return serviceCopy;
}
|
||||
// Verifies that every routed service currently targets the green
// workloads. Resolves to false if any service is missing from the cluster
// or is not selecting the green label.
function validateServicesState(kubectl, serviceEntityList) {
    return __awaiter(this, void 0, void 0, function* () {
        let allGreen = true;
        for (const serviceObject of serviceEntityList) {
            // Look up the currently deployed version of this service.
            const deployedService = yield blueGreenHelper_1.fetchResource(kubectl, serviceObject.kind, serviceObject.metadata.name);
            if (!deployedService) {
                // The routed service isn't deployed at all.
                allGreen = false;
            }
            else if (getServiceSpecLabel(deployedService) != blueGreenHelper_1.GREEN_LABEL_VALUE) {
                // The service exists but does not target green workloads.
                allGreen = false;
            }
        }
        return allGreen;
    });
}
exports.validateServicesState = validateServicesState;
|
||||
/**
 * Reads the blue/green version label out of a service's spec selector.
 * @returns the selector's blue/green label value, or "" when the object,
 *          its spec, its selector, or the label itself is missing.
 */
function getServiceSpecLabel(inputObject) {
    var _a;
    // Fix: also guard `selector` itself — the original compiled optional
    // chain only guarded `spec`, so `spec.selector[...]` threw a TypeError
    // for services that have a spec but no selector (e.g. ExternalName).
    const selector = (_a = inputObject === null || inputObject === void 0 ? void 0 : inputObject.spec) === null || _a === void 0 ? void 0 : _a.selector;
    if (selector && selector[blueGreenHelper_1.BLUE_GREEN_VERSION_LABEL]) {
        return selector[blueGreenHelper_1.BLUE_GREEN_VERSION_LABEL];
    }
    return "";
}
exports.getServiceSpecLabel = getServiceSpecLabel;
|
||||
@ -1,189 +0,0 @@
|
||||
"use strict";
// TypeScript-emitted async/await helper: runs a generator-based function
// body inside a Promise, stepping the generator and adopting any yielded
// promise-like values. Reuses a pre-existing global __awaiter if present.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    // Wrap non-promise yielded values in the promise constructor P.
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        // Drive the generator until done, then resolve with its return value.
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.cleanupSMI = exports.validateTrafficSplitsState = exports.routeBlueGreenSMI = exports.getSMIServiceResource = exports.setupSMI = exports.rejectBlueGreenSMI = exports.promoteBlueGreenSMI = exports.deployBlueGreenSMI = void 0;
|
||||
const kubectlUtils = require("../../utilities/trafficSplitUtils");
|
||||
const fileHelper = require("../../utilities/fileUtils");
|
||||
const blueGreenHelper_1 = require("./blueGreenHelper");
|
||||
// Naming suffix and kind for the SMI TrafficSplit object created per service.
const TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX = "-trafficsplit";
const TRAFFIC_SPLIT_OBJECT = "TrafficSplit";
// Traffic weight bounds used when shifting 100% of traffic at once.
const MIN_VAL = 0;
const MAX_VAL = 100;
|
||||
// Deploys a blue/green rollout using SMI TrafficSplits: applies all
// non-workload objects, sets up the auxiliary stable/green services and
// TrafficSplits, then creates the green-labeled workloads.
function deployBlueGreenSMI(kubectl, filePaths) {
    return __awaiter(this, void 0, void 0, function* () {
        // Parse every kubernetes object defined in the manifest files.
        const manifestObjects = blueGreenHelper_1.getManifestObjects(filePaths);
        // Apply services and all remaining non-workload objects unchanged.
        const passthroughObjects = manifestObjects.otherObjects
            .concat(manifestObjects.serviceEntityList)
            .concat(manifestObjects.ingressEntityList)
            .concat(manifestObjects.unroutedServiceEntityList);
        const manifestFiles = fileHelper.writeObjectsToFile(passthroughObjects);
        yield kubectl.apply(manifestFiles);
        // Create the extra per-service objects and the TrafficSplits.
        yield setupSMI(kubectl, manifestObjects.serviceEntityList);
        // Finally create the new (green) workloads.
        return yield blueGreenHelper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blueGreenHelper_1.GREEN_LABEL_VALUE);
    });
}
exports.deployBlueGreenSMI = deployBlueGreenSMI;
|
||||
// Promotes an SMI blue/green rollout: once the TrafficSplits are verified
// to route all traffic to green, the stable workloads are re-created with
// the new configuration.
function promoteBlueGreenSMI(kubectl, manifestObjects) {
    return __awaiter(this, void 0, void 0, function* () {
        const readyToPromote = yield validateTrafficSplitsState(kubectl, manifestObjects.serviceEntityList);
        if (!readyToPromote) {
            throw Error("Not in promote state SMI");
        }
        return yield blueGreenHelper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blueGreenHelper_1.NONE_LABEL_VALUE);
    });
}
exports.promoteBlueGreenSMI = promoteBlueGreenSMI;
|
||||
// Rolls an SMI blue/green rollout back: routes TrafficSplits to stable,
// deletes the green workloads, then removes the auxiliary SMI objects.
function rejectBlueGreenSMI(kubectl, filePaths) {
    return __awaiter(this, void 0, void 0, function* () {
        const parsedObjects = blueGreenHelper_1.getManifestObjects(filePaths);
        // Send all traffic back to the stable workloads.
        yield routeBlueGreenSMI(kubectl, blueGreenHelper_1.NONE_LABEL_VALUE, parsedObjects.serviceEntityList);
        // Delete the rejected green workloads.
        yield blueGreenHelper_1.deleteWorkloadsWithLabel(kubectl, blueGreenHelper_1.GREEN_LABEL_VALUE, parsedObjects.deploymentEntityList);
        // Clean up the TrafficSplits and the extra stable/green services.
        yield cleanupSMI(kubectl, parsedObjects.serviceEntityList);
    });
}
exports.rejectBlueGreenSMI = rejectBlueGreenSMI;
|
||||
/**
 * Creates the auxiliary SMI objects for each routed service: a "-stable"
 * service selecting the stable workloads, a "-green" service selecting the
 * green workloads, and a TrafficSplit initially routing 100% to stable.
 */
function setupSMI(kubectl, serviceEntityList) {
    return __awaiter(this, void 0, void 0, function* () {
        const newObjectsList = [];
        serviceEntityList.forEach((serviceObject) => {
            // set up the services for trafficsplit
            const newStableService = getSMIServiceResource(serviceObject, blueGreenHelper_1.STABLE_SUFFIX);
            const newGreenService = getSMIServiceResource(serviceObject, blueGreenHelper_1.GREEN_SUFFIX);
            newObjectsList.push(newStableService);
            newObjectsList.push(newGreenService);
        });
        // create services
        const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
        yield kubectl.apply(manifestFiles);
        // route to stable service
        // Fix: the original fired createTrafficSplitObject (an async
        // function) inside forEach without awaiting it, so apply failures
        // were unobserved and setupSMI could resolve before the
        // TrafficSplits were actually applied. Await each one instead.
        for (const serviceObject of serviceEntityList) {
            yield createTrafficSplitObject(kubectl, serviceObject.metadata.name, blueGreenHelper_1.NONE_LABEL_VALUE);
        }
    });
}
exports.setupSMI = setupSMI;
|
||||
// Cached SMI TrafficSplit API version, resolved once per process.
let trafficSplitAPIVersion = "";
// Applies a TrafficSplit for the named service, sending 100% of traffic to
// the green backend when nextLabel is the green label, and 100% to the
// stable backend otherwise.
function createTrafficSplitObject(kubectl, name, nextLabel) {
    return __awaiter(this, void 0, void 0, function* () {
        // Resolve (and cache) the TrafficSplit API version on first use.
        if (!trafficSplitAPIVersion) {
            trafficSplitAPIVersion = yield kubectlUtils.getTrafficSplitAPIVersion(kubectl);
        }
        // All-or-nothing weights derived from the target label.
        const routeToGreen = nextLabel === blueGreenHelper_1.GREEN_LABEL_VALUE;
        const stableWeight = routeToGreen ? MIN_VAL : MAX_VAL;
        const greenWeight = routeToGreen ? MAX_VAL : MIN_VAL;
        const trafficSplitObject = JSON.stringify({
            apiVersion: trafficSplitAPIVersion,
            kind: "TrafficSplit",
            metadata: {
                name: blueGreenHelper_1.getBlueGreenResourceName(name, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX),
            },
            spec: {
                service: name,
                backends: [
                    {
                        service: blueGreenHelper_1.getBlueGreenResourceName(name, blueGreenHelper_1.STABLE_SUFFIX),
                        weight: stableWeight,
                    },
                    {
                        service: blueGreenHelper_1.getBlueGreenResourceName(name, blueGreenHelper_1.GREEN_SUFFIX),
                        weight: greenWeight,
                    },
                ],
            },
        });
        // Write the manifest to a file and apply it.
        const trafficSplitManifestFile = fileHelper.writeManifestToFile(trafficSplitObject, TRAFFIC_SPLIT_OBJECT, blueGreenHelper_1.getBlueGreenResourceName(name, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX));
        yield kubectl.apply(trafficSplitManifestFile);
    });
}
|
||||
// Builds the auxiliary service copy for an SMI rollout: the stable suffix
// yields a "-stable"-renamed copy selecting the stable (none) label; any
// other suffix yields a green-labeled copy. The input is never mutated.
function getSMIServiceResource(inputObject, suffix) {
    const serviceCopy = JSON.parse(JSON.stringify(inputObject));
    if (suffix !== blueGreenHelper_1.STABLE_SUFFIX) {
        // Non-stable copies are labeled green.
        return blueGreenHelper_1.getNewBlueGreenObject(serviceCopy, blueGreenHelper_1.GREEN_LABEL_VALUE);
    }
    // Stable copies get the "-stable" name suffix and the none label.
    serviceCopy.metadata.name = blueGreenHelper_1.getBlueGreenResourceName(inputObject.metadata.name, blueGreenHelper_1.STABLE_SUFFIX);
    return blueGreenHelper_1.getNewBlueGreenObject(serviceCopy, blueGreenHelper_1.NONE_LABEL_VALUE);
}
exports.getSMIServiceResource = getSMIServiceResource;
|
||||
// Re-points each service's TrafficSplit at the workloads labeled nextLabel.
function routeBlueGreenSMI(kubectl, nextLabel, serviceEntityList) {
    return __awaiter(this, void 0, void 0, function* () {
        for (const serviceObject of serviceEntityList) {
            // Re-apply the TrafficSplit with weights for the given label.
            yield createTrafficSplitObject(kubectl, serviceObject.metadata.name, nextLabel);
        }
    });
}
exports.routeBlueGreenSMI = routeBlueGreenSMI;
|
||||
/**
 * Checks that every service's TrafficSplit exists and currently routes all
 * traffic to the green backend (green weight == MAX_VAL, stable == MIN_VAL).
 * @returns true only when all TrafficSplits are in the promotable state.
 */
function validateTrafficSplitsState(kubectl, serviceEntityList) {
    return __awaiter(this, void 0, void 0, function* () {
        let trafficSplitsInRightState = true;
        for (const serviceObject of serviceEntityList) {
            const name = serviceObject.metadata.name;
            let trafficSplitObject = yield blueGreenHelper_1.fetchResource(kubectl, TRAFFIC_SPLIT_OBJECT, blueGreenHelper_1.getBlueGreenResourceName(name, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX));
            if (!trafficSplitObject) {
                // no traffic split exists
                // Fix: the original fell through after this check and then
                // dereferenced `trafficSplitObject.spec` on the missing
                // object, crashing with a TypeError instead of reporting
                // "not ready". Skip to the next service instead.
                trafficSplitsInRightState = false;
                continue;
            }
            trafficSplitObject = JSON.parse(JSON.stringify(trafficSplitObject));
            trafficSplitObject.spec.backends.forEach((element) => {
                // checking if trafficsplit in right state to deploy
                if (element.service === blueGreenHelper_1.getBlueGreenResourceName(name, blueGreenHelper_1.GREEN_SUFFIX)) {
                    if (element.weight != MAX_VAL)
                        trafficSplitsInRightState = false;
                }
                if (element.service === blueGreenHelper_1.getBlueGreenResourceName(name, blueGreenHelper_1.STABLE_SUFFIX)) {
                    if (element.weight != MIN_VAL)
                        trafficSplitsInRightState = false;
                }
            });
        }
        return trafficSplitsInRightState;
    });
}
exports.validateTrafficSplitsState = validateTrafficSplitsState;
|
||||
// Deletes the per-service SMI helper objects: the TrafficSplit plus the
// auxiliary "-green" and "-stable" services created during setup.
function cleanupSMI(kubectl, serviceEntityList) {
    return __awaiter(this, void 0, void 0, function* () {
        const deleteList = [];
        for (const serviceObject of serviceEntityList) {
            const serviceName = serviceObject.metadata.name;
            deleteList.push({
                name: blueGreenHelper_1.getBlueGreenResourceName(serviceName, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX),
                kind: TRAFFIC_SPLIT_OBJECT,
            }, {
                name: blueGreenHelper_1.getBlueGreenResourceName(serviceName, blueGreenHelper_1.GREEN_SUFFIX),
                kind: serviceObject.kind,
            }, {
                name: blueGreenHelper_1.getBlueGreenResourceName(serviceName, blueGreenHelper_1.STABLE_SUFFIX),
                kind: serviceObject.kind,
            });
        }
        // Remove everything in a single pass.
        yield blueGreenHelper_1.deleteObjects(kubectl, deleteList);
    });
}
exports.cleanupSMI = cleanupSMI;
|
||||
@ -1,159 +0,0 @@
|
||||
"use strict";
// TypeScript-emitted async/await helper: runs a generator-based function
// body inside a Promise, stepping the generator and adopting any yielded
// promise-like values. Reuses a pre-existing global __awaiter if present.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    // Wrap non-promise yielded values in the promise constructor P.
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        // Drive the generator until done, then resolve with its return value.
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getStableResourceName = exports.getBaselineResourceName = exports.getCanaryResourceName = exports.fetchResource = exports.getNewCanaryResource = exports.getNewBaselineResource = exports.getStableResource = exports.isResourceMarkedAsStable = exports.markResourceAsStable = exports.deleteCanaryDeployment = exports.STABLE_LABEL_VALUE = exports.STABLE_SUFFIX = exports.CANARY_LABEL_VALUE = exports.BASELINE_LABEL_VALUE = exports.CANARY_VERSION_LABEL = void 0;
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const core = require("@actions/core");
|
||||
const kubernetesTypes_1 = require("../../types/kubernetesTypes");
|
||||
const utils = require("../../utilities/manifestUpdateUtils");
|
||||
const manifestUpdateUtils_1 = require("../../utilities/manifestUpdateUtils");
|
||||
const manifestSpecLabelUtils_1 = require("../../utilities/manifestSpecLabelUtils");
|
||||
const kubectlUtils_1 = require("../../utilities/kubectlUtils");
|
||||
// Label key used to mark which canary role (canary/baseline/stable) a
// resource belongs to.
exports.CANARY_VERSION_LABEL = "workflow/version";
// Name suffixes and label values for the three canary roles.
const BASELINE_SUFFIX = "-baseline";
exports.BASELINE_LABEL_VALUE = "baseline";
const CANARY_SUFFIX = "-canary";
exports.CANARY_LABEL_VALUE = "canary";
exports.STABLE_SUFFIX = "-stable";
exports.STABLE_LABEL_VALUE = "stable";
|
||||
// Removes the canary and baseline workloads (and optionally services)
// created from the given manifest files.
// Throws when no manifest file paths are provided.
function deleteCanaryDeployment(kubectl, manifestFilePaths, includeServices) {
    return __awaiter(this, void 0, void 0, function* () {
        const noManifests = manifestFilePaths == null || manifestFilePaths.length == 0;
        if (noManifests) {
            throw new Error("Manifest file not found");
        }
        yield cleanUpCanary(kubectl, manifestFilePaths, includeServices);
    });
}
exports.deleteCanaryDeployment = deleteCanaryDeployment;
|
||||
// Returns a copy of the resource labeled as the stable version; if the
// resource already carries the stable marker it is returned unchanged.
function markResourceAsStable(inputObject) {
    if (isResourceMarkedAsStable(inputObject)) {
        return inputObject;
    }
    const stableCopy = JSON.parse(JSON.stringify(inputObject));
    addCanaryLabelsAndAnnotations(stableCopy, exports.STABLE_LABEL_VALUE);
    return stableCopy;
}
exports.markResourceAsStable = markResourceAsStable;
|
||||
/**
 * Reports whether the resource is labeled as the stable version.
 * @returns true only when metadata.labels carries the canary version label
 *          with the stable value; false when metadata/labels are missing.
 */
function isResourceMarkedAsStable(inputObject) {
    // Fix: also guard `labels` — the original compiled optional chain only
    // guarded `metadata`, so an object with metadata but no labels threw a
    // TypeError instead of returning false.
    const labels = inputObject && inputObject.metadata && inputObject.metadata.labels;
    return (!!labels && labels[exports.CANARY_VERSION_LABEL] === exports.STABLE_LABEL_VALUE);
}
exports.isResourceMarkedAsStable = isResourceMarkedAsStable;
|
||||
/**
 * Builds the stable-named copy of a resource, preserving its replica count
 * for workload kinds that carry one (0 for pods/daemonsets/services).
 */
function getStableResource(inputObject) {
    // Fix: the replica count lives at spec.replicas, not metadata.replicas
    // (getNewCanaryObject writes it back to spec.replicas); the original
    // read metadata.replicas, which k8s objects do not carry.
    const replicaCount = specContainsReplicas(inputObject.kind)
        ? inputObject.spec.replicas
        : 0;
    return getNewCanaryObject(inputObject, replicaCount, exports.STABLE_LABEL_VALUE);
}
exports.getStableResource = getStableResource;
|
||||
// Builds the baseline-named copy of a stable resource with the given
// replica count.
function getNewBaselineResource(stableObject, replicas) {
    return getNewCanaryObject(stableObject, replicas, exports.BASELINE_LABEL_VALUE);
}
exports.getNewBaselineResource = getNewBaselineResource;
|
||||
// Builds the canary-named copy of a resource with the given replica count.
function getNewCanaryResource(inputObject, replicas) {
    return getNewCanaryObject(inputObject, replicas, exports.CANARY_LABEL_VALUE);
}
exports.getNewCanaryResource = getNewCanaryResource;
|
||||
// Fetches a resource from the cluster by kind and name, strips the
// cluster-specific details, and returns the parsed object. Resolves to
// null when kubectl reports an error, and to undefined when there is no
// stdout or stripping the details fails.
function fetchResource(kubectl, kind, name) {
    return __awaiter(this, void 0, void 0, function* () {
        const result = yield kubectl.getResource(kind, name);
        const failed = !result || (result === null || result === void 0 ? void 0 : result.stderr);
        if (failed) {
            return null;
        }
        if (result.stdout) {
            const resource = JSON.parse(result.stdout);
            try {
                utils.UnsetClusterSpecificDetails(resource);
                return resource;
            }
            catch (ex) {
                // Best-effort: log and fall through (resolving undefined).
                core.debug(`Exception occurred while Parsing ${resource} in JSON object: ${ex}`);
            }
        }
    });
}
exports.fetchResource = fetchResource;
|
||||
// Appends the canary suffix to a resource name.
function getCanaryResourceName(name) {
    return `${name}${CANARY_SUFFIX}`;
}
exports.getCanaryResourceName = getCanaryResourceName;
|
||||
// Appends the baseline suffix to a resource name.
function getBaselineResourceName(name) {
    return `${name}${BASELINE_SUFFIX}`;
}
exports.getBaselineResourceName = getBaselineResourceName;
|
||||
// Appends the stable suffix to a resource name.
function getStableResourceName(name) {
    return `${name}${exports.STABLE_SUFFIX}`;
}
exports.getStableResourceName = getStableResourceName;
|
||||
// Builds a deep copy of the resource renamed for the given canary role
// (canary/baseline/stable), applies the canary labels/annotations, and
// sets the replica count for workload kinds that carry one.
function getNewCanaryObject(inputObject, replicas, type) {
    const newObject = JSON.parse(JSON.stringify(inputObject));
    // Pick the role-specific resource name.
    switch (type) {
        case exports.CANARY_LABEL_VALUE:
            newObject.metadata.name = getCanaryResourceName(inputObject.metadata.name);
            break;
        case exports.STABLE_LABEL_VALUE:
            newObject.metadata.name = getStableResourceName(inputObject.metadata.name);
            break;
        default:
            // Any other role is treated as baseline.
            newObject.metadata.name = getBaselineResourceName(inputObject.metadata.name);
    }
    addCanaryLabelsAndAnnotations(newObject, type);
    if (specContainsReplicas(newObject.kind)) {
        newObject.spec.replicas = replicas;
    }
    return newObject;
}
|
||||
// True for workload kinds that carry a spec.replicas field — everything
// except pods, daemonsets, and services.
function specContainsReplicas(kind) {
    const lowerKind = kind.toLowerCase();
    if (lowerKind === kubernetesTypes_1.KubernetesWorkload.POD.toLowerCase()) {
        return false;
    }
    if (lowerKind === kubernetesTypes_1.KubernetesWorkload.DAEMON_SET.toLowerCase()) {
        return false;
    }
    return !kubernetesTypes_1.isServiceEntity(kind);
}
|
||||
// Applies the canary version marker (CANARY_VERSION_LABEL -> type) to the
// object's labels, annotations, and selector, and — for non-service
// kinds — to its pod-spec labels as well. Mutates inputObject in place.
function addCanaryLabelsAndAnnotations(inputObject, type) {
    // NOTE(review): this sets a plain property on a Map instead of calling
    // Map.set — the downstream update helpers appear to read it as an
    // object property; confirm before switching to a plain object or .set.
    const newLabels = new Map();
    newLabels[exports.CANARY_VERSION_LABEL] = type;
    manifestUpdateUtils_1.updateObjectLabels(inputObject, newLabels, false);
    manifestUpdateUtils_1.updateObjectAnnotations(inputObject, newLabels, false);
    manifestUpdateUtils_1.updateSelectorLabels(inputObject, newLabels, false);
    // Services have no pod template, so spec labels are skipped for them.
    if (!kubernetesTypes_1.isServiceEntity(inputObject.kind)) {
        manifestSpecLabelUtils_1.updateSpecLabels(inputObject, newLabels, false);
    }
}
|
||||
// Deletes the canary and baseline variants of every deployment (and, when
// includeServices is set, every service) declared in the given files.
// Individual delete failures are swallowed so missing objects don't abort
// the cleanup.
function cleanUpCanary(kubectl, files, includeServices) {
    return __awaiter(this, void 0, void 0, function* () {
        const deleteObject = function (kind, name) {
            return __awaiter(this, void 0, void 0, function* () {
                try {
                    const result = yield kubectl.delete([kind, name]);
                    kubectlUtils_1.checkForErrors([result]);
                }
                catch (ex) {
                    // Ignore failures of delete if it doesn't exist
                }
            });
        };
        for (const filePath of files) {
            const fileContents = fs.readFileSync(filePath).toString();
            for (const inputObject of yaml.safeLoadAll(fileContents)) {
                const name = inputObject.metadata.name;
                const kind = inputObject.kind;
                const shouldDelete = kubernetesTypes_1.isDeploymentEntity(kind) ||
                    (includeServices && kubernetesTypes_1.isServiceEntity(kind));
                if (shouldDelete) {
                    // Remove both the canary and the baseline variants.
                    yield deleteObject(kind, getCanaryResourceName(name));
                    yield deleteObject(kind, getBaselineResourceName(name));
                }
            }
        }
    });
}
|
||||
@ -1,72 +0,0 @@
|
||||
"use strict";
// TypeScript-emitted async/await helper: runs a generator-based function
// body inside a Promise, stepping the generator and adopting any yielded
// promise-like values. Reuses a pre-existing global __awaiter if present.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    // Wrap non-promise yielded values in the promise constructor P.
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        // Drive the generator until done, then resolve with its return value.
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.deployPodCanary = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const fileHelper = require("../../utilities/fileUtils");
|
||||
const canaryDeploymentHelper = require("./canaryHelper");
|
||||
const kubernetesTypes_1 = require("../../types/kubernetesTypes");
|
||||
const manifestUpdateUtils_1 = require("../../utilities/manifestUpdateUtils");
|
||||
/**
 * Deploys a pod-based canary rollout.
 * For each deployment manifest a canary copy scaled to `percentage`% of the
 * declared replicas is created; when a stable object already exists in the
 * cluster a matching baseline copy is created from it as well.
 * Non-deployment objects pass through unchanged.
 * @returns the kubectl apply result and the written manifest file paths.
 * @throws {Error} when the "percentage" input is not a number in [0, 100].
 */
function deployPodCanary(filePaths, kubectl) {
    return __awaiter(this, void 0, void 0, function* () {
        const newObjectsList = [];
        const percentage = parseInt(core.getInput("percentage"));
        // Fix: also reject non-numeric input — NaN slipped through the
        // original range check (NaN < 0 and NaN > 100 are both false) and
        // produced NaN replica counts downstream.
        if (Number.isNaN(percentage) || percentage < 0 || percentage > 100)
            throw Error("Percentage must be between 0 and 100");
        for (const filePath of filePaths) {
            const fileContents = fs.readFileSync(filePath).toString();
            const parsedYaml = yaml.safeLoadAll(fileContents);
            for (const inputObject of parsedYaml) {
                const name = inputObject.metadata.name;
                const kind = inputObject.kind;
                if (kubernetesTypes_1.isDeploymentEntity(kind)) {
                    core.debug("Calculating replica count for canary");
                    const canaryReplicaCount = calculateReplicaCountForCanary(inputObject, percentage);
                    core.debug("Replica count is " + canaryReplicaCount);
                    // Get stable object
                    core.debug("Querying stable object");
                    const stableObject = yield canaryDeploymentHelper.fetchResource(kubectl, kind, name);
                    if (!stableObject) {
                        core.debug("Stable object not found. Creating canary object");
                        const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                        newObjectsList.push(newCanaryObject);
                    }
                    else {
                        core.debug("Creating canary and baseline objects. Stable object found: " +
                            JSON.stringify(stableObject));
                        const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                        core.debug("New canary object: " + JSON.stringify(newCanaryObject));
                        const newBaselineObject = canaryDeploymentHelper.getNewBaselineResource(stableObject, canaryReplicaCount);
                        core.debug("New baseline object: " + JSON.stringify(newBaselineObject));
                        newObjectsList.push(newCanaryObject);
                        newObjectsList.push(newBaselineObject);
                    }
                }
                else {
                    // update non deployment entity as it is
                    newObjectsList.push(inputObject);
                }
            }
        }
        core.debug("New objects list: " + JSON.stringify(newObjectsList));
        const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
        const forceDeployment = core.getInput("force").toLowerCase() === "true";
        const result = yield kubectl.apply(manifestFiles, forceDeployment);
        return { result, newFilePaths: manifestFiles };
    });
}
exports.deployPodCanary = deployPodCanary;
|
||||
// Scales the manifest's declared replica count by the canary percentage,
// rounding to the nearest whole replica.
function calculateReplicaCountForCanary(inputObject, percentage) {
    const declaredReplicas = manifestUpdateUtils_1.getReplicaCount(inputObject);
    return Math.round((declaredReplicas * percentage) / 100);
}
|
||||
@ -1,221 +0,0 @@
|
||||
"use strict";
// TypeScript-emitted async/await helper: runs a generator-based function
// body inside a Promise, stepping the generator and adopting any yielded
// promise-like values. Reuses a pre-existing global __awaiter if present.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    // Wrap non-promise yielded values in the promise constructor P.
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        // Drive the generator until done, then resolve with its return value.
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.redirectTrafficToStableDeployment = exports.redirectTrafficToCanaryDeployment = exports.deploySMICanary = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const fileHelper = require("../../utilities/fileUtils");
|
||||
const kubectlUtils = require("../../utilities/trafficSplitUtils");
|
||||
const canaryDeploymentHelper = require("./canaryHelper");
|
||||
const kubernetesTypes_1 = require("../../types/kubernetesTypes");
|
||||
const kubectlUtils_1 = require("../../utilities/kubectlUtils");
|
||||
// Name suffix and kind for the per-service SMI TrafficSplit object.
const TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX = "-workflow-rollout";
const TRAFFIC_SPLIT_OBJECT = "TrafficSplit";
|
||||
/**
 * Deploys an SMI canary: for each deployment manifest creates a canary copy
 * and — when a properly marked stable object exists — a baseline copy,
 * applies them, then creates/updates the canary services and TrafficSplits.
 * @returns the kubectl apply result and the written manifest file paths.
 * @throws {Error} when the replicas input is out of range, or a stable
 *         object exists without the stable marker label.
 */
function deploySMICanary(filePaths, kubectl) {
    return __awaiter(this, void 0, void 0, function* () {
        const canaryReplicaCount = parseInt(core.getInput("baseline-and-canary-replicas"));
        if (canaryReplicaCount < 0 || canaryReplicaCount > 100)
            throw Error("Baseline-and-canary-replicas must be between 0 and 100");
        const newObjectsList = [];
        // Fix: fetchResource is async, but the original called it inside a
        // synchronous forEach/safeLoadAll callback without awaiting it, so
        // stableObject was always a (truthy) Promise — the "no stable
        // object" branch was unreachable and isResourceMarkedAsStable was
        // evaluated against a Promise. Rewritten as awaited for...of loops.
        for (const filePath of filePaths) {
            const fileContents = fs.readFileSync(filePath).toString();
            const parsedYaml = yaml.safeLoadAll(fileContents);
            for (const inputObject of parsedYaml) {
                const name = inputObject.metadata.name;
                const kind = inputObject.kind;
                if (kubernetesTypes_1.isDeploymentEntity(kind)) {
                    const stableObject = yield canaryDeploymentHelper.fetchResource(kubectl, kind, name);
                    if (!stableObject) {
                        core.debug("Stable object not found. Creating only canary object");
                        const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                        newObjectsList.push(newCanaryObject);
                    }
                    else {
                        if (!canaryDeploymentHelper.isResourceMarkedAsStable(stableObject)) {
                            throw Error(`StableSpecSelectorNotExist : ${name}`);
                        }
                        core.debug("Stable object found. Creating canary and baseline objects");
                        const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                        const newBaselineObject = canaryDeploymentHelper.getNewBaselineResource(stableObject, canaryReplicaCount);
                        newObjectsList.push(newCanaryObject);
                        newObjectsList.push(newBaselineObject);
                    }
                }
                else {
                    // Update non deployment entity as it is
                    newObjectsList.push(inputObject);
                }
            }
        }
        const newFilePaths = fileHelper.writeObjectsToFile(newObjectsList);
        const forceDeployment = core.getInput("force").toLowerCase() === "true";
        const result = yield kubectl.apply(newFilePaths, forceDeployment);
        yield createCanaryService(kubectl, filePaths);
        return { result, newFilePaths };
    });
}
exports.deploySMICanary = deploySMICanary;
|
||||
/**
 * Creates the canary/baseline/stable service variants and the TrafficSplit
 * for every service in the given manifest files, then applies them all.
 * When a stable service already exists, the TrafficSplit is only rewritten
 * if it is not already routing 1000m to the canary backend.
 */
function createCanaryService(kubectl, filePaths) {
    var _a;
    return __awaiter(this, void 0, void 0, function* () {
        const newObjectsList = [];
        const trafficObjectsList = [];
        for (const filePath of filePaths) {
            const fileContents = fs.readFileSync(filePath).toString();
            const parsedYaml = yaml.safeLoadAll(fileContents);
            for (const inputObject of parsedYaml) {
                const name = inputObject.metadata.name;
                const kind = inputObject.kind;
                if (kubernetesTypes_1.isServiceEntity(kind)) {
                    // Canary and baseline service copies are always created.
                    const newCanaryServiceObject = canaryDeploymentHelper.getNewCanaryResource(inputObject);
                    newObjectsList.push(newCanaryServiceObject);
                    const newBaselineServiceObject = canaryDeploymentHelper.getNewBaselineResource(inputObject);
                    newObjectsList.push(newBaselineServiceObject);
                    const stableObject = yield canaryDeploymentHelper.fetchResource(kubectl, kind, canaryDeploymentHelper.getStableResourceName(name));
                    if (!stableObject) {
                        // First rollout: create the stable service and a fresh
                        // TrafficSplit routing everything to the canary.
                        const newStableServiceObject = canaryDeploymentHelper.getStableResource(inputObject);
                        newObjectsList.push(newStableServiceObject);
                        core.debug("Creating the traffic object for service: " + name);
                        const trafficObject = yield createTrafficSplitManifestFile(kubectl, name, 0, 0, 1000);
                        trafficObjectsList.push(trafficObject);
                    }
                    else {
                        let updateTrafficObject = true;
                        const trafficObject = yield canaryDeploymentHelper.fetchResource(kubectl, TRAFFIC_SPLIT_OBJECT, getTrafficSplitResourceName(name));
                        if (trafficObject) {
                            const trafficJObject = JSON.parse(JSON.stringify(trafficObject));
                            if ((_a = trafficJObject === null || trafficJObject === void 0 ? void 0 : trafficJObject.spec) === null || _a === void 0 ? void 0 : _a.backends) {
                                trafficJObject.spec.backends.forEach((s) => {
                                    if (s.service ===
                                        canaryDeploymentHelper.getCanaryResourceName(name) &&
                                        s.weight === "1000m") {
                                        core.debug("Update traffic objcet not required");
                                        updateTrafficObject = false;
                                    }
                                });
                            }
                        }
                        if (updateTrafficObject) {
                            core.debug("Stable service object present so updating the traffic object for service: " +
                                name);
                            // Fix: updateTrafficSplitObject is async — the
                            // original pushed the pending Promise into the
                            // manifest-file list instead of the resolved file
                            // path; await it before pushing.
                            trafficObjectsList.push(yield updateTrafficSplitObject(kubectl, name));
                        }
                    }
                }
            }
        }
        const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
        manifestFiles.push(...trafficObjectsList);
        const forceDeployment = core.getInput("force").toLowerCase() === "true";
        const result = yield kubectl.apply(manifestFiles, forceDeployment);
        kubectlUtils_1.checkForErrors([result]);
    });
}
|
||||
// Shift 100% of traffic (1000 thousandths) onto the canary backends of every
// service referenced by the given manifest files.
async function redirectTrafficToCanaryDeployment(kubectl, manifestFilePaths) {
    await adjustTraffic(kubectl, manifestFilePaths, 0, 1000);
}
exports.redirectTrafficToCanaryDeployment = redirectTrafficToCanaryDeployment;
|
||||
// Shift 100% of traffic (1000 thousandths) back onto the stable backends of
// every service referenced by the given manifest files.
async function redirectTrafficToStableDeployment(kubectl, manifestFilePaths) {
    await adjustTraffic(kubectl, manifestFilePaths, 1000, 0);
}
exports.redirectTrafficToStableDeployment = redirectTrafficToStableDeployment;
|
||||
// For every Service found in the given manifest files, write a TrafficSplit
// sending `stableWeight` to the stable backend and `canaryWeight` to the
// canary backend (baseline weight is pinned to 0), then apply them all.
// No-op when there are no manifest paths or no Service entities.
async function adjustTraffic(kubectl, manifestFilePaths, stableWeight, canaryWeight) {
    if (!manifestFilePaths || manifestFilePaths.length == 0) {
        return;
    }
    const trafficSplitManifests = [];
    for (const filePath of manifestFilePaths) {
        const fileContents = fs.readFileSync(filePath).toString();
        for (const inputObject of yaml.safeLoadAll(fileContents)) {
            // Only Service entities get a TrafficSplit; other kinds are skipped.
            if (kubernetesTypes_1.isServiceEntity(inputObject.kind)) {
                const manifest = await createTrafficSplitManifestFile(kubectl, inputObject.metadata.name, stableWeight, 0, canaryWeight);
                trafficSplitManifests.push(manifest);
            }
        }
    }
    if (trafficSplitManifests.length <= 0) {
        return;
    }
    const forceDeployment = core.getInput("force").toLowerCase() === "true";
    const result = await kubectl.apply(trafficSplitManifests, forceDeployment);
    kubectlUtils_1.checkForErrors([result]);
}
|
||||
// Build a TrafficSplit manifest that routes the "percentage" action input to
// the baseline+canary pair (split evenly) and the remainder to stable.
// SMI weights are expressed in thousandths, hence the x10 multiplier.
// Throws when the percentage input is outside [0, 100].
async function updateTrafficSplitObject(kubectl, serviceName) {
    // Fix: pass an explicit radix to parseInt.
    const percentage = parseInt(core.getInput("percentage"), 10);
    if (percentage < 0 || percentage > 100)
        throw Error("Percentage must be between 0 and 100");
    // Fix: corrected misspelled local "percentageWithMuliplier".
    const percentageWithMultiplier = percentage * 10;
    const baselineAndCanaryWeight = percentageWithMultiplier / 2;
    const stableDeploymentWeight = 1000 - percentageWithMultiplier;
    // Fix: debug message typo "baseling" -> "baseline".
    core.debug("Creating the traffic object with canary weight: " +
        baselineAndCanaryWeight +
        ",baseline weight: " +
        baselineAndCanaryWeight +
        ",stable: " +
        stableDeploymentWeight);
    return await createTrafficSplitManifestFile(kubectl, serviceName, stableDeploymentWeight, baselineAndCanaryWeight, baselineAndCanaryWeight);
}
|
||||
// Serialize a TrafficSplit for `serviceName` with the given weights and write
// it to disk; returns the manifest file path or throws when the write fails.
async function createTrafficSplitManifestFile(kubectl, serviceName, stableWeight, baselineWeight, canaryWeight) {
    const smiObjectString = await getTrafficSplitObject(kubectl, serviceName, stableWeight, baselineWeight, canaryWeight);
    const manifestFile = fileHelper.writeManifestToFile(smiObjectString, TRAFFIC_SPLIT_OBJECT, serviceName);
    if (!manifestFile) {
        throw new Error("Unable to create traffic split manifest file");
    }
    return manifestFile;
}
|
||||
// Process-wide cache of the cluster's TrafficSplit API version ("" = unresolved).
let trafficSplitAPIVersion = "";
// Build the JSON string for a TrafficSplit object that routes `name` between
// its stable / baseline / canary backends with the given weights (thousandths).
async function getTrafficSplitObject(kubectl, name, stableWeight, baselineWeight, canaryWeight) {
    // Resolve the API version once and reuse it for subsequent calls.
    if (!trafficSplitAPIVersion) {
        trafficSplitAPIVersion = await kubectlUtils.getTrafficSplitAPIVersion(kubectl);
    }
    const backend = (service, weight) => ({ service, weight });
    return JSON.stringify({
        apiVersion: trafficSplitAPIVersion,
        kind: "TrafficSplit",
        metadata: {
            name: getTrafficSplitResourceName(name),
        },
        spec: {
            backends: [
                backend(canaryDeploymentHelper.getStableResourceName(name), stableWeight),
                backend(canaryDeploymentHelper.getBaselineResourceName(name), baselineWeight),
                backend(canaryDeploymentHelper.getCanaryResourceName(name), canaryWeight),
            ],
            service: name,
        },
    });
}
|
||||
// TrafficSplit resource name derived from the service name plus a fixed suffix.
function getTrafficSplitResourceName(name) {
    return `${name}${TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX}`;
}
|
||||
@ -1,136 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.annotateAndLabelResources = exports.checkManifestStability = exports.deployManifests = void 0;
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const canaryDeploymentHelper = require("./canary/canaryHelper");
|
||||
const models = require("../types/kubernetesTypes");
|
||||
const kubernetesTypes_1 = require("../types/kubernetesTypes");
|
||||
const fileHelper = require("../utilities/fileUtils");
|
||||
const KubernetesManifestUtility = require("../utilities/manifestStabilityUtils");
|
||||
const podCanaryHelper_1 = require("./canary/podCanaryHelper");
|
||||
const smiCanaryHelper_1 = require("./canary/smiCanaryHelper");
|
||||
const serviceBlueGreenHelper_1 = require("./blueGreen/serviceBlueGreenHelper");
|
||||
const ingressBlueGreenHelper_1 = require("./blueGreen/ingressBlueGreenHelper");
|
||||
const smiBlueGreenHelper_1 = require("./blueGreen/smiBlueGreenHelper");
|
||||
const deploymentStrategy_1 = require("../types/deploymentStrategy");
|
||||
const core = require("@actions/core");
|
||||
const trafficSplitMethod_1 = require("../types/trafficSplitMethod");
|
||||
const routeStrategy_1 = require("../types/routeStrategy");
|
||||
const workflowAnnotationUtils_1 = require("../utilities/workflowAnnotationUtils");
|
||||
const kubectlUtils_1 = require("../utilities/kubectlUtils");
|
||||
const githubUtils_1 = require("../utilities/githubUtils");
|
||||
const dockerUtils_1 = require("../utilities/dockerUtils");
|
||||
// Deploy the given manifest files using the selected strategy and return the
// file paths that were applied (canary/blue-green strategies may rewrite them).
async function deployManifests(files, deploymentStrategy, kubectl, trafficSplitMethod) {
    switch (deploymentStrategy) {
        case deploymentStrategy_1.DeploymentStrategy.CANARY: {
            const canaryHelper = trafficSplitMethod == trafficSplitMethod_1.TrafficSplitMethod.SMI
                ? smiCanaryHelper_1.deploySMICanary
                : podCanaryHelper_1.deployPodCanary;
            const { result, newFilePaths } = await canaryHelper(files, kubectl);
            kubectlUtils_1.checkForErrors([result]);
            return newFilePaths;
        }
        case deploymentStrategy_1.DeploymentStrategy.BLUE_GREEN: {
            const routeStrategy = routeStrategy_1.parseRouteStrategy(core.getInput("route-method", { required: true }));
            // First matching route strategy wins; service routing is the fallback.
            const { result, newFilePaths } = await Promise.resolve((routeStrategy == routeStrategy_1.RouteStrategy.INGRESS &&
                ingressBlueGreenHelper_1.deployBlueGreenIngress(kubectl, files)) ||
                (routeStrategy == routeStrategy_1.RouteStrategy.SMI &&
                    smiBlueGreenHelper_1.deployBlueGreenSMI(kubectl, files)) ||
                serviceBlueGreenHelper_1.deployBlueGreenService(kubectl, files));
            kubectlUtils_1.checkForErrors([result]);
            return newFilePaths;
        }
        case undefined: {
            core.warning("Deployment strategy is not recognized.");
            // Intentional fall-through: an unrecognized strategy still deploys
            // via the default (plain apply) path below.
        }
        default: {
            const trafficSplitMethod = trafficSplitMethod_1.parseTrafficSplitMethod(core.getInput("traffic-split-method", { required: true }));
            const forceDeployment = core.getInput("force").toLowerCase() === "true";
            if (trafficSplitMethod === trafficSplitMethod_1.TrafficSplitMethod.SMI) {
                // SMI: label deployment-like objects as the stable version first.
                const updatedManifests = appendStableVersionLabelToResource(files);
                const result = await kubectl.apply(updatedManifests, forceDeployment);
                kubectlUtils_1.checkForErrors([result]);
            }
            else {
                const result = await kubectl.apply(files, forceDeployment);
                kubectlUtils_1.checkForErrors([result]);
            }
            // Note: the original (input) file paths are returned in both branches.
            return files;
        }
    }
}
exports.deployManifests = deployManifests;
|
||||
// Rewrite every deployment-like object in `files` as its stable-labelled
// variant; non-deployment manifests keep their original file path.
// Returns the combined list of manifest file paths.
function appendStableVersionLabelToResource(files) {
    const manifestFiles = [];
    const newObjectsList = [];
    for (const filePath of files) {
        const fileContents = fs.readFileSync(filePath).toString();
        yaml.safeLoadAll(fileContents, (inputObject) => {
            if (kubernetesTypes_1.isDeploymentEntity(inputObject.kind)) {
                newObjectsList.push(canaryDeploymentHelper.markResourceAsStable(inputObject));
            }
            else {
                // Untouched objects are deployed from their original file.
                manifestFiles.push(filePath);
            }
        });
    }
    manifestFiles.push(...fileHelper.writeObjectsToFile(newObjectsList));
    return manifestFiles;
}
|
||||
// Wait until every given resource reports a stable rollout state.
async function checkManifestStability(kubectl, resources) {
    await KubernetesManifestUtility.checkManifestStability(kubectl, resources);
}
exports.checkManifestStability = checkManifestStability;
|
||||
// Stamp the deployed resources with workflow metadata (annotations + labels)
// so a deployment can be traced back to this GitHub Actions run.
async function annotateAndLabelResources(files, kubectl, resourceTypes, allPods) {
    const githubToken = core.getInput("token");
    const workflowFilePath = await githubUtils_1.getWorkflowFilePath(githubToken);
    const deploymentConfig = await dockerUtils_1.getDeploymentConfig();
    const annotationKeyLabel = workflowAnnotationUtils_1.getWorkflowAnnotationKeyLabel(workflowFilePath);
    await annotateResources(files, kubectl, resourceTypes, allPods, annotationKeyLabel, workflowFilePath, deploymentConfig);
    await labelResources(files, kubectl, annotationKeyLabel);
}
exports.annotateAndLabelResources = annotateAndLabelResources;
|
||||
// Annotate the target namespace, the deployed files, and the child pods of
// every non-pod workload with the workflow annotation key/value.
async function annotateResources(files, kubectl, resourceTypes, allPods, annotationKey, workflowFilePath, deploymentConfig) {
    const annotateResults = [];
    const namespace = core.getInput("namespace") || "default";
    const lastSuccessSha = await kubectlUtils_1.getLastSuccessfulRunSha(kubectl, namespace, annotationKey);
    const annotationKeyValStr = `${annotationKey}=${workflowAnnotationUtils_1.getWorkflowAnnotations(lastSuccessSha, workflowFilePath, deploymentConfig)}`;
    annotateResults.push(await kubectl.annotate("namespace", namespace, annotationKeyValStr));
    annotateResults.push(await kubectl.annotateFiles(files, annotationKeyValStr));
    const podKind = models.KubernetesWorkload.POD.toLowerCase();
    for (const resource of resourceTypes) {
        // Pods have no children to annotate; everything else does.
        if (resource.type.toLowerCase() !== podKind) {
            const childResults = await kubectlUtils_1.annotateChildPods(kubectl, resource.type, resource.name, annotationKeyValStr, allPods);
            annotateResults.push(...childResults);
        }
    }
    // Second argument true: presumably downgrades failures to warnings — see checkForErrors.
    kubectlUtils_1.checkForErrors(annotateResults, true);
}
|
||||
// Label the deployed files with the workflow's friendly name and the
// workflow annotation key label.
async function labelResources(files, kubectl, label) {
    const labels = [
        `workflowFriendlyName=${githubUtils_1.normalizeWorkflowStrLabel(process.env.GITHUB_WORKFLOW)}`,
        `workflow=${label}`,
    ];
    const labelResult = await kubectl.labelFiles(files, labels);
    // Second argument true: presumably downgrades failures to warnings — see checkForErrors.
    kubectlUtils_1.checkForErrors([labelResult], true);
}
|
||||
@ -1,15 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.parseAction = exports.Action = void 0;
|
||||
var Action;
|
||||
(function (Action) {
|
||||
Action["DEPLOY"] = "deploy";
|
||||
Action["PROMOTE"] = "promote";
|
||||
Action["REJECT"] = "reject";
|
||||
})(Action = exports.Action || (exports.Action = {}));
|
||||
/**
|
||||
* Converts a string to the Action enum
|
||||
* @param str The action type (case insensitive)
|
||||
* @returns The Action enum or undefined if it can't be parsed
|
||||
*/
|
||||
exports.parseAction = (str) => Action[Object.keys(Action).filter((k) => Action[k].toString().toLowerCase() === str.toLowerCase())[0]];
|
||||
@ -1,11 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isDeployment = void 0;
|
||||
// Lowercase Kubernetes kinds considered deployment-style workloads.
const deploymentTypes = [
    "deployment",
    "replicaset",
    "daemonset",
    "pod",
    "statefulset",
];
// True when `kind` (case-insensitive) names a deployment-style workload.
exports.isDeployment = (kind) => deploymentTypes.includes(kind.toLowerCase());
|
||||
@ -1,2 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
@ -1,14 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.parseDeploymentStrategy = exports.DeploymentStrategy = void 0;
|
||||
var DeploymentStrategy;
|
||||
(function (DeploymentStrategy) {
|
||||
DeploymentStrategy["CANARY"] = "canary";
|
||||
DeploymentStrategy["BLUE_GREEN"] = "blue-green";
|
||||
})(DeploymentStrategy = exports.DeploymentStrategy || (exports.DeploymentStrategy = {}));
|
||||
/**
|
||||
* Converts a string to the DeploymentStrategy enum
|
||||
* @param str The deployment strategy (case insensitive)
|
||||
* @returns The DeploymentStrategy enum or undefined if it can't be parsed
|
||||
*/
|
||||
exports.parseDeploymentStrategy = (str) => DeploymentStrategy[Object.keys(DeploymentStrategy).filter((k) => DeploymentStrategy[k].toString().toLowerCase() === str.toLowerCase())[0]];
|
||||
@ -1,40 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.DockerExec = void 0;
|
||||
const exec_1 = require("@actions/exec");
|
||||
// Thin wrapper around the docker CLI binary at `dockerPath`.
class DockerExec {
    constructor(dockerPath) {
        this.dockerPath = dockerPath;
    }
    // `docker pull <image> <args>`; throws when the CLI exits non-zero or
    // writes anything to stderr. NOTE(review): `args` is spread unconditionally,
    // so callers must pass an array — confirm no caller omits it.
    async pull(image, args, silent) {
        const result = await this.execute(["pull", image, ...args], silent);
        if (result.stderr != "" || result.exitCode != 0) {
            throw new Error(`docker images pull failed: ${result.stderr}`);
        }
    }
    // `docker inspect <image> <args>`; returns stdout on success, throws otherwise.
    async inspect(image, args, silent = false) {
        const result = await this.execute(["inspect", image, ...args], silent);
        if (result.stderr != "" || result.exitCode != 0)
            throw new Error(`docker inspect failed: ${result.stderr}`);
        return result.stdout;
    }
    // Run the docker binary and capture its output.
    async execute(args, silent = false) {
        return await exec_1.getExecOutput(this.dockerPath, args, { silent });
    }
}
exports.DockerExec = DockerExec;
|
||||
@ -1,40 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.GitHubClient = exports.OkStatusCode = void 0;
|
||||
const core = require("@actions/core");
|
||||
const core_1 = require("@octokit/core");
|
||||
const plugin_retry_1 = require("@octokit/plugin-retry");
|
||||
exports.OkStatusCode = 200;
// Octokit augmented with automatic retries on transient failures.
const RetryOctokit = core_1.Octokit.plugin(plugin_retry_1.retry);
const RETRY_COUNT = 5;
const requestUrl = "GET /repos/{owner}/{repo}/actions/workflows";
// Minimal GitHub REST client scoped to a single "owner/repo" repository.
class GitHubClient {
    constructor(repository, token) {
        this.repository = repository;
        this.token = token;
    }
    // List the repository's workflows via the Actions REST API.
    async getWorkflows() {
        const octokit = new RetryOctokit({
            auth: this.token,
            request: { retries: RETRY_COUNT },
        });
        const [owner, repo] = this.repository.split("/");
        core.debug(`Getting workflows for repo: ${this.repository}`);
        // Fix: dropped the redundant Promise.resolve(...) wrapper around the
        // awaited request — an async function already returns a Promise.
        return await octokit.request(requestUrl, {
            owner,
            repo,
        });
    }
}
exports.GitHubClient = GitHubClient;
|
||||
@ -1,150 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getKubectlPath = exports.Kubectl = void 0;
|
||||
const exec_1 = require("@actions/exec");
|
||||
const arrayUtils_1 = require("../utilities/arrayUtils");
|
||||
const core = require("@actions/core");
|
||||
const toolCache = require("@actions/tool-cache");
|
||||
const io = require("@actions/io");
|
||||
// Wrapper around the kubectl CLI. Every invocation is scoped to `namespace`
// and may optionally skip TLS verification.
class Kubectl {
    constructor(kubectlPath, namespace = "default", ignoreSSLErrors = false) {
        this.kubectlPath = kubectlPath;
        this.ignoreSSLErrors = !!ignoreSSLErrors;
        this.namespace = namespace;
    }
    // `kubectl apply -f <paths> [--force]`.
    // NOTE(review): failures are only debug-logged and resolve to `undefined`
    // instead of propagating — confirm callers (checkForErrors) tolerate that.
    async apply(configurationPaths, force = false) {
        try {
            if (!configurationPaths || configurationPaths.length === 0)
                throw Error("Configuration paths must exist");
            const applyArgs = [
                "apply",
                "-f",
                arrayUtils_1.createInlineArray(configurationPaths),
            ];
            if (force)
                applyArgs.push("--force");
            return await this.execute(applyArgs);
        }
        catch (err) {
            core.debug("Kubectl apply failed:" + err);
        }
    }
    // `kubectl describe <type> <name>`.
    async describe(resourceType, resourceName, silent = false) {
        return await this.execute(["describe", resourceType, resourceName], silent);
    }
    // Parse `kubectl describe deployment` output for the "NewReplicaSet:" line
    // and return the replica set name; "" when no such line is found.
    // (The last matching line wins, as in the original implementation.)
    async getNewReplicaSet(deployment) {
        const result = await this.describe("deployment", deployment, true);
        let newReplicaSet = "";
        if (result && result.stdout) {
            const marker = "newreplicaset";
            for (const line of result.stdout.split("\n")) {
                if (line && line.toLowerCase().indexOf(marker) > -1) {
                    // NOTE(review): substring(marker.length) assumes the marker sits
                    // at the start of the line — verify against kubectl output format.
                    newReplicaSet = line
                        .substring(marker.length)
                        .trim()
                        .split(" ")[0];
                }
            }
        }
        return newReplicaSet;
    }
    // `kubectl annotate <type> <name> <annotation> --overwrite`.
    async annotate(resourceType, resourceName, annotation) {
        return await this.execute([
            "annotate",
            resourceType,
            resourceName,
            annotation,
            "--overwrite",
        ]);
    }
    // `kubectl annotate -f <files> <annotation> --overwrite`.
    async annotateFiles(files, annotation) {
        return await this.execute([
            "annotate",
            "-f",
            arrayUtils_1.createInlineArray(files),
            annotation,
            "--overwrite",
        ]);
    }
    // `kubectl label -f <files> <labels...> --overwrite`.
    async labelFiles(files, labels) {
        return await this.execute([
            "label",
            "-f",
            arrayUtils_1.createInlineArray(files),
            ...labels,
            "--overwrite",
        ]);
    }
    // All pods in the namespace as JSON (silent run).
    async getAllPods() {
        return await this.execute(["get", "pods", "-o", "json"], true);
    }
    // `kubectl rollout status <type>/<name>`.
    async checkRolloutStatus(resourceType, name) {
        return await this.execute(["rollout", "status", `${resourceType}/${name}`]);
    }
    // `kubectl get <type>/<name> -o json`.
    async getResource(resourceType, name) {
        return await this.execute(["get", `${resourceType}/${name}`, "-o", "json"]);
    }
    // Run an arbitrary kubectl command; `args` (when given) is passed as a
    // single pre-joined argument string.
    executeCommand(command, args) {
        if (!command)
            throw new Error("Command must be defined");
        return args ? this.execute([command, args]) : this.execute([command]);
    }
    // `kubectl delete ...`; accepts either one argument string or a list.
    delete(args) {
        if (typeof args === "string")
            return this.execute(["delete", args]);
        return this.execute(["delete", ...args]);
    }
    // Invoke kubectl with the namespace (and optional TLS-skip flag) appended.
    // NOTE(review): pushing the TLS flag mutates the caller's args array.
    async execute(args, silent = false) {
        if (this.ignoreSSLErrors) {
            args.push("--insecure-skip-tls-verify");
        }
        args = args.concat(["--namespace", this.namespace]);
        core.debug(`Kubectl run with command: ${this.kubectlPath} ${args}`);
        return await exec_1.getExecOutput(this.kubectlPath, args, { silent });
    }
}
exports.Kubectl = Kubectl;
|
||||
// Resolve the kubectl binary path: a cached tool when the "kubectl-version"
// input is set, otherwise whatever is on PATH. Throws when neither resolves.
async function getKubectlPath() {
    const version = core.getInput("kubectl-version");
    const kubectlPath = version
        ? toolCache.find("kubectl", version)
        : await io.which("kubectl", true);
    if (!kubectlPath)
        throw Error("kubectl not found. You must install it before running this action");
    return kubectlPath;
}
exports.getKubectlPath = getKubectlPath;
|
||||
@ -1,75 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.InputObjectMetadataNotDefinedError = exports.InputObjectKindNotDefinedError = exports.NullInputObjectError = exports.ResourceKindNotDefinedError = exports.isIngressEntity = exports.isServiceEntity = exports.isWorkloadEntity = exports.isDeploymentEntity = exports.WORKLOAD_TYPES_WITH_ROLLOUT_STATUS = exports.WORKLOAD_TYPES = exports.DEPLOYMENT_TYPES = exports.ServiceTypes = exports.DiscoveryAndLoadBalancerResource = exports.KubernetesWorkload = void 0;
|
||||
class KubernetesWorkload {
|
||||
}
|
||||
exports.KubernetesWorkload = KubernetesWorkload;
|
||||
KubernetesWorkload.POD = "Pod";
|
||||
KubernetesWorkload.REPLICASET = "Replicaset";
|
||||
KubernetesWorkload.DEPLOYMENT = "Deployment";
|
||||
KubernetesWorkload.STATEFUL_SET = "StatefulSet";
|
||||
KubernetesWorkload.DAEMON_SET = "DaemonSet";
|
||||
KubernetesWorkload.JOB = "job";
|
||||
KubernetesWorkload.CRON_JOB = "cronjob";
|
||||
class DiscoveryAndLoadBalancerResource {
|
||||
}
|
||||
exports.DiscoveryAndLoadBalancerResource = DiscoveryAndLoadBalancerResource;
|
||||
DiscoveryAndLoadBalancerResource.SERVICE = "service";
|
||||
DiscoveryAndLoadBalancerResource.INGRESS = "ingress";
|
||||
class ServiceTypes {
|
||||
}
|
||||
exports.ServiceTypes = ServiceTypes;
|
||||
ServiceTypes.LOAD_BALANCER = "LoadBalancer";
|
||||
ServiceTypes.NODE_PORT = "NodePort";
|
||||
ServiceTypes.CLUSTER_IP = "ClusterIP";
|
||||
exports.DEPLOYMENT_TYPES = [
|
||||
"deployment",
|
||||
"replicaset",
|
||||
"daemonset",
|
||||
"pod",
|
||||
"statefulset",
|
||||
];
|
||||
exports.WORKLOAD_TYPES = [
|
||||
"deployment",
|
||||
"replicaset",
|
||||
"daemonset",
|
||||
"pod",
|
||||
"statefulset",
|
||||
"job",
|
||||
"cronjob",
|
||||
];
|
||||
exports.WORKLOAD_TYPES_WITH_ROLLOUT_STATUS = [
|
||||
"deployment",
|
||||
"daemonset",
|
||||
"statefulset",
|
||||
];
|
||||
function isDeploymentEntity(kind) {
|
||||
if (!kind)
|
||||
throw exports.ResourceKindNotDefinedError;
|
||||
return exports.DEPLOYMENT_TYPES.some((type) => {
|
||||
return type.toLowerCase() === kind.toLowerCase();
|
||||
});
|
||||
}
|
||||
exports.isDeploymentEntity = isDeploymentEntity;
|
||||
function isWorkloadEntity(kind) {
|
||||
if (!kind)
|
||||
throw exports.ResourceKindNotDefinedError;
|
||||
return exports.WORKLOAD_TYPES.some((type) => type.toLowerCase() === kind.toLowerCase());
|
||||
}
|
||||
exports.isWorkloadEntity = isWorkloadEntity;
|
||||
function isServiceEntity(kind) {
|
||||
if (!kind)
|
||||
throw exports.ResourceKindNotDefinedError;
|
||||
return "service" === kind.toLowerCase();
|
||||
}
|
||||
exports.isServiceEntity = isServiceEntity;
|
||||
function isIngressEntity(kind) {
|
||||
if (!kind)
|
||||
throw exports.ResourceKindNotDefinedError;
|
||||
return "ingress" === kind.toLowerCase();
|
||||
}
|
||||
exports.isIngressEntity = isIngressEntity;
|
||||
exports.ResourceKindNotDefinedError = Error("Resource kind not defined");
|
||||
exports.NullInputObjectError = Error("Null inputObject");
|
||||
exports.InputObjectKindNotDefinedError = Error("Input object kind not defined");
|
||||
exports.InputObjectMetadataNotDefinedError = Error("Input object metatada not defined");
|
||||
@ -1,10 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.parseRouteStrategy = exports.RouteStrategy = void 0;
|
||||
var RouteStrategy;
|
||||
(function (RouteStrategy) {
|
||||
RouteStrategy["INGRESS"] = "ingress";
|
||||
RouteStrategy["SMI"] = "smi";
|
||||
RouteStrategy["SERVICE"] = "service";
|
||||
})(RouteStrategy = exports.RouteStrategy || (exports.RouteStrategy = {}));
|
||||
exports.parseRouteStrategy = (str) => RouteStrategy[Object.keys(RouteStrategy).filter((k) => RouteStrategy[k].toString().toLowerCase() === str.toLowerCase())[0]];
|
||||
@ -1,13 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.StatusCodes = void 0;
|
||||
// HTTP status codes with TypeScript-style two-way mappings:
// StatusCodes.OK === 200 and StatusCodes[200] === "OK".
var StatusCodes;
(function (StatusCodes) {
    const byName = {
        OK: 200,
        CREATED: 201,
        ACCEPTED: 202,
        UNAUTHORIZED: 401,
        NOT_FOUND: 404,
        INTERNAL_SERVER_ERROR: 500,
        SERVICE_UNAVAILABLE: 503,
    };
    for (const name of Object.keys(byName)) {
        const code = byName[name];
        StatusCodes[name] = code; // forward mapping
        StatusCodes[code] = name; // reverse mapping
    }
})(StatusCodes = exports.StatusCodes || (exports.StatusCodes = {}));
|
||||
@ -1,14 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.parseTrafficSplitMethod = exports.TrafficSplitMethod = void 0;
|
||||
var TrafficSplitMethod;
|
||||
(function (TrafficSplitMethod) {
|
||||
TrafficSplitMethod["POD"] = "pod";
|
||||
TrafficSplitMethod["SMI"] = "smi";
|
||||
})(TrafficSplitMethod = exports.TrafficSplitMethod || (exports.TrafficSplitMethod = {}));
|
||||
/**
|
||||
* Converts a string to the TrafficSplitMethod enum
|
||||
* @param str The traffic split method (case insensitive)
|
||||
* @returns The TrafficSplitMethod enum or undefined if it can't be parsed
|
||||
*/
|
||||
exports.parseTrafficSplitMethod = (str) => TrafficSplitMethod[Object.keys(TrafficSplitMethod).filter((k) => TrafficSplitMethod[k].toString().toLowerCase() === str.toLowerCase())[0]];
|
||||
@ -1,64 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.setImagePullSecrets = exports.isWorkload = exports.parseWorkload = exports.Workload = void 0;
|
||||
const core = require("@actions/core");
|
||||
var Workload;
|
||||
(function (Workload) {
|
||||
Workload["DEPLOYMENT"] = "deployment";
|
||||
Workload["REPLICASET"] = "replicaset";
|
||||
Workload["DAEMONSET"] = "daemonset";
|
||||
Workload["POD"] = "pod";
|
||||
Workload["STATEFULSET"] = "statefulset";
|
||||
Workload["JOB"] = "job";
|
||||
Workload["CRONJJOB"] = "cronjob";
|
||||
})(Workload = exports.Workload || (exports.Workload = {}));
|
||||
/**
|
||||
* Converts a string to the Workload enum
|
||||
* @param str The workload type (case insensitive)
|
||||
* @returns The Workload enum or undefined if it can't be parsed
|
||||
*/
|
||||
exports.parseWorkload = (str) => Workload[Object.keys(Workload).filter((k) => Workload[k].toString().toLowerCase() === str.toLowerCase())[0]];
|
||||
exports.isWorkload = (kind) => exports.parseWorkload(kind) !== undefined;
|
||||
exports.setImagePullSecrets = (k, newSecrets, override = false) => {
|
||||
switch (exports.parseWorkload(k.kind)) {
|
||||
case Workload.POD: {
|
||||
if (k && k.spec && k.spec.imagePullSecrets)
|
||||
k.spec.imagePullSecrets = getOverriddenSecrets(k.spec.imagePullSecrets, newSecrets, override);
|
||||
else
|
||||
throw ManifestSecretError;
|
||||
break;
|
||||
}
|
||||
case Workload.CRONJJOB: {
|
||||
if (k &&
|
||||
k.spec &&
|
||||
k.spec.jobTemplate &&
|
||||
k.spec.jobTemplate.spec &&
|
||||
k.spec.jobTemplate.spec.template &&
|
||||
k.spec.jobTemplate.spec.template.spec &&
|
||||
k.spec.jobTemplate.spec.template.spec.imagePullSecrets)
|
||||
k.spec.jobTemplate.spec.template.spec.imagePullSecrets =
|
||||
getOverriddenSecrets(k.spec.jobTemplate.spec.template.spec.imagePullSecrets, newSecrets, override);
|
||||
else
|
||||
throw ManifestSecretError;
|
||||
break;
|
||||
}
|
||||
case undefined: {
|
||||
core.debug(`Can't set secrets for manifests of kind ${k.kind}.`);
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
if (k && k.spec && k.spec.template && k.spec.template.imagePullSecrets)
|
||||
k.spec.template.spec.imagePullSecrets = getOverriddenSecrets(k.spec.template.spec.imagePullSecrets, newSecrets, override);
|
||||
else
|
||||
throw ManifestSecretError;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return k;
|
||||
};
|
||||
// Merge helper: replace the existing secrets entirely when `override` is set,
// otherwise append the new secrets after the old ones.
const getOverriddenSecrets = (oldSecrets, newSecrets, override) =>
    override ? newSecrets : oldSecrets.concat(newSecrets);
// Shared error thrown when a manifest is missing the expected secret location.
const ManifestSecretError = Error(`Can't update secret of manifest due to improper format`);
|
||||
@ -1,10 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.createInlineArray = void 0;
|
||||
/**
 * Normalizes a string-or-array value into a single comma-separated string.
 * @param str Either a string (returned unchanged) or an array of strings
 * @returns The string itself, or the array joined with commas
 */
function createInlineArray(str) {
    return typeof str === "string" ? str : str.join(",");
}
|
||||
exports.createInlineArray = createInlineArray;
|
||||
@ -1,74 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.checkDockerPath = exports.getDeploymentConfig = void 0;
|
||||
const io = require("@actions/io");
|
||||
const core = require("@actions/core");
|
||||
const docker_1 = require("../types/docker");
|
||||
const githubUtils_1 = require("./githubUtils");
|
||||
// Builds the deployment configuration used for traceability annotations:
// normalized manifest paths, helm chart paths, and an image -> dockerfile-path map.
function getDeploymentConfig() {
    var _a, _b;
    return __awaiter(this, void 0, void 0, function* () {
        // HELM_CHART_PATHS is a ';'-separated env var; drop empty entries.
        let helmChartPaths = ((_b = (_a = process.env) === null || _a === void 0 ? void 0 : _a.HELM_CHART_PATHS) === null || _b === void 0 ? void 0 : _b.split(";").filter((path) => path != "")) ||
            [];
        // Turn each chart path into a link via githubUtils.getNormalizedPath.
        helmChartPaths = helmChartPaths.map((helmchart) => githubUtils_1.getNormalizedPath(helmchart.trim()));
        // The "manifests" action input accepts newline-, comma-, or semicolon-separated entries.
        let inputManifestFiles = core
            .getInput("manifests")
            .split(/[\n,;]+/)
            .filter((manifest) => manifest.trim().length > 0) || [];
        // Manifest paths are only normalized when no helm charts were supplied.
        if ((helmChartPaths === null || helmChartPaths === void 0 ? void 0 : helmChartPaths.length) == 0) {
            inputManifestFiles = inputManifestFiles.map((manifestFile) => githubUtils_1.getNormalizedPath(manifestFile));
        }
        const imageNames = core.getInput("images").split("\n") || [];
        const imageDockerfilePathMap = {};
        //Fetching from image label if available
        for (const image of imageNames) {
            try {
                imageDockerfilePathMap[image] = yield getDockerfilePath(image);
            }
            catch (ex) {
                // Best-effort: a missing dockerfile path must not fail the deployment.
                core.warning(`Failed to get dockerfile path for image ${image.toString()}: ${ex} `);
            }
        }
        return Promise.resolve({
            manifestFilePaths: inputManifestFiles,
            helmChartFilePaths: helmChartPaths,
            dockerfilePaths: imageDockerfilePathMap,
        });
    });
}
|
||||
exports.getDeploymentConfig = getDeploymentConfig;
|
||||
// Resolves the dockerfile path for an image by pulling it and reading the
// "dockerfile-path" label from its config; returns "" when the label is absent.
function getDockerfilePath(image) {
    var _a, _b;
    return __awaiter(this, void 0, void 0, function* () {
        // Fail fast when the docker CLI is not installed.
        yield checkDockerPath();
        const dockerExec = new docker_1.DockerExec("docker");
        yield dockerExec.pull(image, [], false);
        const imageInspectResult = yield dockerExec.inspect(image, [], false);
        // `docker inspect` output is an array; the image config is the first element.
        const imageConfig = JSON.parse(imageInspectResult)[0];
        const DOCKERFILE_PATH_LABEL_KEY = "dockerfile-path";
        let pathValue = "";
        if (((_a = imageConfig === null || imageConfig === void 0 ? void 0 : imageConfig.Config) === null || _a === void 0 ? void 0 : _a.Labels) && ((_b = imageConfig === null || imageConfig === void 0 ? void 0 : imageConfig.Config) === null || _b === void 0 ? void 0 : _b.Labels[DOCKERFILE_PATH_LABEL_KEY])) {
            const pathLabel = imageConfig.Config.Labels[DOCKERFILE_PATH_LABEL_KEY];
            // Convert the label value into a link via githubUtils.getNormalizedPath.
            pathValue = githubUtils_1.getNormalizedPath(pathLabel);
        }
        return Promise.resolve(pathValue);
    });
}
|
||||
/**
 * Verifies that the docker CLI is available on the PATH.
 * @throws Error when docker cannot be located
 */
function checkDockerPath() {
    return __awaiter(this, void 0, void 0, function* () {
        const resolvedPath = yield io.which("docker", false);
        if (!resolvedPath) {
            throw new Error("Docker is not installed.");
        }
    });
}
|
||||
exports.checkDockerPath = checkDockerPath;
|
||||
@ -1,53 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.writeManifestToFile = exports.writeObjectsToFile = exports.getTempDirectory = void 0;
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
const core = require("@actions/core");
|
||||
const os = require("os");
|
||||
const timeUtils_1 = require("./timeUtils");
|
||||
/**
 * Resolves the runner's temp directory, falling back to the OS default.
 * @returns The directory path to use for temporary files
 */
function getTempDirectory() {
    const runnerTemp = process.env["runner.tempDirectory"];
    return runnerTemp || os.tmpdir();
}
|
||||
exports.getTempDirectory = getTempDirectory;
|
||||
// Serializes each K8s object into its own temp file and returns the file paths.
// Objects without metadata.name are skipped with a debug message; write failures
// are logged at debug level and skipped (best-effort).
function writeObjectsToFile(inputObjects) {
    const newFilePaths = [];
    inputObjects.forEach((inputObject) => {
        var _a;
        try {
            const inputObjectString = JSON.stringify(inputObject);
            if ((_a = inputObject === null || inputObject === void 0 ? void 0 : inputObject.metadata) === null || _a === void 0 ? void 0 : _a.name) {
                // File name derives from kind + name + timestamp (see getManifestFileName).
                const fileName = getManifestFileName(inputObject.kind, inputObject.metadata.name);
                fs.writeFileSync(path.join(fileName), inputObjectString);
                newFilePaths.push(fileName);
            }
            else {
                core.debug("Input object is not proper K8s resource object. Object: " +
                    inputObjectString);
            }
        }
        catch (ex) {
            core.debug(`Exception occurred while writing object to file ${inputObject}: ${ex}`);
        }
    });
    return newFilePaths;
}
|
||||
exports.writeObjectsToFile = writeObjectsToFile;
|
||||
// Writes one serialized manifest to a temp file named after kind/name and
// returns the file path. Returns undefined when inputObjectString is falsy.
// NOTE(review): unlike the sibling fileHelper variant of this function,
// write failures here throw instead of being logged — confirm callers expect that.
function writeManifestToFile(inputObjectString, kind, name) {
    if (inputObjectString) {
        try {
            const fileName = getManifestFileName(kind, name);
            fs.writeFileSync(path.join(fileName), inputObjectString);
            return fileName;
        }
        catch (ex) {
            throw Error(`Exception occurred while writing object to file: ${inputObjectString}. Exception: ${ex}`);
        }
    }
}
|
||||
exports.writeManifestToFile = writeManifestToFile;
|
||||
/**
 * Builds a unique temp-file name of the form <kind>_<name>_<timestampMs>
 * inside the temp directory.
 * BUGFIX: removed the stray space that used to follow the final underscore
 * (`_ ${...}`), which produced filenames containing a blank; the sibling
 * fileHelper module builds the same name without a space.
 * @param kind The manifest kind
 * @param name The resource name
 * @returns An absolute path inside the temp directory
 */
function getManifestFileName(kind, name) {
    const filePath = `${kind}_${name}_${timeUtils_1.getCurrentTime().toString()}`;
    const tempDirectory = getTempDirectory();
    return path.join(tempDirectory, path.basename(filePath));
}
|
||||
@ -1,84 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.writeManifestToFile = exports.writeObjectsToFile = exports.assertFileExists = exports.ensureDirExists = exports.getNewUserDirPath = exports.getTempDirectory = void 0;
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
const core = require("@actions/core");
|
||||
const os = require("os");
|
||||
/**
 * Returns the temp directory configured for the runner, or the OS temp dir
 * when the env var is unset or empty.
 */
function getTempDirectory() {
    return process.env["runner.tempDirectory"] ? process.env["runner.tempDirectory"] : os.tmpdir();
}
|
||||
exports.getTempDirectory = getTempDirectory;
|
||||
/**
 * Creates (if needed) and returns a unique per-run working directory under
 * <temp>/kubectlTask/<timestampMs>.
 * @returns The created directory path
 */
function getNewUserDirPath() {
    const baseDir = path.join(getTempDirectory(), "kubectlTask");
    ensureDirExists(baseDir);
    const runDir = path.join(baseDir, getCurrentTime().toString());
    ensureDirExists(runDir);
    return runDir;
}
|
||||
exports.getNewUserDirPath = getNewUserDirPath;
|
||||
/**
 * Creates the directory at dirPath when it does not already exist.
 * Idempotent: does nothing when the directory is present.
 * @param dirPath Directory to guarantee
 */
function ensureDirExists(dirPath) {
    if (fs.existsSync(dirPath))
        return;
    fs.mkdirSync(dirPath);
}
|
||||
exports.ensureDirExists = ensureDirExists;
|
||||
/**
 * Throws (after logging an error) when the given path does not exist on disk.
 * @param path Path to check
 * @throws Error with a FileNotFoundException message
 */
function assertFileExists(path) {
    if (fs.existsSync(path))
        return;
    core.error(`FileNotFoundException : ${path}`);
    throw new Error(`FileNotFoundException: ${path}`);
}
|
||||
exports.assertFileExists = assertFileExists;
|
||||
// Serializes each K8s object into its own temp file and returns the file paths.
// A null/undefined input list yields an empty array; objects missing kind or
// metadata.name are skipped with a debug message; write failures are logged
// at debug level and skipped (best-effort).
function writeObjectsToFile(inputObjects) {
    const newFilePaths = [];
    if (!!inputObjects) {
        inputObjects.forEach((inputObject) => {
            try {
                const inputObjectString = JSON.stringify(inputObject);
                if (!!inputObject.kind &&
                    !!inputObject.metadata &&
                    !!inputObject.metadata.name) {
                    // File name derives from kind + name + timestamp (see getManifestFileName).
                    const fileName = getManifestFileName(inputObject.kind, inputObject.metadata.name);
                    fs.writeFileSync(path.join(fileName), inputObjectString);
                    newFilePaths.push(fileName);
                }
                else {
                    core.debug("Input object is not proper K8s resource object. Object: " +
                        inputObjectString);
                }
            }
            catch (ex) {
                core.debug(`Exception occurred while writing object to file ${inputObject}: ${ex}`);
            }
        });
    }
    return newFilePaths;
}
|
||||
exports.writeObjectsToFile = writeObjectsToFile;
|
||||
// Writes a serialized manifest to a temp file named after kind/name.
// Returns the file path, or "" when the input is falsy or the write fails
// (failures are logged at debug level, not rethrown).
function writeManifestToFile(inputObjectString, kind, name) {
    if (inputObjectString) {
        try {
            const fileName = getManifestFileName(kind, name);
            fs.writeFileSync(path.join(fileName), inputObjectString);
            return fileName;
        }
        catch (ex) {
            core.debug("Exception occurred while writing object to file : " +
                inputObjectString +
                " . Exception: " +
                ex);
        }
    }
    return "";
}
|
||||
exports.writeManifestToFile = writeManifestToFile;
|
||||
// Builds a temp-file name of the form <kind>_<name>_<timestampMs> inside the
// temp directory.
function getManifestFileName(kind, name) {
    const baseName = path.basename(`${kind}_${name}_${getCurrentTime().toString()}`);
    return path.join(getTempDirectory(), baseName);
}
|
||||
// Current wall-clock time in milliseconds since the Unix epoch.
function getCurrentTime() {
    return Date.now();
}
|
||||
@ -1,63 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isHttpUrl = exports.getNormalizedPath = exports.normalizeWorkflowStrLabel = exports.getWorkflowFilePath = void 0;
|
||||
const githubClient_1 = require("../types/githubClient");
|
||||
const core = require("@actions/core");
|
||||
// Resolves the workflow file path (".github/workflows/<file>") for the current run.
// GITHUB_WORKFLOW holds the workflow's display name when a name is set; in that
// case the path is looked up via the GitHub workflows API by matching on name.
// Falls back to the raw GITHUB_WORKFLOW value when no match is found.
function getWorkflowFilePath(githubToken) {
    return __awaiter(this, void 0, void 0, function* () {
        let workflowFilePath = process.env.GITHUB_WORKFLOW;
        if (!workflowFilePath.startsWith(".github/workflows/")) {
            const githubClient = new githubClient_1.GitHubClient(process.env.GITHUB_REPOSITORY, githubToken);
            const response = yield githubClient.getWorkflows();
            if (response) {
                if (response.status === githubClient_1.OkStatusCode && response.data.total_count) {
                    if (response.data.total_count > 0) {
                        // Find the workflow whose display name matches this run.
                        for (const workflow of response.data.workflows) {
                            if (process.env.GITHUB_WORKFLOW === workflow.name) {
                                workflowFilePath = workflow.path;
                                break;
                            }
                        }
                    }
                }
                else if (response.status != githubClient_1.OkStatusCode) {
                    core.error(`An error occurred while getting list of workflows on the repo. Status code: ${response.status}`);
                }
            }
            else {
                core.error(`Failed to get response from workflow list API`);
            }
        }
        return Promise.resolve(workflowFilePath);
    });
}
|
||||
exports.getWorkflowFilePath = getWorkflowFilePath;
|
||||
/**
 * Turns a workflow identifier into a label-safe string: strips a leading
 * ".github/workflows/" prefix and replaces spaces with underscores.
 * @param workflowName Workflow name or path
 * @returns Label-safe workflow string
 */
function normalizeWorkflowStrLabel(workflowName) {
    const workflowsPath = ".github/workflows/";
    const trimmed = workflowName.startsWith(workflowsPath)
        ? workflowName.slice(workflowsPath.length)
        : workflowName;
    return trimmed.replace(/ /g, "_");
}
|
||||
exports.normalizeWorkflowStrLabel = normalizeWorkflowStrLabel;
|
||||
/**
 * Converts a repo-relative path into a GitHub blob permalink on the current
 * commit; values that are already http(s) URLs are returned untouched.
 * @param pathValue Path or URL
 * @returns An absolute GitHub blob URL or the original URL
 */
function getNormalizedPath(pathValue) {
    if (isHttpUrl(pathValue)) {
        return pathValue;
    }
    //if it is not an http url then convert to link from current repo and commit
    return `https://github.com/${process.env.GITHUB_REPOSITORY}/blob/${process.env.GITHUB_SHA}/${pathValue}`;
}
|
||||
exports.getNormalizedPath = getNormalizedPath;
|
||||
/**
 * Tests whether a string is an absolute http or https URL.
 * @param url Candidate string
 * @returns true for http:// or https:// prefixed single-line strings
 */
function isHttpUrl(url) {
    const httpUrlPattern = /^https?:\/\/.*$/;
    return httpUrlPattern.test(url);
}
|
||||
exports.isHttpUrl = isHttpUrl;
|
||||
@ -1,121 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.sleepFor = exports.sendRequest = exports.WebRequestOptions = exports.WebResponse = exports.WebRequest = void 0;
|
||||
// Taken from https://github.com/Azure/aks-set-context/blob/master/src/client.ts
|
||||
const util = require("util");
|
||||
const fs = require("fs");
|
||||
const httpClient = require("typed-rest-client/HttpClient");
|
||||
const core = require("@actions/core");
|
||||
var httpCallbackClient = new httpClient.HttpClient("GITHUB_RUNNER", null, {});
|
||||
// Plain data-holder for an outbound HTTP request; sendRequest/sendRequestInternal
// read its method, uri, body, and headers fields.
class WebRequest {
}
exports.WebRequest = WebRequest;
// Plain data-holder for an HTTP response; toWebResponse populates statusCode,
// statusMessage, headers, and body.
class WebResponse {
}
exports.WebResponse = WebResponse;
// Optional retry tuning for sendRequest: retryCount, retryIntervalInSeconds,
// retriableErrorCodes, retriableStatusCodes.
class WebRequestOptions {
}
exports.WebRequestOptions = WebRequestOptions;
|
||||
// Sends an HTTP request, retrying on retriable error codes and status codes.
// Backoff grows each attempt: wait = wait * interval + interval (seconds).
// A non-string body that is not a readable stream is re-opened from its
// "path" property before every attempt, since a consumed stream can't be replayed.
// @param request WebRequest (method, uri, body, headers)
// @param options optional WebRequestOptions overriding the retry defaults
// @returns the first non-retriable WebResponse; rethrows non-retriable errors.
function sendRequest(request, options) {
    return __awaiter(this, void 0, void 0, function* () {
        let i = 0;
        let retryCount = options && options.retryCount ? options.retryCount : 5;
        let retryIntervalInSeconds = options && options.retryIntervalInSeconds
            ? options.retryIntervalInSeconds
            : 2;
        let retriableErrorCodes = options && options.retriableErrorCodes
            ? options.retriableErrorCodes
            : [
                "ETIMEDOUT",
                "ECONNRESET",
                "ENOTFOUND",
                "ESOCKETTIMEDOUT",
                "ECONNREFUSED",
                "EHOSTUNREACH",
                "EPIPE",
                "EA_AGAIN",
            ];
        let retriableStatusCodes = options && options.retriableStatusCodes
            ? options.retriableStatusCodes
            : [408, 409, 500, 502, 503, 504];
        let timeToWait = retryIntervalInSeconds;
        while (true) {
            try {
                // Rebuild the body stream from its file path on every attempt.
                if (request.body &&
                    typeof request.body !== "string" &&
                    !request.body["readable"]) {
                    request.body = fs.createReadStream(request.body["path"]);
                }
                let response = yield sendRequestInternal(request);
                if (retriableStatusCodes.indexOf(response.statusCode) != -1 &&
                    ++i < retryCount) {
                    core.debug(util.format("Encountered a retriable status code: %s. Message: '%s'.", response.statusCode, response.statusMessage));
                    yield sleepFor(timeToWait);
                    timeToWait =
                        timeToWait * retryIntervalInSeconds + retryIntervalInSeconds;
                    continue;
                }
                return response;
            }
            catch (error) {
                if (retriableErrorCodes.indexOf(error.code) != -1 && ++i < retryCount) {
                    core.debug(util.format("Encountered a retriable error:%s. Message: %s.", error.code, error.message));
                    yield sleepFor(timeToWait);
                    timeToWait =
                        timeToWait * retryIntervalInSeconds + retryIntervalInSeconds;
                }
                else {
                    if (error.code) {
                        core.debug("error code =" + error.code);
                    }
                    throw error;
                }
            }
        }
    });
}
|
||||
exports.sendRequest = sendRequest;
|
||||
/**
 * Resolves after the given number of seconds.
 * @param sleepDurationInSeconds Delay in seconds
 * @returns Promise that resolves once the delay elapses
 */
function sleepFor(sleepDurationInSeconds) {
    const milliseconds = sleepDurationInSeconds * 1000;
    return new Promise((resolve) => setTimeout(resolve, milliseconds));
}
|
||||
exports.sleepFor = sleepFor;
|
||||
// Performs a single HTTP request via the shared typed-rest-client instance
// and converts the raw response into a WebResponse.
function sendRequestInternal(request) {
    return __awaiter(this, void 0, void 0, function* () {
        core.debug(util.format("[%s]%s", request.method, request.uri));
        var response = yield httpCallbackClient.request(request.method, request.uri, request.body, request.headers);
        return yield toWebResponse(response);
    });
}
|
||||
// Converts a typed-rest-client response into a WebResponse. The body is
// JSON-parsed when possible; otherwise the raw text is kept as the body.
function toWebResponse(response) {
    return __awaiter(this, void 0, void 0, function* () {
        var res = new WebResponse();
        if (response) {
            res.statusCode = response.message.statusCode;
            res.statusMessage = response.message.statusMessage;
            res.headers = response.message.headers;
            var body = yield response.readBody();
            if (body) {
                try {
                    res.body = JSON.parse(body);
                }
                catch (error) {
                    // Non-JSON payload: log and keep the raw string body.
                    core.debug("Could not parse response: " + JSON.stringify(error));
                    core.debug("Response: " + JSON.stringify(res.body));
                    res.body = body;
                }
            }
        }
        return res;
    });
}
|
||||
@ -1,101 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getTrafficSplitAPIVersion = exports.downloadKubectl = exports.getStableKubectlVersion = exports.getkubectlDownloadURL = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const os = require("os");
|
||||
const path = require("path");
|
||||
const toolCache = require("@actions/tool-cache");
|
||||
const util = require("util");
|
||||
const httpClient_1 = require("./httpClient");
|
||||
const kubectlToolName = "kubectl";
|
||||
const stableKubectlVersion = "v1.15.0";
|
||||
const stableVersionUrl = "https://storage.googleapis.com/kubernetes-release/release/stable.txt";
|
||||
const trafficSplitAPIVersionPrefix = "split.smi-spec.io";
|
||||
// Returns ".exe" on Windows hosts, "" everywhere else.
function getExecutableExtension() {
    return os.type().match(/^Win/) ? ".exe" : "";
}
|
||||
// Maps Node's arch names onto kubectl release arch names ("x64" -> "amd64");
// other values pass through unchanged.
function getKubectlArch() {
    const nodeArch = os.arch();
    return nodeArch === "x64" ? "amd64" : nodeArch;
}
|
||||
/**
 * Builds the download URL for a kubectl release binary for the current OS.
 * @param version kubectl version tag (e.g. "v1.15.0")
 * @param arch Target architecture (e.g. "amd64")
 * @returns Full download URL; unknown OS types fall through to the Windows URL
 */
function getkubectlDownloadURL(version, arch) {
    const base = "https://storage.googleapis.com/kubernetes-release/release";
    switch (os.type()) {
        case "Linux":
            return `${base}/${version}/bin/linux/${arch}/kubectl`;
        case "Darwin":
            return `${base}/${version}/bin/darwin/${arch}/kubectl`;
        case "Windows_NT":
        default:
            return `${base}/${version}/bin/windows/${arch}/kubectl.exe`;
    }
}
|
||||
exports.getkubectlDownloadURL = getkubectlDownloadURL;
|
||||
// Fetches the latest stable kubectl version string from the upstream
// stable.txt; falls back to the pinned stableKubectlVersion on an empty
// payload or any download failure (failure is warned, not thrown).
function getStableKubectlVersion() {
    return __awaiter(this, void 0, void 0, function* () {
        return toolCache.downloadTool(stableVersionUrl).then((downloadPath) => {
            let version = fs.readFileSync(downloadPath, "utf8").toString().trim();
            if (!version) {
                version = stableKubectlVersion;
            }
            return version;
        }, (error) => {
            core.debug(error);
            core.warning("GetStableVersionFailed");
            return stableKubectlVersion;
        });
    });
}
|
||||
exports.getStableKubectlVersion = getStableKubectlVersion;
|
||||
// Downloads (or reuses a cached) kubectl binary for the given version and the
// host architecture, marks it executable, and returns its path.
// @throws Error "Kubectl ... not found." when the release 404s for this
//   version/arch; "DownloadKubectlFailed" on any other download error.
function downloadKubectl(version) {
    return __awaiter(this, void 0, void 0, function* () {
        let cachedToolpath = toolCache.find(kubectlToolName, version);
        let kubectlDownloadPath = "";
        let arch = getKubectlArch();
        if (!cachedToolpath) {
            try {
                kubectlDownloadPath = yield toolCache.downloadTool(getkubectlDownloadURL(version, arch));
            }
            catch (exception) {
                if (exception instanceof toolCache.HTTPError &&
                    exception.httpStatusCode === httpClient_1.StatusCodes.NOT_FOUND) {
                    throw new Error(util.format("Kubectl '%s' for '%s' arch not found.", version, arch));
                }
                else {
                    throw new Error("DownloadKubectlFailed");
                }
            }
            cachedToolpath = yield toolCache.cacheFile(kubectlDownloadPath, kubectlToolName + getExecutableExtension(), kubectlToolName, version);
        }
        const kubectlPath = path.join(cachedToolpath, kubectlToolName + getExecutableExtension());
        // Ensure the cached binary is executable.
        fs.chmodSync(kubectlPath, "777");
        return kubectlPath;
    });
}
|
||||
exports.downloadKubectl = downloadKubectl;
|
||||
/**
 * Finds the SMI TrafficSplit API version supported by the cluster.
 * @param kubectl Kubectl wrapper used to run "kubectl api-versions"
 * @returns The api-versions line starting with the "split.smi-spec.io" prefix
 * @throws Error("UnableToCreateTrafficSplitManifestFile") when no such line exists
 */
function getTrafficSplitAPIVersion(kubectl) {
    const result = kubectl.executeCommand("api-versions");
    const matchingVersion = result.stdout
        .split("\n")
        .find((version) => version.startsWith(trafficSplitAPIVersionPrefix));
    if (!matchingVersion) {
        throw new Error("UnableToCreateTrafficSplitManifestFile");
    }
    return matchingVersion;
}
|
||||
exports.getTrafficSplitAPIVersion = getTrafficSplitAPIVersion;
|
||||
@ -1,85 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.annotateChildPods = exports.getLastSuccessfulRunSha = exports.checkForErrors = void 0;
|
||||
const core = require("@actions/core");
|
||||
/**
 * Aggregates stderr from a batch of exec results. Results with a non-zero
 * exit code contribute their stderr to one combined error; stderr from a
 * zero-exit result is surfaced as a warning only.
 * @param execResults Results of command invocations ({ exitCode, stderr })
 * @param warnIfError When truthy, log the combined stderr instead of throwing
 * @throws Error containing the combined stderr when warnIfError is falsy
 */
function checkForErrors(execResults, warnIfError) {
    let stderr = "";
    for (const result of execResults) {
        if ((result === null || result === void 0 ? void 0 : result.exitCode) !== 0) {
            stderr += (result === null || result === void 0 ? void 0 : result.stderr) + " \n";
        }
        else if (result === null || result === void 0 ? void 0 : result.stderr) {
            core.warning(result.stderr);
        }
    }
    if (stderr.length === 0) {
        return;
    }
    if (warnIfError) {
        core.warning(stderr.trim());
    }
    else {
        throw new Error(stderr.trim());
    }
}
|
||||
exports.checkForErrors = checkForErrors;
|
||||
// Reads the namespace's deployment annotation and returns the commit SHA of
// the last successful run. Returns GITHUB_SHA when the namespace lookup
// reports stderr, "NA" when no annotation exists, "" when the kubectl call
// throws, and (implicitly) undefined when the lookup yields no output.
function getLastSuccessfulRunSha(kubectl, namespaceName, annotationKey) {
    return __awaiter(this, void 0, void 0, function* () {
        try {
            const result = yield kubectl.getResource("namespace", namespaceName);
            if (result === null || result === void 0 ? void 0 : result.stderr) {
                core.warning(result.stderr);
                return process.env.GITHUB_SHA;
            }
            else if (result === null || result === void 0 ? void 0 : result.stdout) {
                const annotationsSet = JSON.parse(result.stdout).metadata.annotations;
                if (annotationsSet && annotationsSet[annotationKey]) {
                    // The annotation stores JSON with single quotes; normalize before parsing.
                    return JSON.parse(annotationsSet[annotationKey].replace(/'/g, '"'))
                        .commit;
                }
                else {
                    return "NA";
                }
            }
        }
        catch (ex) {
            core.warning(`Failed to get commits from cluster. ${JSON.stringify(ex)}`);
            return "";
        }
    });
}
|
||||
exports.getLastSuccessfulRunSha = getLastSuccessfulRunSha;
|
||||
// Annotates all pods owned by the given resource and returns the collected
// kubectl annotate results. For deployments, ownership is resolved through
// the newest ReplicaSet (pods are owned by the ReplicaSet, not the deployment).
function annotateChildPods(kubectl, resourceType, resourceName, annotationKeyValStr, allPods) {
    var _a;
    return __awaiter(this, void 0, void 0, function* () {
        let owner = resourceName;
        if (resourceType.toLowerCase().indexOf("deployment") > -1) {
            owner = yield kubectl.getNewReplicaSet(resourceName);
        }
        const commandExecutionResults = [];
        if ((allPods === null || allPods === void 0 ? void 0 : allPods.items) && ((_a = allPods.items) === null || _a === void 0 ? void 0 : _a.length) > 0) {
            allPods.items.forEach((pod) => {
                var _a;
                const owners = (_a = pod === null || pod === void 0 ? void 0 : pod.metadata) === null || _a === void 0 ? void 0 : _a.ownerReferences;
                if (owners) {
                    // Annotate the pod once if any ownerReference matches.
                    for (const ownerRef of owners) {
                        if (ownerRef.name === owner) {
                            commandExecutionResults.push(kubectl.annotate("pod", pod.metadata.name, annotationKeyValStr));
                            break;
                        }
                    }
                }
            });
        }
        return yield Promise.all(commandExecutionResults);
    });
}
|
||||
exports.annotateChildPods = annotateChildPods;
|
||||
@ -1,163 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.checkPodStatus = exports.checkManifestStability = void 0;
|
||||
const core = require("@actions/core");
|
||||
const utils = require("./utility");
|
||||
const KubernetesConstants = require("../constants");
|
||||
// Waits for every deployed resource to stabilize: rollout status for workload
// types, readiness polling for pods, and external IP assignment for
// LoadBalancer services. Pod/service problems are warnings; rollout status
// failures are collected and reported at the end as "RolloutStatusTimedout".
function checkManifestStability(kubectl, resources) {
    return __awaiter(this, void 0, void 0, function* () {
        let rolloutStatusHasErrors = false;
        const numberOfResources = resources.length;
        for (let i = 0; i < numberOfResources; i++) {
            const resource = resources[i];
            if (KubernetesConstants.WORKLOAD_TYPES_WITH_ROLLOUT_STATUS.indexOf(resource.type.toLowerCase()) >= 0) {
                try {
                    var result = kubectl.checkRolloutStatus(resource.type, resource.name);
                    utils.checkForErrors([result]);
                }
                catch (ex) {
                    // Describe the resource for diagnostics; fail after all resources are checked.
                    core.error(ex);
                    kubectl.describe(resource.type, resource.name);
                    rolloutStatusHasErrors = true;
                }
            }
            if (utils.isEqual(resource.type, KubernetesConstants.KubernetesWorkload.POD, true)) {
                try {
                    yield checkPodStatus(kubectl, resource.name);
                }
                catch (ex) {
                    // Pod status is best-effort: warn and continue.
                    core.warning(`CouldNotDeterminePodStatus ${JSON.stringify(ex)}`);
                    kubectl.describe(resource.type, resource.name);
                }
            }
            if (utils.isEqual(resource.type, KubernetesConstants.DiscoveryAndLoadBalancerResource.SERVICE, true)) {
                try {
                    const service = getService(kubectl, resource.name);
                    const spec = service.spec;
                    const status = service.status;
                    if (utils.isEqual(spec.type, KubernetesConstants.ServiceTypes.LOAD_BALANCER, true)) {
                        if (!isLoadBalancerIPAssigned(status)) {
                            // No ingress IP yet: poll until assigned or timed out.
                            yield waitForServiceExternalIPAssignment(kubectl, resource.name);
                        }
                        else {
                            console.log("ServiceExternalIP", resource.name, status.loadBalancer.ingress[0].ip);
                        }
                    }
                }
                catch (ex) {
                    core.warning(`CouldNotDetermineServiceStatus of: ${resource.name} Error: ${JSON.stringify(ex)}`);
                    kubectl.describe(resource.type, resource.name);
                }
            }
        }
        if (rolloutStatusHasErrors) {
            throw new Error("RolloutStatusTimedout");
        }
    });
}
|
||||
exports.checkManifestStability = checkManifestStability;
|
||||
// Polls a pod (10s interval, up to 10 minutes) until it leaves the
// Pending/Unknown phases, then reports its final rollout state. Runs
// `kubectl describe` on the pod when it is not cleanly rolled out.
function checkPodStatus(kubectl, podName) {
    return __awaiter(this, void 0, void 0, function* () {
        const sleepTimeout = 10 * 1000; // 10 seconds
        const iterations = 60; // 60 * 10 seconds timeout = 10 minutes max timeout
        let podStatus;
        let kubectlDescribeNeeded = false;
        for (let i = 0; i < iterations; i++) {
            yield utils.sleep(sleepTimeout);
            core.debug(`Polling for pod status: ${podName}`);
            podStatus = getPodStatus(kubectl, podName);
            if (podStatus.phase &&
                podStatus.phase !== "Pending" &&
                podStatus.phase !== "Unknown") {
                break;
            }
        }
        // Re-read once more so the reported phase is as fresh as possible.
        podStatus = getPodStatus(kubectl, podName);
        switch (podStatus.phase) {
            case "Succeeded":
            case "Running":
                if (isPodReady(podStatus)) {
                    console.log(`pod/${podName} is successfully rolled out`);
                }
                else {
                    kubectlDescribeNeeded = true;
                }
                break;
            case "Pending":
                // Still pending after the polling window: treat as a timeout.
                if (!isPodReady(podStatus)) {
                    core.warning(`pod/${podName} rollout status check timedout`);
                    kubectlDescribeNeeded = true;
                }
                break;
            case "Failed":
                core.error(`pod/${podName} rollout failed`);
                kubectlDescribeNeeded = true;
                break;
            default:
                core.warning(`pod/${podName} rollout status: ${podStatus.phase}`);
        }
        if (kubectlDescribeNeeded) {
            kubectl.describe("pod", podName);
        }
    });
}
|
||||
exports.checkPodStatus = checkPodStatus;
|
||||
// Fetches a pod via kubectl, fails on exec errors, and returns the parsed
// .status object of the pod.
function getPodStatus(kubectl, podName) {
    const podResult = kubectl.getResource("pod", podName);
    utils.checkForErrors([podResult]);
    const status = JSON.parse(podResult.stdout).status;
    core.debug(`Pod Status: ${JSON.stringify(status)}`);
    return status;
}
|
||||
/**
 * Checks whether every container in the pod reports ready. Logs the state of
 * each unready container and warns when any container is not ready.
 * @param podStatus Parsed pod .status containing containerStatuses
 * @returns true when all containers are ready
 */
function isPodReady(podStatus) {
    let allContainersAreReady = true;
    for (const container of podStatus.containerStatuses) {
        if (container.ready === false) {
            console.log(`'${container.name}' status: ${JSON.stringify(container.state)}`);
            allContainersAreReady = false;
        }
    }
    if (!allContainersAreReady) {
        core.warning("AllContainersNotInReadyState");
    }
    return allContainersAreReady;
}
|
||||
// Fetches a service by name via kubectl, fails on exec errors, and returns
// the parsed service manifest object.
function getService(kubectl, serviceName) {
    const result = kubectl.getResource(KubernetesConstants.DiscoveryAndLoadBalancerResource.SERVICE, serviceName);
    utils.checkForErrors([result]);
    return JSON.parse(result.stdout);
}
|
||||
// Polls a LoadBalancer service (10s interval, up to 3 minutes) until an
// external ingress IP is assigned; warns on timeout instead of throwing.
function waitForServiceExternalIPAssignment(kubectl, serviceName) {
    return __awaiter(this, void 0, void 0, function* () {
        const sleepTimeout = 10 * 1000; // 10 seconds
        const iterations = 18; // 18 * 10 seconds timeout = 3 minutes max timeout
        for (let i = 0; i < iterations; i++) {
            console.log(`waitForServiceIpAssignment : ${serviceName}`);
            yield utils.sleep(sleepTimeout);
            let status = getService(kubectl, serviceName).status;
            if (isLoadBalancerIPAssigned(status)) {
                console.log("ServiceExternalIP", serviceName, status.loadBalancer.ingress[0].ip);
                return;
            }
        }
        core.warning(`waitForServiceIpAssignmentTimedOut ${serviceName}`);
    });
}
|
||||
// True when the service status carries at least one load-balancer ingress entry.
function isLoadBalancerIPAssigned(status) {
    const ingress = status && status.loadBalancer && status.loadBalancer.ingress;
    return !!ingress && ingress.length > 0;
}
|
||||
@ -1,287 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isWorkloadEntity = exports.updateManifestFiles = exports.updateImagePullSecrets = exports.substituteImageNameInSpecFile = exports.getDeleteCmdArgs = exports.createKubectlArgs = exports.getKubectl = exports.getManifestFiles = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const path = require("path");
|
||||
const kubectlutility = require("./kubectl-util");
|
||||
const io = require("@actions/io");
|
||||
const utility_1 = require("./utility");
|
||||
const fileHelper = require("./files-helper");
|
||||
const files_helper_1 = require("./files-helper");
|
||||
const KubernetesObjectUtility = require("./resource-object-utility");
|
||||
const TaskInputParameters = require("../input-parameters");
|
||||
// Passes the manifest path list through unchanged; logs and returns null when absent.
function getManifestFiles(manifestFilePaths) {
    if (manifestFilePaths) {
        return manifestFilePaths;
    }
    core.debug("file input is not present");
    return null;
}
|
||||
exports.getManifestFiles = getManifestFiles;
|
||||
// Prefers a kubectl already on PATH; falls back to downloading the latest
// stable kubectl build when none is found.
function getKubectl() {
    return __awaiter(this, void 0, void 0, function* () {
        try {
            return Promise.resolve(io.which("kubectl", true));
        }
        catch (err) {
            const stableVersion = yield kubectlutility.getStableKubectlVersion();
            return kubectlutility.downloadKubectl(stableVersion);
        }
    });
}
exports.getKubectl = getKubectl;
|
||||
// Assembles kubectl positional args: comma-joined kinds followed by
// space-separated names. When only names are given the result keeps a
// leading space, matching existing caller expectations.
function createKubectlArgs(kinds, names) {
    let args = "";
    if (kinds != null && kinds.size > 0) {
        args += createInlineArray(Array.from(kinds.values()));
    }
    if (names != null && names.size > 0) {
        args += " " + Array.from(names.values()).join(" ");
    }
    return args;
}
|
||||
exports.createKubectlArgs = createKubectlArgs;
|
||||
// Joins an optional prefix and optional user-supplied args with a single space.
function getDeleteCmdArgs(argsPrefix, inputArgs) {
    const pieces = [];
    if (argsPrefix && argsPrefix.length > 0) {
        pieces.push(argsPrefix);
    }
    if (inputArgs && inputArgs.length > 0) {
        pieces.push(inputArgs);
    }
    return pieces.join(" ");
}
|
||||
exports.getDeleteCmdArgs = getDeleteCmdArgs;
|
||||
/*
   Substitutes a new image reference into every matching `image:` line of a spec.

   For example,
   currentString: `image: "example/example-image"`
   imageName: `example/example-image`
   imageNameWithNewTag: `example/example-image:identifiertag`

   This substituteImageNameInSpecFile function would return
   return Value: `image: "example/example-image:identifiertag"`
*/
function substituteImageNameInSpecFile(spec, imageName, imageNameWithNewTag) {
    // Fast path: image not referenced anywhere in the spec.
    if (spec.indexOf(imageName) < 0)
        return spec;
    return spec.split("\n").reduce((acc, line) => {
        const imageKeyword = line.match(/^ *image:/);
        if (imageKeyword) {
            // Fixed: dropped the unused `currentImageTag` binding from the
            // destructured split; only the image name is ever compared.
            let currentImageName = line
                .substring(imageKeyword[0].length) // consume the line from keyword onwards
                .trim()
                .replace(/[',"]/g, "") // replace allowed quotes with nothing
                .split(":")[0]; // keep only the name, drop any tag
            if (currentImageName.indexOf(" ") > 0) {
                currentImageName = currentImageName.split(" ")[0]; // remove comments
            }
            if (currentImageName === imageName) {
                return acc + `${imageKeyword[0]} ${imageNameWithNewTag}\n`;
            }
        }
        return acc + line + "\n";
    }, "");
}
|
||||
exports.substituteImageNameInSpecFile = substituteImageNameInSpecFile;
|
||||
// Accepts either a single string (returned as-is) or an array of strings
// (joined into a comma-separated list).
function createInlineArray(str) {
    if (typeof str !== "string") {
        return str.join(",");
    }
    return str;
}
|
||||
// Reads imagePullSecrets from wherever the resource kind keeps its pod spec:
// directly on the spec for pods, under the job template for cronjobs, and
// under spec.template for other controllers.
function getImagePullSecrets(inputObject) {
    if (!inputObject || !inputObject.spec) {
        return;
    }
    const spec = inputObject.spec;
    if (utility_1.isEqual(inputObject.kind, "pod") && spec.imagePullSecrets) {
        return spec.imagePullSecrets;
    }
    if (utility_1.isEqual(inputObject.kind, "cronjob") &&
        spec.jobTemplate &&
        spec.jobTemplate.spec &&
        spec.jobTemplate.spec.template &&
        spec.jobTemplate.spec.template.spec &&
        spec.jobTemplate.spec.template.spec.imagePullSecrets) {
        return spec.jobTemplate.spec.template.spec.imagePullSecrets;
    }
    if (spec.template && spec.template.spec && spec.template.spec.imagePullSecrets) {
        return spec.template.spec.imagePullSecrets;
    }
}
|
||||
// Writes (or clears, when the list is empty) imagePullSecrets at the pod-spec
// location appropriate for the resource kind.
function setImagePullSecrets(inputObject, newImagePullSecrets) {
    if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
        return;
    }
    const spec = inputObject.spec;
    if (utility_1.isEqual(inputObject.kind, "pod")) {
        if (newImagePullSecrets.length > 0) {
            spec.imagePullSecrets = newImagePullSecrets;
        }
        else {
            delete spec.imagePullSecrets;
        }
        return;
    }
    if (utility_1.isEqual(inputObject.kind, "cronjob")) {
        // CronJobs nest the pod spec under jobTemplate.spec.template.spec.
        const jobPodSpec = spec.jobTemplate &&
            spec.jobTemplate.spec &&
            spec.jobTemplate.spec.template &&
            spec.jobTemplate.spec.template.spec;
        if (jobPodSpec) {
            if (newImagePullSecrets.length > 0) {
                jobPodSpec.imagePullSecrets = newImagePullSecrets;
            }
            else {
                delete jobPodSpec.imagePullSecrets;
            }
        }
        return;
    }
    if (spec.template && spec.template.spec) {
        if (newImagePullSecrets.length > 0) {
            spec.template.spec.imagePullSecrets = newImagePullSecrets;
        }
        else {
            delete spec.template.spec.imagePullSecrets;
        }
    }
}
|
||||
// Replaces matching `image:` lines with the newly tagged image reference;
// all other lines pass through untouched. Unlike substituteImageNameInSpecFile,
// this variant does not strip trailing comments before comparing names.
function substituteImageNameInSpecContent(currentString, imageName, imageNameWithNewTag) {
    if (currentString.indexOf(imageName) < 0) {
        core.debug(`No occurence of replacement token: ${imageName} found`);
        return currentString;
    }
    let result = "";
    for (const line of currentString.split("\n")) {
        const imageKeyword = line.match(/^ *image:/);
        if (imageKeyword) {
            const candidate = line
                .substring(imageKeyword[0].length) // consume the line from keyword onwards
                .trim()
                .replace(/[',"]/g, "") // replace allowed quotes with nothing
                .split(":")[0];
            if (candidate === imageName) {
                result += `${imageKeyword[0]} ${imageNameWithNewTag}\n`;
                continue;
            }
        }
        result += line + "\n";
    }
    return result;
}
|
||||
// Rewrites each manifest file with updated container image references and
// returns the list of rewritten temp-file paths.
function updateContainerImagesInManifestFiles(filePaths, containers) {
    if (!(filePaths && filePaths.length > 0))
        return filePaths;
    const tempDirectory = files_helper_1.getTempDirectory();
    const newFilePaths = [];
    for (const filePath of filePaths) {
        let contents = fs.readFileSync(filePath).toString();
        for (const container of containers) {
            let imageName = container.split(":")[0];
            if (imageName.indexOf("@") > 0) {
                imageName = imageName.split("@")[0]; // strip any digest portion
            }
            if (contents.indexOf(imageName) > 0) {
                contents = substituteImageNameInSpecFile(contents, imageName, container);
            }
        }
        const fileName = path.join(tempDirectory, path.basename(filePath));
        fs.writeFileSync(path.join(fileName), contents);
        newFilePaths.push(fileName);
    }
    return newFilePaths;
}
|
||||
// Appends {name: secret} references for the given secret names to whatever
// imagePullSecrets the object already carries.
function updateImagePullSecrets(inputObject, newImagePullSecrets) {
    if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
        return;
    }
    const newRefs = newImagePullSecrets.length > 0
        ? Array.from(newImagePullSecrets, (x) => (x ? { name: x } : null))
        : [];
    const existingRefs = getImagePullSecrets(inputObject) || new Array();
    setImagePullSecrets(inputObject, existingRefs.concat(newRefs));
}
|
||||
exports.updateImagePullSecrets = updateImagePullSecrets;
|
||||
// Injects image pull secrets into every workload object found in the manifest
// files and writes the updated objects out to new files.
function updateImagePullSecretsInManifestFiles(filePaths, imagePullSecrets) {
    if (!(imagePullSecrets && imagePullSecrets.length > 0))
        return filePaths;
    const newObjectsList = [];
    for (const filePath of filePaths) {
        const fileContents = fs.readFileSync(filePath).toString();
        yaml.safeLoadAll(fileContents, (inputObject) => {
            if (inputObject && inputObject.kind) {
                if (KubernetesObjectUtility.isWorkloadEntity(inputObject.kind)) {
                    KubernetesObjectUtility.updateImagePullSecrets(inputObject, imagePullSecrets);
                }
                newObjectsList.push(inputObject);
            }
        });
    }
    return fileHelper.writeObjectsToFile(newObjectsList);
}
|
||||
// Applies container-image and image-pull-secret substitutions from the task
// input parameters across all provided manifest files.
function updateManifestFiles(manifestFilePaths) {
    if (!manifestFilePaths || manifestFilePaths.length === 0) {
        throw new Error("Manifest files not provided");
    }
    // update container images
    const withImages = updateContainerImagesInManifestFiles(manifestFilePaths, TaskInputParameters.containers);
    // update pull secrets
    return updateImagePullSecretsInManifestFiles(withImages, TaskInputParameters.imagePullSecrets);
}
|
||||
exports.updateManifestFiles = updateManifestFiles;
|
||||
// Kinds this action treats as workloads (eligible for image / pull-secret updates).
const workloadTypes = [
    "deployment",
    "replicaset",
    "daemonset",
    "pod",
    "statefulset",
    "job",
    "cronjob",
];
// Case-insensitive membership test against the known workload kinds.
function isWorkloadEntity(kind) {
    if (!kind) {
        core.debug("ResourceKindNotDefined");
        return false;
    }
    return workloadTypes.some((type) => utility_1.isEqual(type, kind));
}
exports.isWorkloadEntity = isWorkloadEntity;
|
||||
@ -1,37 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.setImagePullSecrets = exports.getImagePullSecrets = void 0;
|
||||
const kubernetesTypes_1 = require("../types/kubernetesTypes");
|
||||
// Locates imagePullSecrets for the given object: cronjobs keep them under the
// job template's pod spec, pods directly under spec, other controllers under
// spec.template.spec.
function getImagePullSecrets(inputObject) {
    if (!inputObject || !inputObject.spec)
        return null;
    const kind = inputObject.kind.toLowerCase();
    if (kind === kubernetesTypes_1.KubernetesWorkload.CRON_JOB.toLowerCase()) {
        const jobPodSpec = inputObject.spec.jobTemplate &&
            inputObject.spec.jobTemplate.spec &&
            inputObject.spec.jobTemplate.spec.template &&
            inputObject.spec.jobTemplate.spec.template.spec;
        return jobPodSpec ? jobPodSpec.imagePullSecrets : undefined;
    }
    if (kind === kubernetesTypes_1.KubernetesWorkload.POD.toLowerCase())
        return inputObject.spec.imagePullSecrets;
    if (inputObject.spec.template && inputObject.spec.template.spec) {
        return inputObject.spec.template.spec.imagePullSecrets;
    }
}
|
||||
exports.getImagePullSecrets = getImagePullSecrets;
|
||||
// Writes imagePullSecrets into the pod spec appropriate for the object's kind.
function setImagePullSecrets(inputObject, newImagePullSecrets) {
    if (!inputObject || !inputObject.spec || !newImagePullSecrets)
        return;
    const kind = inputObject.kind.toLowerCase();
    if (kind === kubernetesTypes_1.KubernetesWorkload.POD.toLowerCase()) {
        inputObject.spec.imagePullSecrets = newImagePullSecrets;
        return;
    }
    if (kind === kubernetesTypes_1.KubernetesWorkload.CRON_JOB.toLowerCase()) {
        const jobPodSpec = inputObject.spec.jobTemplate &&
            inputObject.spec.jobTemplate.spec &&
            inputObject.spec.jobTemplate.spec.template &&
            inputObject.spec.jobTemplate.spec.template.spec;
        if (jobPodSpec) {
            jobPodSpec.imagePullSecrets = newImagePullSecrets;
        }
        return;
    }
    if (inputObject.spec.template && inputObject.spec.template.spec) {
        inputObject.spec.template.spec.imagePullSecrets = newImagePullSecrets;
        return;
    }
}
|
||||
exports.setImagePullSecrets = setImagePullSecrets;
|
||||
@ -1,65 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.setSpecSelectorLabels = exports.getSpecSelectorLabels = exports.updateSpecLabels = void 0;
|
||||
const kubernetesTypes_1 = require("../types/kubernetesTypes");
|
||||
// Merges (or, when override is set, replaces) labels at the spec level —
// i.e. on the pod template for controllers, on metadata for bare pods.
function updateSpecLabels(inputObject, newLabels, override) {
    if (!inputObject)
        throw kubernetesTypes_1.NullInputObjectError;
    if (!inputObject.kind)
        throw kubernetesTypes_1.InputObjectKindNotDefinedError;
    if (!newLabels)
        return;
    let labels = getSpecLabels(inputObject);
    if (!override) {
        labels = labels || new Map();
        Object.keys(newLabels).forEach((key) => (labels[key] = newLabels[key]));
    }
    else {
        labels = newLabels;
    }
    setSpecLabels(inputObject, labels);
}
exports.updateSpecLabels = updateSpecLabels;
|
||||
// Labels live on metadata for bare pods and on the pod template's metadata
// for controller kinds; null when neither location exists.
function getSpecLabels(inputObject) {
    if (!inputObject)
        return null;
    if (inputObject.kind.toLowerCase() === kubernetesTypes_1.KubernetesWorkload.POD.toLowerCase())
        return inputObject.metadata.labels;
    if (inputObject.spec && inputObject.spec.template && inputObject.spec.template.metadata)
        return inputObject.spec.template.metadata.labels;
    return null;
}
|
||||
// Writes labels onto metadata (pods) or the pod template's metadata (controllers).
// Fixed: the previous revision checked and wrote `spec.template.metatada` (typo),
// so labels were silently never applied to controller pod templates —
// getSpecLabels reads the correctly spelled `metadata`.
function setSpecLabels(inputObject, newLabels) {
    if (!inputObject || !newLabels)
        return null;
    if (inputObject.kind.toLowerCase() === kubernetesTypes_1.KubernetesWorkload.POD.toLowerCase()) {
        inputObject.metadata.labels = newLabels;
        return;
    }
    if (inputObject.spec && inputObject.spec.template && inputObject.spec.template.metadata) {
        inputObject.spec.template.metadata.labels = newLabels;
        return;
    }
}
|
||||
// Services keep selector labels directly under spec.selector; workload kinds
// nest them under spec.selector.matchLabels.
function getSpecSelectorLabels(inputObject) {
    const selector = inputObject && inputObject.spec ? inputObject.spec.selector : undefined;
    if (!selector)
        return undefined;
    return kubernetesTypes_1.isServiceEntity(inputObject.kind)
        ? selector
        : selector.matchLabels;
}
|
||||
exports.getSpecSelectorLabels = getSpecSelectorLabels;
|
||||
// Mirror of getSpecSelectorLabels: writes to the location the kind uses.
function setSpecSelectorLabels(inputObject, newLabels) {
    const spec = inputObject ? inputObject.spec : undefined;
    if (!spec || !spec.selector)
        return;
    if (kubernetesTypes_1.isServiceEntity(inputObject.kind)) {
        spec.selector = newLabels;
    }
    else {
        spec.selector.matchLabels = newLabels;
    }
}
|
||||
exports.setSpecSelectorLabels = setSpecSelectorLabels;
|
||||
@ -1,160 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.checkPodStatus = exports.checkManifestStability = void 0;
|
||||
const core = require("@actions/core");
|
||||
const KubernetesConstants = require("../types/kubernetesTypes");
|
||||
const kubectlUtils_1 = require("./kubectlUtils");
|
||||
const timeUtils_1 = require("./timeUtils");
|
||||
// Waits for every deployed resource to become stable:
//  - workloads with rollout status: `kubectl rollout status` must succeed
//  - pods: poll phase/readiness via checkPodStatus
//  - LoadBalancer services: wait for an external IP assignment
// Only rollout-status failures are fatal (thrown at the end); pod/service
// problems are logged as warnings with `kubectl describe` output.
function checkManifestStability(kubectl, resources) {
    return __awaiter(this, void 0, void 0, function* () {
        let rolloutStatusHasErrors = false;
        for (let i = 0; i < resources.length; i++) {
            const resource = resources[i];
            if (KubernetesConstants.WORKLOAD_TYPES_WITH_ROLLOUT_STATUS.indexOf(resource.type.toLowerCase()) >= 0) {
                try {
                    const result = yield kubectl.checkRolloutStatus(resource.type, resource.name);
                    kubectlUtils_1.checkForErrors([result]);
                }
                catch (ex) {
                    // Record the failure but keep checking the remaining resources.
                    core.error(ex);
                    yield kubectl.describe(resource.type, resource.name);
                    rolloutStatusHasErrors = true;
                }
            }
            if (resource.type == KubernetesConstants.KubernetesWorkload.POD) {
                try {
                    yield checkPodStatus(kubectl, resource.name);
                }
                catch (ex) {
                    // Pod status problems are non-fatal; describe for diagnostics.
                    core.warning(`Could not determine pod status: ${JSON.stringify(ex)}`);
                    yield kubectl.describe(resource.type, resource.name);
                }
            }
            if (resource.type ==
                KubernetesConstants.DiscoveryAndLoadBalancerResource.SERVICE) {
                try {
                    const service = yield getService(kubectl, resource.name);
                    const { spec, status } = service;
                    if (spec.type === KubernetesConstants.ServiceTypes.LOAD_BALANCER) {
                        if (!isLoadBalancerIPAssigned(status)) {
                            // Block until the external IP shows up (or 3-minute timeout).
                            yield waitForServiceExternalIPAssignment(kubectl, resource.name);
                        }
                        else {
                            core.info(`ServiceExternalIP ${resource.name} ${status.loadBalancer.ingress[0].ip}`);
                        }
                    }
                }
                catch (ex) {
                    core.warning(`Could not determine service status of: ${resource.name} Error: ${ex}`);
                    yield kubectl.describe(resource.type, resource.name);
                }
            }
        }
        if (rolloutStatusHasErrors) {
            throw new Error("Rollout status error");
        }
    });
}
exports.checkManifestStability = checkManifestStability;
|
||||
// Polls a pod every 10 seconds (up to 10 minutes) until it leaves the
// Pending/Unknown phases, then reports its final phase; runs
// `kubectl describe pod` whenever the pod looks unhealthy.
function checkPodStatus(kubectl, podName) {
    return __awaiter(this, void 0, void 0, function* () {
        const sleepTimeout = 10 * 1000; // 10 seconds
        const iterations = 60; // 60 * 10 seconds timeout = 10 minutes max timeout
        let podStatus;
        let kubectlDescribeNeeded = false;
        for (let i = 0; i < iterations; i++) {
            yield timeUtils_1.sleep(sleepTimeout);
            core.debug(`Polling for pod status: ${podName}`);
            podStatus = yield getPodStatus(kubectl, podName);
            if (podStatus &&
                (podStatus === null || podStatus === void 0 ? void 0 : podStatus.phase) !== "Pending" &&
                (podStatus === null || podStatus === void 0 ? void 0 : podStatus.phase) !== "Unknown") {
                break;
            }
        }
        // Re-read once more so the reported phase is current.
        podStatus = yield getPodStatus(kubectl, podName);
        switch (podStatus.phase) {
            case "Succeeded":
            case "Running":
                if (isPodReady(podStatus)) {
                    console.log(`pod/${podName} is successfully rolled out`);
                }
                else {
                    kubectlDescribeNeeded = true;
                }
                break;
            case "Pending":
                // Still pending after the polling window above => treated as timeout.
                if (!isPodReady(podStatus)) {
                    core.warning(`pod/${podName} rollout status check timed out`);
                    kubectlDescribeNeeded = true;
                }
                break;
            case "Failed":
                core.error(`pod/${podName} rollout failed`);
                kubectlDescribeNeeded = true;
                break;
            default:
                core.warning(`pod/${podName} rollout status: ${podStatus.phase}`);
        }
        if (kubectlDescribeNeeded) {
            yield kubectl.describe("pod", podName);
        }
    });
}
exports.checkPodStatus = checkPodStatus;
|
||||
// `kubectl get pod <name>`, parse the JSON, and return the status stanza.
function getPodStatus(kubectl, podName) {
    return __awaiter(this, void 0, void 0, function* () {
        const result = yield kubectl.getResource("pod", podName);
        kubectlUtils_1.checkForErrors([result]);
        const parsed = JSON.parse(result.stdout);
        return parsed.status;
    });
}
|
||||
// Every container must report ready; logs state for any container that is not.
function isPodReady(podStatus) {
    let allReady = true;
    for (const container of podStatus.containerStatuses) {
        if (container.ready === false) {
            core.info(`'${container.name}' status: ${JSON.stringify(container.state)}`);
            allReady = false;
        }
    }
    if (!allReady) {
        core.warning("All containers not in ready state");
    }
    return allReady;
}
|
||||
// Fetches and parses a Service object via kubectl, failing on kubectl errors.
function getService(kubectl, serviceName) {
    return __awaiter(this, void 0, void 0, function* () {
        const result = yield kubectl.getResource(KubernetesConstants.DiscoveryAndLoadBalancerResource.SERVICE, serviceName);
        kubectlUtils_1.checkForErrors([result]);
        return JSON.parse(result.stdout);
    });
}
|
||||
// Polls the service every 10 seconds (up to 3 minutes) until a load-balancer
// external IP appears; warns when the wait times out.
function waitForServiceExternalIPAssignment(kubectl, serviceName) {
    return __awaiter(this, void 0, void 0, function* () {
        const sleepTimeout = 10 * 1000; // 10 seconds
        const iterations = 18; // 18 * 10 seconds timeout = 3 minutes max timeout
        for (let i = 0; i < iterations; i++) {
            core.info(`Wait for service ip assignment : ${serviceName}`);
            yield timeUtils_1.sleep(sleepTimeout);
            const status = (yield getService(kubectl, serviceName)).status;
            if (isLoadBalancerIPAssigned(status)) {
                core.info(`ServiceExternalIP ${serviceName} ${status.loadBalancer.ingress[0].ip}`);
                return;
            }
        }
        // Fixed: the timeout message previously read "...timed out<name>" with
        // no separator between the text and the service name.
        core.warning(`Wait for service ip assignment timed out ${serviceName}`);
    });
}
|
||||
// At least one ingress entry under status.loadBalancer means an IP was assigned.
function isLoadBalancerIPAssigned(status) {
    const ingress = status && status.loadBalancer && status.loadBalancer.ingress;
    return !!ingress && ingress.length > 0;
}
|
||||
@ -1,224 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getResources = exports.updateSelectorLabels = exports.updateImagePullSecrets = exports.updateObjectAnnotations = exports.updateObjectLabels = exports.getReplicaCount = exports.substituteImageNameInSpecFile = exports.UnsetClusterSpecificDetails = exports.updateManifestFiles = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const path = require("path");
|
||||
const fileHelper = require("./fileUtils");
|
||||
const fileUtils_1 = require("./fileUtils");
|
||||
const kubernetesTypes_1 = require("../types/kubernetesTypes");
|
||||
const manifestSpecLabelUtils_1 = require("./manifestSpecLabelUtils");
|
||||
const manifestPullSecretUtils_1 = require("./manifestPullSecretUtils");
|
||||
// Rewrites manifests with the `images` and `imagepullsecrets` action inputs
// applied, returning the updated file paths.
// Fixed: the guard previously used `manifestFilePaths?.length === 0`, which is
// false for null/undefined (undefined === 0), so a missing path list slipped
// past and crashed later; reject null, undefined, and empty lists alike.
function updateManifestFiles(manifestFilePaths) {
    if (!manifestFilePaths || manifestFilePaths.length === 0) {
        throw new Error("Manifest files not provided");
    }
    // update container images
    const containers = core.getInput("images").split("\n");
    const manifestFiles = updateContainerImagesInManifestFiles(manifestFilePaths, containers);
    // update pull secrets
    const imagePullSecrets = core
        .getInput("imagepullsecrets")
        .split("\n")
        .filter((secret) => secret.trim().length > 0);
    return updateImagePullSecretsInManifestFiles(manifestFiles, imagePullSecrets);
}
|
||||
exports.updateManifestFiles = updateManifestFiles;
|
||||
// Strips cluster-assigned fields (uid, resourceVersion, status details, ...)
// so the object can be re-applied elsewhere; keeps only portable metadata
// (annotations, labels, name) and empties the status stanza.
// Fixed: removed the dead `if (!!resource)` re-check that immediately
// followed the `if (!resource) return` guard.
function UnsetClusterSpecificDetails(resource) {
    if (!resource) {
        return;
    }
    const { metadata, status } = resource;
    if (metadata) {
        resource.metadata = {
            annotations: metadata.annotations,
            labels: metadata.labels,
            name: metadata.name,
        };
    }
    if (status) {
        resource.status = {};
    }
}
|
||||
exports.UnsetClusterSpecificDetails = UnsetClusterSpecificDetails;
|
||||
// Substitutes updated image references into each manifest and writes the
// results out as temp files, returning the new file paths.
function updateContainerImagesInManifestFiles(filePaths, containers) {
    if (filePaths != null && filePaths.length <= 0)
        return filePaths;
    const newFilePaths = [];
    for (const filePath of filePaths) {
        let contents = fs.readFileSync(filePath).toString();
        for (const container of containers) {
            let imageName = container.split(":")[0];
            if (imageName.indexOf("@") > 0) {
                imageName = imageName.split("@")[0]; // strip any digest portion
            }
            if (contents.indexOf(imageName) > 0) {
                contents = substituteImageNameInSpecFile(contents, imageName, container);
            }
        }
        // write updated files
        const tempDirectory = fileUtils_1.getTempDirectory();
        const fileName = path.join(tempDirectory, path.basename(filePath));
        fs.writeFileSync(path.join(fileName), contents);
        newFilePaths.push(fileName);
    }
    return newFilePaths;
}
|
||||
/*
   Example:

   Input of
     currentString: `image: "example/example-image"`
     imageName: `example/example-image`
     imageNameWithNewTag: `example/example-image:identifiertag`

   would return
     `image: example/example-image:identifiertag`
*/
function substituteImageNameInSpecFile(spec, imageName, imageNameWithNewTag) {
    if (spec.indexOf(imageName) < 0)
        return spec;
    let result = "";
    for (const line of spec.split("\n")) {
        const imageKeyword = line.match(/^ *image:/);
        if (imageKeyword) {
            // Strip quotes, drop the tag, and cut trailing comments before comparing.
            let candidate = line
                .substring(imageKeyword[0].length)
                .trim()
                .replace(/[',"]/g, "")
                .split(":")[0];
            if (candidate.indexOf(" ") > 0) {
                candidate = candidate.split(" ")[0];
            }
            if (candidate === imageName) {
                result += `${imageKeyword[0]} ${imageNameWithNewTag}\n`;
                continue;
            }
        }
        result += line + "\n";
    }
    return result;
}
|
||||
exports.substituteImageNameInSpecFile = substituteImageNameInSpecFile;
|
||||
// Pods and daemonsets carry no replica field (reported as 0); every other
// kind reads spec.replicas.
function getReplicaCount(inputObject) {
    if (!inputObject)
        throw kubernetesTypes_1.NullInputObjectError;
    if (!inputObject.kind) {
        throw kubernetesTypes_1.InputObjectKindNotDefinedError;
    }
    const kind = inputObject.kind.toLowerCase();
    const isPod = kind === kubernetesTypes_1.KubernetesWorkload.POD.toLowerCase();
    const isDaemonSet = kind === kubernetesTypes_1.KubernetesWorkload.DAEMON_SET.toLowerCase();
    if (isPod || isDaemonSet)
        return 0;
    return inputObject.spec.replicas;
}
exports.getReplicaCount = getReplicaCount;
|
||||
// Merges (or replaces, when override) labels on the object's own metadata.
function updateObjectLabels(inputObject, newLabels, override = false) {
    if (!inputObject)
        throw kubernetesTypes_1.NullInputObjectError;
    if (!inputObject.metadata)
        throw kubernetesTypes_1.InputObjectMetadataNotDefinedError;
    if (!newLabels)
        return;
    if (override) {
        inputObject.metadata.labels = newLabels;
        return;
    }
    const merged = inputObject.metadata.labels || new Map();
    for (const key of Object.keys(newLabels)) {
        merged[key] = newLabels[key];
    }
    inputObject.metadata.labels = merged;
}
|
||||
exports.updateObjectLabels = updateObjectLabels;
|
||||
// Merges (or replaces, when override) annotations on the object's metadata.
function updateObjectAnnotations(inputObject, newAnnotations, override = false) {
    if (!inputObject)
        throw kubernetesTypes_1.NullInputObjectError;
    if (!inputObject.metadata)
        throw kubernetesTypes_1.InputObjectMetadataNotDefinedError;
    if (!newAnnotations)
        return;
    if (override) {
        inputObject.metadata.annotations = newAnnotations;
        return;
    }
    const merged = inputObject.metadata.annotations || new Map();
    for (const key of Object.keys(newAnnotations)) {
        merged[key] = newAnnotations[key];
    }
    inputObject.metadata.annotations = merged;
}
|
||||
exports.updateObjectAnnotations = updateObjectAnnotations;
|
||||
// Converts secret names to {name} references and merges them with (or, when
// override, replaces) the object's existing imagePullSecrets.
function updateImagePullSecrets(inputObject, newImagePullSecrets, override = false) {
    if (!inputObject || !inputObject.spec || !newImagePullSecrets)
        return;
    const newRefs = Array.from(newImagePullSecrets, (name) => ({ name }));
    let combined = manifestPullSecretUtils_1.getImagePullSecrets(inputObject);
    if (override) {
        combined = newRefs;
    }
    else {
        combined = (combined || []).concat(newRefs);
    }
    manifestPullSecretUtils_1.setImagePullSecrets(inputObject, combined);
}
|
||||
exports.updateImagePullSecrets = updateImagePullSecrets;
|
||||
function updateSelectorLabels(inputObject, newLabels, override) {
|
||||
if (!inputObject)
|
||||
throw kubernetesTypes_1.NullInputObjectError;
|
||||
if (!inputObject.kind)
|
||||
throw kubernetesTypes_1.InputObjectKindNotDefinedError;
|
||||
if (!newLabels)
|
||||
return;
|
||||
if (inputObject.kind.toLowerCase() === kubernetesTypes_1.KubernetesWorkload.POD.toLowerCase())
|
||||
return;
|
||||
let existingLabels = manifestSpecLabelUtils_1.getSpecSelectorLabels(inputObject);
|
||||
if (override) {
|
||||
existingLabels = newLabels;
|
||||
}
|
||||
else {
|
||||
existingLabels = existingLabels || new Map();
|
||||
Object.keys(newLabels).forEach((key) => (existingLabels[key] = newLabels[key]));
|
||||
}
|
||||
manifestSpecLabelUtils_1.setSpecSelectorLabels(inputObject, existingLabels);
|
||||
}
|
||||
exports.updateSelectorLabels = updateSelectorLabels;
|
||||
function getResources(filePaths, filterResourceTypes) {
|
||||
if (!filePaths)
|
||||
return [];
|
||||
const resources = [];
|
||||
filePaths.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath).toString();
|
||||
yaml.safeLoadAll(fileContents, (inputObject) => {
|
||||
const inputObjectKind = (inputObject === null || inputObject === void 0 ? void 0 : inputObject.kind) || "";
|
||||
if (filterResourceTypes.filter((type) => inputObjectKind.toLowerCase() === type.toLowerCase()).length > 0) {
|
||||
resources.push({
|
||||
type: inputObject.kind,
|
||||
name: inputObject.metadata.name,
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
return resources;
|
||||
}
|
||||
exports.getResources = getResources;
|
||||
function updateImagePullSecretsInManifestFiles(filePaths, imagePullSecrets) {
|
||||
if ((imagePullSecrets === null || imagePullSecrets === void 0 ? void 0 : imagePullSecrets.length) <= 0)
|
||||
return filePaths;
|
||||
const newObjectsList = [];
|
||||
filePaths.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath).toString();
|
||||
yaml.safeLoadAll(fileContents, (inputObject) => {
|
||||
if (inputObject === null || inputObject === void 0 ? void 0 : inputObject.kind) {
|
||||
const { kind } = inputObject;
|
||||
if (kubernetesTypes_1.isWorkloadEntity(kind)) {
|
||||
updateImagePullSecrets(inputObject, imagePullSecrets);
|
||||
}
|
||||
newObjectsList.push(inputObject);
|
||||
}
|
||||
});
|
||||
});
|
||||
return fileHelper.writeObjectsToFile(newObjectsList);
|
||||
}
|
||||
@ -1,278 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getResources = exports.updateSelectorLabels = exports.updateSpecLabels = exports.updateImagePullSecrets = exports.updateObjectAnnotations = exports.updateObjectLabels = exports.getReplicaCount = exports.isIngressEntity = exports.isServiceEntity = exports.isWorkloadEntity = exports.isDeploymentEntity = void 0;
|
||||
const fs = require("fs");
|
||||
const core = require("@actions/core");
|
||||
const yaml = require("js-yaml");
|
||||
const constants_1 = require("../constants");
|
||||
const string_comparison_1 = require("./string-comparison");
|
||||
const INGRESS = "Ingress";
|
||||
function isDeploymentEntity(kind) {
|
||||
if (!kind) {
|
||||
throw "ResourceKindNotDefined";
|
||||
}
|
||||
return constants_1.DEPLOYMENT_TYPES.some((type) => {
|
||||
return string_comparison_1.isEqual(type, kind, string_comparison_1.StringComparer.OrdinalIgnoreCase);
|
||||
});
|
||||
}
|
||||
exports.isDeploymentEntity = isDeploymentEntity;
|
||||
function isWorkloadEntity(kind) {
|
||||
return constants_1.WORKLOAD_TYPES.some((type) => type.toUpperCase() == kind.toUpperCase());
|
||||
}
|
||||
exports.isWorkloadEntity = isWorkloadEntity;
|
||||
function isServiceEntity(kind) {
|
||||
if (!kind) {
|
||||
throw "ResourceKindNotDefined";
|
||||
}
|
||||
return string_comparison_1.isEqual("Service", kind, string_comparison_1.StringComparer.OrdinalIgnoreCase);
|
||||
}
|
||||
exports.isServiceEntity = isServiceEntity;
|
||||
function isIngressEntity(kind) {
|
||||
if (!kind) {
|
||||
throw "ResourceKindNotDefined";
|
||||
}
|
||||
return string_comparison_1.isEqual(INGRESS, kind, string_comparison_1.StringComparer.OrdinalIgnoreCase);
|
||||
}
|
||||
exports.isIngressEntity = isIngressEntity;
|
||||
function getReplicaCount(inputObject) {
|
||||
if (!inputObject) {
|
||||
throw "NullInputObject";
|
||||
}
|
||||
if (!inputObject.kind) {
|
||||
throw "ResourceKindNotDefined";
|
||||
}
|
||||
const kind = inputObject.kind;
|
||||
if (!string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.POD, string_comparison_1.StringComparer.OrdinalIgnoreCase) &&
|
||||
!string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.DAEMON_SET, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
return inputObject.spec.replicas;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
exports.getReplicaCount = getReplicaCount;
|
||||
function updateObjectLabels(inputObject, newLabels, override) {
|
||||
if (!inputObject) {
|
||||
throw "NullInputObject";
|
||||
}
|
||||
if (!inputObject.metadata) {
|
||||
throw "NullInputObjectMetadata";
|
||||
}
|
||||
if (!newLabels) {
|
||||
return;
|
||||
}
|
||||
if (override) {
|
||||
inputObject.metadata.labels = newLabels;
|
||||
}
|
||||
else {
|
||||
let existingLabels = inputObject.metadata.labels;
|
||||
if (!existingLabels) {
|
||||
existingLabels = new Map();
|
||||
}
|
||||
Object.keys(newLabels).forEach(function (key) {
|
||||
existingLabels[key] = newLabels[key];
|
||||
});
|
||||
inputObject.metadata.labels = existingLabels;
|
||||
}
|
||||
}
|
||||
exports.updateObjectLabels = updateObjectLabels;
|
||||
function updateObjectAnnotations(inputObject, newAnnotations, override) {
|
||||
if (!inputObject) {
|
||||
throw "NullInputObject";
|
||||
}
|
||||
if (!inputObject.metadata) {
|
||||
throw "NullInputObjectMetadata";
|
||||
}
|
||||
if (!newAnnotations) {
|
||||
return;
|
||||
}
|
||||
if (override) {
|
||||
inputObject.metadata.annotations = newAnnotations;
|
||||
}
|
||||
else {
|
||||
let existingAnnotations = inputObject.metadata.annotations;
|
||||
if (!existingAnnotations) {
|
||||
existingAnnotations = new Map();
|
||||
}
|
||||
Object.keys(newAnnotations).forEach(function (key) {
|
||||
existingAnnotations[key] = newAnnotations[key];
|
||||
});
|
||||
inputObject.metadata.annotations = existingAnnotations;
|
||||
}
|
||||
}
|
||||
exports.updateObjectAnnotations = updateObjectAnnotations;
|
||||
function updateImagePullSecrets(inputObject, newImagePullSecrets, override = false) {
|
||||
if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
|
||||
return;
|
||||
}
|
||||
const newImagePullSecretsObjects = Array.from(newImagePullSecrets, (x) => {
|
||||
return { name: x };
|
||||
});
|
||||
let existingImagePullSecretObjects = getImagePullSecrets(inputObject);
|
||||
if (override) {
|
||||
existingImagePullSecretObjects = newImagePullSecretsObjects;
|
||||
}
|
||||
else {
|
||||
if (!existingImagePullSecretObjects) {
|
||||
existingImagePullSecretObjects = new Array();
|
||||
}
|
||||
existingImagePullSecretObjects = existingImagePullSecretObjects.concat(newImagePullSecretsObjects);
|
||||
}
|
||||
setImagePullSecrets(inputObject, existingImagePullSecretObjects);
|
||||
}
|
||||
exports.updateImagePullSecrets = updateImagePullSecrets;
|
||||
function updateSpecLabels(inputObject, newLabels, override) {
|
||||
if (!inputObject) {
|
||||
throw "NullInputObject";
|
||||
}
|
||||
if (!inputObject.kind) {
|
||||
throw "ResourceKindNotDefined";
|
||||
}
|
||||
if (!newLabels) {
|
||||
return;
|
||||
}
|
||||
let existingLabels = getSpecLabels(inputObject);
|
||||
if (override) {
|
||||
existingLabels = newLabels;
|
||||
}
|
||||
else {
|
||||
if (!existingLabels) {
|
||||
existingLabels = new Map();
|
||||
}
|
||||
Object.keys(newLabels).forEach(function (key) {
|
||||
existingLabels[key] = newLabels[key];
|
||||
});
|
||||
}
|
||||
setSpecLabels(inputObject, existingLabels);
|
||||
}
|
||||
exports.updateSpecLabels = updateSpecLabels;
|
||||
function updateSelectorLabels(inputObject, newLabels, override) {
|
||||
if (!inputObject) {
|
||||
throw "NullInputObject";
|
||||
}
|
||||
if (!inputObject.kind) {
|
||||
throw "ResourceKindNotDefined";
|
||||
}
|
||||
if (!newLabels) {
|
||||
return;
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.POD, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
return;
|
||||
}
|
||||
let existingLabels = getSpecSelectorLabels(inputObject);
|
||||
if (override) {
|
||||
existingLabels = newLabels;
|
||||
}
|
||||
else {
|
||||
if (!existingLabels) {
|
||||
existingLabels = new Map();
|
||||
}
|
||||
Object.keys(newLabels).forEach(function (key) {
|
||||
existingLabels[key] = newLabels[key];
|
||||
});
|
||||
}
|
||||
setSpecSelectorLabels(inputObject, existingLabels);
|
||||
}
|
||||
exports.updateSelectorLabels = updateSelectorLabels;
|
||||
function getResources(filePaths, filterResourceTypes) {
|
||||
if (!filePaths) {
|
||||
return [];
|
||||
}
|
||||
const resources = [];
|
||||
filePaths.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath);
|
||||
yaml.safeLoadAll(fileContents, function (inputObject) {
|
||||
const inputObjectKind = inputObject ? inputObject.kind : "";
|
||||
if (filterResourceTypes.filter((type) => string_comparison_1.isEqual(inputObjectKind, type, string_comparison_1.StringComparer.OrdinalIgnoreCase)).length > 0) {
|
||||
const resource = {
|
||||
type: inputObject.kind,
|
||||
name: inputObject.metadata.name,
|
||||
};
|
||||
resources.push(resource);
|
||||
}
|
||||
});
|
||||
});
|
||||
return resources;
|
||||
}
|
||||
exports.getResources = getResources;
|
||||
function getSpecLabels(inputObject) {
|
||||
if (!inputObject) {
|
||||
return null;
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.POD, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
return inputObject.metadata.labels;
|
||||
}
|
||||
if (!!inputObject.spec &&
|
||||
!!inputObject.spec.template &&
|
||||
!!inputObject.spec.template.metadata) {
|
||||
return inputObject.spec.template.metadata.labels;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
function getImagePullSecrets(inputObject) {
|
||||
if (!inputObject || !inputObject.spec) {
|
||||
return null;
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.CRON_JOB, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
try {
|
||||
return inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
catch (ex) {
|
||||
core.debug(`Fetching imagePullSecrets failed due to this error: ${JSON.stringify(ex)}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.POD, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
return inputObject.spec.imagePullSecrets;
|
||||
}
|
||||
if (!!inputObject.spec.template && !!inputObject.spec.template.spec) {
|
||||
return inputObject.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
function setImagePullSecrets(inputObject, newImagePullSecrets) {
|
||||
if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
|
||||
return;
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.POD, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
inputObject.spec.imagePullSecrets = newImagePullSecrets;
|
||||
return;
|
||||
}
|
||||
if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.CRON_JOB, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
try {
|
||||
inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets =
|
||||
newImagePullSecrets;
|
||||
}
|
||||
catch (ex) {
|
||||
core.debug(`Overriding imagePullSecrets failed due to this error: ${JSON.stringify(ex)}`);
|
||||
//Do nothing
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (!!inputObject.spec.template && !!inputObject.spec.template.spec) {
|
||||
inputObject.spec.template.spec.imagePullSecrets = newImagePullSecrets;
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
function setSpecLabels(inputObject, newLabels) {
|
||||
let specLabels = getSpecLabels(inputObject);
|
||||
if (!!newLabels) {
|
||||
specLabels = newLabels;
|
||||
}
|
||||
}
|
||||
function getSpecSelectorLabels(inputObject) {
|
||||
if (!!inputObject && !!inputObject.spec && !!inputObject.spec.selector) {
|
||||
if (isServiceEntity(inputObject.kind)) {
|
||||
return inputObject.spec.selector;
|
||||
}
|
||||
else {
|
||||
return inputObject.spec.selector.matchLabels;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
function setSpecSelectorLabels(inputObject, newLabels) {
|
||||
let selectorLabels = getSpecSelectorLabels(inputObject);
|
||||
if (!!selectorLabels) {
|
||||
selectorLabels = newLabels;
|
||||
}
|
||||
}
|
||||
@ -1,328 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.fetchResource = exports.isServiceSelectorSubsetOfMatchLabel = exports.getServiceSelector = exports.getDeploymentMatchLabels = exports.getBlueGreenResourceName = exports.addBlueGreenLabelsAndAnnotations = exports.getNewBlueGreenObject = exports.createWorkloadsWithLabel = exports.isServiceRouted = exports.getManifestObjects = exports.getSuffix = exports.deleteObjects = exports.deleteWorkloadsAndServicesWithLabel = exports.deleteWorkloadsWithLabel = exports.routeBlueGreen = exports.isSMIRoute = exports.isIngressRoute = exports.isBlueGreenDeploymentStrategy = exports.STABLE_SUFFIX = exports.GREEN_SUFFIX = exports.BLUE_GREEN_VERSION_LABEL = exports.NONE_LABEL_VALUE = exports.GREEN_LABEL_VALUE = exports.BLUE_GREEN_DEPLOYMENT_STRATEGY = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const utility_1 = require("../utility");
|
||||
const constants_1 = require("../../constants");
|
||||
const fileHelper = require("../files-helper");
|
||||
const helper = require("../resource-object-utility");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const service_blue_green_helper_1 = require("./service-blue-green-helper");
|
||||
const ingress_blue_green_helper_1 = require("./ingress-blue-green-helper");
|
||||
const smi_blue_green_helper_1 = require("./smi-blue-green-helper");
|
||||
exports.BLUE_GREEN_DEPLOYMENT_STRATEGY = "BLUE-GREEN";
|
||||
exports.GREEN_LABEL_VALUE = "green";
|
||||
exports.NONE_LABEL_VALUE = "None";
|
||||
exports.BLUE_GREEN_VERSION_LABEL = "k8s.deploy.color";
|
||||
exports.GREEN_SUFFIX = "-green";
|
||||
exports.STABLE_SUFFIX = "-stable";
|
||||
const INGRESS_ROUTE = "INGRESS";
|
||||
const SMI_ROUTE = "SMI";
|
||||
function isBlueGreenDeploymentStrategy() {
|
||||
const deploymentStrategy = TaskInputParameters.deploymentStrategy;
|
||||
return (deploymentStrategy &&
|
||||
deploymentStrategy.toUpperCase() === exports.BLUE_GREEN_DEPLOYMENT_STRATEGY);
|
||||
}
|
||||
exports.isBlueGreenDeploymentStrategy = isBlueGreenDeploymentStrategy;
|
||||
function isIngressRoute() {
|
||||
const routeMethod = TaskInputParameters.routeMethod;
|
||||
return routeMethod && routeMethod.toUpperCase() === INGRESS_ROUTE;
|
||||
}
|
||||
exports.isIngressRoute = isIngressRoute;
|
||||
function isSMIRoute() {
|
||||
const routeMethod = TaskInputParameters.routeMethod;
|
||||
return routeMethod && routeMethod.toUpperCase() === SMI_ROUTE;
|
||||
}
|
||||
exports.isSMIRoute = isSMIRoute;
|
||||
function routeBlueGreen(kubectl, inputManifestFiles) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
// get buffer time
|
||||
let bufferTime = parseInt(TaskInputParameters.versionSwitchBuffer);
|
||||
//logging start of buffer time
|
||||
let dateNow = new Date();
|
||||
console.log(`Starting buffer time of ${bufferTime} minute(s) at ${dateNow.toISOString()}`);
|
||||
// waiting
|
||||
yield utility_1.sleep(bufferTime * 1000 * 60);
|
||||
// logging end of buffer time
|
||||
dateNow = new Date();
|
||||
console.log(`Stopping buffer time of ${bufferTime} minute(s) at ${dateNow.toISOString()}`);
|
||||
const manifestObjects = getManifestObjects(inputManifestFiles);
|
||||
// routing to new deployments
|
||||
if (isIngressRoute()) {
|
||||
ingress_blue_green_helper_1.routeBlueGreenIngress(kubectl, exports.GREEN_LABEL_VALUE, manifestObjects.serviceNameMap, manifestObjects.ingressEntityList);
|
||||
}
|
||||
else if (isSMIRoute()) {
|
||||
smi_blue_green_helper_1.routeBlueGreenSMI(kubectl, exports.GREEN_LABEL_VALUE, manifestObjects.serviceEntityList);
|
||||
}
|
||||
else {
|
||||
service_blue_green_helper_1.routeBlueGreenService(kubectl, exports.GREEN_LABEL_VALUE, manifestObjects.serviceEntityList);
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.routeBlueGreen = routeBlueGreen;
|
||||
function deleteWorkloadsWithLabel(kubectl, deleteLabel, deploymentEntityList) {
|
||||
let resourcesToDelete = [];
|
||||
deploymentEntityList.forEach((inputObject) => {
|
||||
const name = inputObject.metadata.name;
|
||||
const kind = inputObject.kind;
|
||||
if (deleteLabel === exports.NONE_LABEL_VALUE) {
|
||||
// if dellabel is none, deletes stable deployments
|
||||
const resourceToDelete = { name: name, kind: kind };
|
||||
resourcesToDelete.push(resourceToDelete);
|
||||
}
|
||||
else {
|
||||
// if dellabel is not none, then deletes new green deployments
|
||||
const resourceToDelete = {
|
||||
name: getBlueGreenResourceName(name, exports.GREEN_SUFFIX),
|
||||
kind: kind,
|
||||
};
|
||||
resourcesToDelete.push(resourceToDelete);
|
||||
}
|
||||
});
|
||||
// deletes the deployments
|
||||
deleteObjects(kubectl, resourcesToDelete);
|
||||
}
|
||||
exports.deleteWorkloadsWithLabel = deleteWorkloadsWithLabel;
|
||||
function deleteWorkloadsAndServicesWithLabel(kubectl, deleteLabel, deploymentEntityList, serviceEntityList) {
|
||||
// need to delete services and deployments
|
||||
const deletionEntitiesList = deploymentEntityList.concat(serviceEntityList);
|
||||
let resourcesToDelete = [];
|
||||
deletionEntitiesList.forEach((inputObject) => {
|
||||
const name = inputObject.metadata.name;
|
||||
const kind = inputObject.kind;
|
||||
if (deleteLabel === exports.NONE_LABEL_VALUE) {
|
||||
// if not dellabel, delete stable objects
|
||||
const resourceToDelete = { name: name, kind: kind };
|
||||
resourcesToDelete.push(resourceToDelete);
|
||||
}
|
||||
else {
|
||||
// else delete green labels
|
||||
const resourceToDelete = {
|
||||
name: getBlueGreenResourceName(name, exports.GREEN_SUFFIX),
|
||||
kind: kind,
|
||||
};
|
||||
resourcesToDelete.push(resourceToDelete);
|
||||
}
|
||||
});
|
||||
deleteObjects(kubectl, resourcesToDelete);
|
||||
}
|
||||
exports.deleteWorkloadsAndServicesWithLabel = deleteWorkloadsAndServicesWithLabel;
|
||||
function deleteObjects(kubectl, deleteList) {
|
||||
// delete services and deployments
|
||||
deleteList.forEach((delObject) => {
|
||||
try {
|
||||
const result = kubectl.delete([delObject.kind, delObject.name]);
|
||||
utility_1.checkForErrors([result]);
|
||||
}
|
||||
catch (ex) {
|
||||
// Ignore failures of delete if doesn't exist
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.deleteObjects = deleteObjects;
|
||||
function getSuffix(label) {
|
||||
if (label === exports.GREEN_LABEL_VALUE) {
|
||||
return exports.GREEN_SUFFIX;
|
||||
}
|
||||
else {
|
||||
return "";
|
||||
}
|
||||
}
|
||||
exports.getSuffix = getSuffix;
|
||||
// other common functions
|
||||
function getManifestObjects(filePaths) {
|
||||
const deploymentEntityList = [];
|
||||
const routedServiceEntityList = [];
|
||||
const unroutedServiceEntityList = [];
|
||||
const ingressEntityList = [];
|
||||
const otherEntitiesList = [];
|
||||
let serviceNameMap = new Map();
|
||||
filePaths.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath);
|
||||
yaml.safeLoadAll(fileContents, function (inputObject) {
|
||||
if (!!inputObject) {
|
||||
const kind = inputObject.kind;
|
||||
const name = inputObject.metadata.name;
|
||||
if (helper.isDeploymentEntity(kind)) {
|
||||
deploymentEntityList.push(inputObject);
|
||||
}
|
||||
else if (helper.isServiceEntity(kind)) {
|
||||
if (isServiceRouted(inputObject, deploymentEntityList)) {
|
||||
routedServiceEntityList.push(inputObject);
|
||||
serviceNameMap.set(name, getBlueGreenResourceName(name, exports.GREEN_SUFFIX));
|
||||
}
|
||||
else {
|
||||
unroutedServiceEntityList.push(inputObject);
|
||||
}
|
||||
}
|
||||
else if (helper.isIngressEntity(kind)) {
|
||||
ingressEntityList.push(inputObject);
|
||||
}
|
||||
else {
|
||||
otherEntitiesList.push(inputObject);
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
return {
|
||||
serviceEntityList: routedServiceEntityList,
|
||||
serviceNameMap: serviceNameMap,
|
||||
unroutedServiceEntityList: unroutedServiceEntityList,
|
||||
deploymentEntityList: deploymentEntityList,
|
||||
ingressEntityList: ingressEntityList,
|
||||
otherObjects: otherEntitiesList,
|
||||
};
|
||||
}
|
||||
exports.getManifestObjects = getManifestObjects;
|
||||
function isServiceRouted(serviceObject, deploymentEntityList) {
|
||||
let shouldBeRouted = false;
|
||||
const serviceSelector = getServiceSelector(serviceObject);
|
||||
if (!!serviceSelector) {
|
||||
if (deploymentEntityList.some((depObject) => {
|
||||
// finding if there is a deployment in the given manifests the service targets
|
||||
const matchLabels = getDeploymentMatchLabels(depObject);
|
||||
return (!!matchLabels &&
|
||||
isServiceSelectorSubsetOfMatchLabel(serviceSelector, matchLabels));
|
||||
})) {
|
||||
shouldBeRouted = true;
|
||||
}
|
||||
}
|
||||
return shouldBeRouted;
|
||||
}
|
||||
exports.isServiceRouted = isServiceRouted;
|
||||
function createWorkloadsWithLabel(kubectl, deploymentObjectList, nextLabel) {
|
||||
const newObjectsList = [];
|
||||
deploymentObjectList.forEach((inputObject) => {
|
||||
// creating deployment with label
|
||||
const newBlueGreenObject = getNewBlueGreenObject(inputObject, nextLabel);
|
||||
core.debug("New blue-green object is: " + JSON.stringify(newBlueGreenObject));
|
||||
newObjectsList.push(newBlueGreenObject);
|
||||
});
|
||||
const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
|
||||
const result = kubectl.apply(manifestFiles);
|
||||
return { result: result, newFilePaths: manifestFiles };
|
||||
}
|
||||
exports.createWorkloadsWithLabel = createWorkloadsWithLabel;
|
||||
function getNewBlueGreenObject(inputObject, labelValue) {
|
||||
const newObject = JSON.parse(JSON.stringify(inputObject));
|
||||
// Updating name only if label is green label is given
|
||||
if (labelValue === exports.GREEN_LABEL_VALUE) {
|
||||
newObject.metadata.name = getBlueGreenResourceName(inputObject.metadata.name, exports.GREEN_SUFFIX);
|
||||
}
|
||||
// Adding labels and annotations
|
||||
addBlueGreenLabelsAndAnnotations(newObject, labelValue);
|
||||
return newObject;
|
||||
}
|
||||
exports.getNewBlueGreenObject = getNewBlueGreenObject;
|
||||
function addBlueGreenLabelsAndAnnotations(inputObject, labelValue) {
|
||||
//creating the k8s.deploy.color label
|
||||
const newLabels = new Map();
|
||||
newLabels[exports.BLUE_GREEN_VERSION_LABEL] = labelValue;
|
||||
// updating object labels and selector labels
|
||||
helper.updateObjectLabels(inputObject, newLabels, false);
|
||||
helper.updateSelectorLabels(inputObject, newLabels, false);
|
||||
// updating spec labels if it is a service
|
||||
if (!helper.isServiceEntity(inputObject.kind)) {
|
||||
helper.updateSpecLabels(inputObject, newLabels, false);
|
||||
}
|
||||
}
|
||||
exports.addBlueGreenLabelsAndAnnotations = addBlueGreenLabelsAndAnnotations;
|
||||
function getBlueGreenResourceName(name, suffix) {
|
||||
return `${name}${suffix}`;
|
||||
}
|
||||
exports.getBlueGreenResourceName = getBlueGreenResourceName;
|
||||
function getDeploymentMatchLabels(deploymentObject) {
|
||||
if (!!deploymentObject &&
|
||||
deploymentObject.kind.toUpperCase() ==
|
||||
constants_1.KubernetesWorkload.POD.toUpperCase() &&
|
||||
!!deploymentObject.metadata &&
|
||||
!!deploymentObject.metadata.labels) {
|
||||
return deploymentObject.metadata.labels;
|
||||
}
|
||||
else if (!!deploymentObject &&
|
||||
deploymentObject.spec &&
|
||||
deploymentObject.spec.selector &&
|
||||
deploymentObject.spec.selector.matchLabels) {
|
||||
return deploymentObject.spec.selector.matchLabels;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
exports.getDeploymentMatchLabels = getDeploymentMatchLabels;
|
||||
function getServiceSelector(serviceObject) {
|
||||
if (!!serviceObject && serviceObject.spec && serviceObject.spec.selector) {
|
||||
return serviceObject.spec.selector;
|
||||
}
|
||||
else
|
||||
return null;
|
||||
}
|
||||
exports.getServiceSelector = getServiceSelector;
|
||||
function isServiceSelectorSubsetOfMatchLabel(serviceSelector, matchLabels) {
|
||||
let serviceSelectorMap = new Map();
|
||||
let matchLabelsMap = new Map();
|
||||
JSON.parse(JSON.stringify(serviceSelector), (key, value) => {
|
||||
serviceSelectorMap.set(key, value);
|
||||
});
|
||||
JSON.parse(JSON.stringify(matchLabels), (key, value) => {
|
||||
matchLabelsMap.set(key, value);
|
||||
});
|
||||
let isMatch = true;
|
||||
serviceSelectorMap.forEach((value, key) => {
|
||||
if (!!key &&
|
||||
(!matchLabelsMap.has(key) || matchLabelsMap.get(key)) != value) {
|
||||
isMatch = false;
|
||||
}
|
||||
});
|
||||
return isMatch;
|
||||
}
|
||||
exports.isServiceSelectorSubsetOfMatchLabel = isServiceSelectorSubsetOfMatchLabel;
|
||||
function fetchResource(kubectl, kind, name) {
|
||||
const result = kubectl.getResource(kind, name);
|
||||
if (result == null || !!result.stderr) {
|
||||
return null;
|
||||
}
|
||||
if (!!result.stdout) {
|
||||
const resource = JSON.parse(result.stdout);
|
||||
try {
|
||||
UnsetsClusterSpecficDetails(resource);
|
||||
return resource;
|
||||
}
|
||||
catch (ex) {
|
||||
core.debug("Exception occurred while Parsing " + resource + " in Json object");
|
||||
core.debug(`Exception:${ex}`);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
exports.fetchResource = fetchResource;
|
||||
function UnsetsClusterSpecficDetails(resource) {
|
||||
if (resource == null) {
|
||||
return;
|
||||
}
|
||||
// Unsets the cluster specific details in the object
|
||||
if (!!resource) {
|
||||
const metadata = resource.metadata;
|
||||
const status = resource.status;
|
||||
if (!!metadata) {
|
||||
const newMetadata = {
|
||||
annotations: metadata.annotations,
|
||||
labels: metadata.labels,
|
||||
name: metadata.name,
|
||||
};
|
||||
resource.metadata = newMetadata;
|
||||
}
|
||||
if (!!status) {
|
||||
resource.status = {};
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,192 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getStableResourceName = exports.getBaselineResourceName = exports.getCanaryResourceName = exports.isSMICanaryStrategy = exports.isCanaryDeploymentStrategy = exports.fetchResource = exports.fetchCanaryResource = exports.getNewCanaryResource = exports.getNewBaselineResource = exports.getStableResource = exports.isResourceMarkedAsStable = exports.markResourceAsStable = exports.deleteCanaryDeployment = exports.STABLE_LABEL_VALUE = exports.STABLE_SUFFIX = exports.CANARY_LABEL_VALUE = exports.BASELINE_LABEL_VALUE = exports.CANARY_VERSION_LABEL = exports.TRAFFIC_SPLIT_STRATEGY = exports.CANARY_DEPLOYMENT_STRATEGY = void 0;
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const core = require("@actions/core");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const helper = require("../resource-object-utility");
|
||||
const constants_1 = require("../../constants");
|
||||
const string_comparison_1 = require("../string-comparison");
|
||||
const utility_1 = require("../utility");
|
||||
const utils = require("../manifest-utilities");
|
||||
exports.CANARY_DEPLOYMENT_STRATEGY = "CANARY";
|
||||
exports.TRAFFIC_SPLIT_STRATEGY = "SMI";
|
||||
exports.CANARY_VERSION_LABEL = "workflow/version";
|
||||
const BASELINE_SUFFIX = "-baseline";
|
||||
exports.BASELINE_LABEL_VALUE = "baseline";
|
||||
const CANARY_SUFFIX = "-canary";
|
||||
exports.CANARY_LABEL_VALUE = "canary";
|
||||
exports.STABLE_SUFFIX = "-stable";
|
||||
exports.STABLE_LABEL_VALUE = "stable";
|
||||
function deleteCanaryDeployment(kubectl, manifestFilePaths, includeServices) {
|
||||
// get manifest files
|
||||
const inputManifestFiles = utils.getManifestFiles(manifestFilePaths);
|
||||
if (inputManifestFiles == null || inputManifestFiles.length == 0) {
|
||||
throw new Error("ManifestFileNotFound");
|
||||
}
|
||||
// create delete cmd prefix
|
||||
cleanUpCanary(kubectl, inputManifestFiles, includeServices);
|
||||
}
|
||||
exports.deleteCanaryDeployment = deleteCanaryDeployment;
|
||||
function markResourceAsStable(inputObject) {
|
||||
if (isResourceMarkedAsStable(inputObject)) {
|
||||
return inputObject;
|
||||
}
|
||||
const newObject = JSON.parse(JSON.stringify(inputObject));
|
||||
// Adding labels and annotations.
|
||||
addCanaryLabelsAndAnnotations(newObject, exports.STABLE_LABEL_VALUE);
|
||||
core.debug("Added stable label: " + JSON.stringify(newObject));
|
||||
return newObject;
|
||||
}
|
||||
exports.markResourceAsStable = markResourceAsStable;
|
||||
function isResourceMarkedAsStable(inputObject) {
|
||||
return (inputObject &&
|
||||
inputObject.metadata &&
|
||||
inputObject.metadata.labels &&
|
||||
inputObject.metadata.labels[exports.CANARY_VERSION_LABEL] === exports.STABLE_LABEL_VALUE);
|
||||
}
|
||||
exports.isResourceMarkedAsStable = isResourceMarkedAsStable;
|
||||
function getStableResource(inputObject) {
|
||||
var replicaCount = isSpecContainsReplicas(inputObject.kind)
|
||||
? inputObject.metadata.replicas
|
||||
: 0;
|
||||
return getNewCanaryObject(inputObject, replicaCount, exports.STABLE_LABEL_VALUE);
|
||||
}
|
||||
exports.getStableResource = getStableResource;
|
||||
function getNewBaselineResource(stableObject, replicas) {
|
||||
return getNewCanaryObject(stableObject, replicas, exports.BASELINE_LABEL_VALUE);
|
||||
}
|
||||
exports.getNewBaselineResource = getNewBaselineResource;
|
||||
function getNewCanaryResource(inputObject, replicas) {
|
||||
return getNewCanaryObject(inputObject, replicas, exports.CANARY_LABEL_VALUE);
|
||||
}
|
||||
exports.getNewCanaryResource = getNewCanaryResource;
|
||||
function fetchCanaryResource(kubectl, kind, name) {
    // Look up the canary-suffixed variant of the named resource.
    const canaryName = getCanaryResourceName(name);
    return fetchResource(kubectl, kind, canaryName);
}
exports.fetchCanaryResource = fetchCanaryResource;
|
||||
function fetchResource(kubectl, kind, name) {
    // Query the cluster; a missing result or anything on stderr means
    // the resource is treated as absent.
    const result = kubectl.getResource(kind, name);
    if (!result || result.stderr) {
        return null;
    }
    if (result.stdout) {
        try {
            // BUGFIX: JSON.parse used to sit OUTSIDE this try block, so a
            // malformed payload escaped the function as an exception even
            // though the catch below was clearly written to absorb it.
            const resource = JSON.parse(result.stdout);
            // Strip cluster-assigned metadata/status before returning.
            UnsetsClusterSpecficDetails(resource);
            return resource;
        }
        catch (ex) {
            core.debug("Exception occurred while Parsing " + result.stdout + " in JSON object");
            core.debug(`Exception: ${ex}`);
        }
    }
    // Falls through to undefined when stdout was empty or parsing failed,
    // matching the original implicit-return contract.
}
exports.fetchResource = fetchResource;
|
||||
function isCanaryDeploymentStrategy() {
    // Truthy when the configured strategy matches CANARY, case-insensitively.
    const strategy = TaskInputParameters.deploymentStrategy;
    return strategy && strategy.toUpperCase() === exports.CANARY_DEPLOYMENT_STRATEGY;
}
exports.isCanaryDeploymentStrategy = isCanaryDeploymentStrategy;
|
||||
function isSMICanaryStrategy() {
    // SMI canary requires both the canary strategy AND the SMI
    // traffic-split method to be configured.
    const splitMethod = TaskInputParameters.trafficSplitMethod;
    return (isCanaryDeploymentStrategy() &&
        splitMethod &&
        splitMethod.toUpperCase() === exports.TRAFFIC_SPLIT_STRATEGY);
}
exports.isSMICanaryStrategy = isSMICanaryStrategy;
|
||||
function getCanaryResourceName(name) {
    // Canary variants are named by appending the canary suffix.
    return `${name}${CANARY_SUFFIX}`;
}
exports.getCanaryResourceName = getCanaryResourceName;
|
||||
function getBaselineResourceName(name) {
    // Baseline variants are named by appending the baseline suffix.
    return `${name}${BASELINE_SUFFIX}`;
}
exports.getBaselineResourceName = getBaselineResourceName;
|
||||
function getStableResourceName(name) {
    // Stable variants are named by appending the stable suffix.
    return `${name}${exports.STABLE_SUFFIX}`;
}
exports.getStableResourceName = getStableResourceName;
|
||||
function UnsetsClusterSpecficDetails(resource) {
    // Nothing to scrub on a missing resource.
    if (resource == null) {
        return;
    }
    // Keep only the portable parts of metadata (annotations, labels, name);
    // everything cluster-assigned (uid, resourceVersion, …) is dropped.
    const metadata = resource.metadata;
    if (metadata) {
        resource.metadata = {
            annotations: metadata.annotations,
            labels: metadata.labels,
            name: metadata.name,
        };
    }
    // Status is entirely cluster-generated; blank it out when present.
    if (resource.status) {
        resource.status = {};
    }
}
|
||||
function getNewCanaryObject(inputObject, replicas, type) {
    // Work on a deep copy so the input manifest object stays untouched.
    const newObject = JSON.parse(JSON.stringify(inputObject));
    // Pick the suffixed name matching the requested variant.
    switch (type) {
        case exports.CANARY_LABEL_VALUE:
            newObject.metadata.name = getCanaryResourceName(inputObject.metadata.name);
            break;
        case exports.STABLE_LABEL_VALUE:
            newObject.metadata.name = getStableResourceName(inputObject.metadata.name);
            break;
        default:
            newObject.metadata.name = getBaselineResourceName(inputObject.metadata.name);
            break;
    }
    // Stamp the canary-version label/annotation set on the copy.
    addCanaryLabelsAndAnnotations(newObject, type);
    // Workload kinds that support replicas get the requested count.
    if (isSpecContainsReplicas(newObject.kind)) {
        newObject.spec.replicas = replicas;
    }
    return newObject;
}
|
||||
function isSpecContainsReplicas(kind) {
    // Pods and DaemonSets carry no spec.replicas field, and services
    // never do; every other workload kind is assumed to.
    if (string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.POD, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
        return false;
    }
    if (string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.DAEMON_SET, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
        return false;
    }
    return !helper.isServiceEntity(kind);
}
|
||||
// Stamps the canary-version marker (label + annotation + selector) onto
// the given object in place.
function addCanaryLabelsAndAnnotations(inputObject, type) {
    // NOTE(review): a Map is created but written via bracket assignment,
    // which sets a plain own property rather than a Map entry; the helper
    // functions below presumably iterate it like a plain object — confirm
    // before "fixing" this to newLabels.set(...).
    const newLabels = new Map();
    newLabels[exports.CANARY_VERSION_LABEL] = type;
    // Apply the marker to object labels, annotations and selector labels.
    helper.updateObjectLabels(inputObject, newLabels, false);
    helper.updateObjectAnnotations(inputObject, newLabels, false);
    helper.updateSelectorLabels(inputObject, newLabels, false);
    // Pod-template (spec) labels only exist on workload kinds, not services.
    if (!helper.isServiceEntity(inputObject.kind)) {
        helper.updateSpecLabels(inputObject, newLabels, false);
    }
}
|
||||
function cleanUpCanary(kubectl, files, includeServices) {
    // Best-effort delete: a missing object is not an error during cleanup.
    const deleteObject = (kind, name) => {
        try {
            const result = kubectl.delete([kind, name]);
            utility_1.checkForErrors([result]);
        }
        catch (ex) {
            // Ignore failures of delete if doesn't exist
        }
    };
    files.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, (inputObject) => {
            const kind = inputObject.kind;
            const name = inputObject.metadata.name;
            const isCleanupTarget = helper.isDeploymentEntity(kind) ||
                (includeServices && helper.isServiceEntity(kind));
            if (isCleanupTarget) {
                // Remove both the canary and the baseline variants.
                deleteObject(kind, getCanaryResourceName(name));
                deleteObject(kind, getBaselineResourceName(name));
            }
        });
    });
}
|
||||
@ -1,142 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isCanaryDeploymentStrategy = exports.annotateAndLabelResources = exports.checkManifestStability = exports.deployManifests = exports.getManifestFiles = void 0;
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const canaryDeploymentHelper = require("./canary-deployment-helper");
|
||||
const KubernetesObjectUtility = require("../resource-object-utility");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const models = require("../../constants");
|
||||
const fileHelper = require("../files-helper");
|
||||
const utils = require("../manifest-utilities");
|
||||
const KubernetesManifestUtility = require("../manifest-stability-utility");
|
||||
const pod_canary_deployment_helper_1 = require("./pod-canary-deployment-helper");
|
||||
const smi_canary_deployment_helper_1 = require("./smi-canary-deployment-helper");
|
||||
const utility_1 = require("../utility");
|
||||
const service_blue_green_helper_1 = require("./service-blue-green-helper");
|
||||
const ingress_blue_green_helper_1 = require("./ingress-blue-green-helper");
|
||||
const smi_blue_green_helper_1 = require("./smi-blue-green-helper");
|
||||
const deploymentStrategy_1 = require("../../types/deploymentStrategy");
|
||||
const core = require("@actions/core");
|
||||
const trafficSplitMethod_1 = require("../../types/trafficSplitMethod");
|
||||
const routeStrategy_1 = require("../../types/routeStrategy");
|
||||
function getManifestFiles(manifestFilePaths) {
    // Resolve the user-supplied paths into concrete manifest files.
    const discoveredFiles = utils.getManifestFiles(manifestFilePaths);
    // An empty resolution means there is nothing to deploy — fail loudly.
    if (discoveredFiles == null || discoveredFiles.length === 0) {
        throw new Error(`ManifestFileNotFound : ${manifestFilePaths}`);
    }
    return discoveredFiles;
}
exports.getManifestFiles = getManifestFiles;
|
||||
// Applies the manifest files using the requested deployment strategy and
// returns the paths of the files that were actually applied.
function deployManifests(files, deploymentStrategy, kubectl) {
    switch (deploymentStrategy) {
        case deploymentStrategy_1.DeploymentStrategy.CANARY: {
            // Canary: delegate to the SMI or pod-based canary helper
            // depending on the configured traffic-split method.
            const trafficSplitMethod = trafficSplitMethod_1.parseTrafficSplitMethod(core.getInput("traffic-split-method", { required: true }));
            const { result, newFilePaths } = trafficSplitMethod == trafficSplitMethod_1.TrafficSplitMethod.SMI
                ? smi_canary_deployment_helper_1.deploySMICanary(files, kubectl)
                : pod_canary_deployment_helper_1.deployPodCanary(files, kubectl);
            utility_1.checkForErrors([result]);
            return newFilePaths;
        }
        case deploymentStrategy_1.DeploymentStrategy.BLUE_GREEN: {
            // Blue-green: first truthy helper result wins — ingress, then
            // SMI, falling back to plain service routing.
            const routeStrategy = routeStrategy_1.parseRouteStrategy(core.getInput("route-method", { required: true }));
            const { result, newFilePaths } = (routeStrategy == routeStrategy_1.RouteStrategy.INGRESS &&
                ingress_blue_green_helper_1.deployBlueGreenIngress(files)) ||
                (routeStrategy == routeStrategy_1.RouteStrategy.SMI && smi_blue_green_helper_1.deployBlueGreenSMI(files)) ||
                service_blue_green_helper_1.deployBlueGreenService(files);
            utility_1.checkForErrors([result]);
            return newFilePaths;
        }
        case undefined: {
            // Deliberate fall-through into the default (plain apply) path
            // after warning about the unrecognized strategy.
            core.warning("Deployment strategy is not recognized");
        }
        default: {
            // Plain apply; under SMI traffic-split, deployment-like objects
            // are first re-labelled as the stable version.
            const trafficSplitMethod = trafficSplitMethod_1.parseTrafficSplitMethod(core.getInput("traffic-split-method", { required: true }));
            if (trafficSplitMethod == trafficSplitMethod_1.TrafficSplitMethod.SMI) {
                const updatedManifests = appendStableVersionLabelToResource(files, kubectl);
                const result = kubectl.apply(updatedManifests, TaskInputParameters.forceDeployment);
                utility_1.checkForErrors([result]);
            }
            else {
                const result = kubectl.apply(files, TaskInputParameters.forceDeployment);
                utility_1.checkForErrors([result]);
            }
            return files;
        }
    }
}
exports.deployManifests = deployManifests;
|
||||
function appendStableVersionLabelToResource(files, kubectl) {
    // Split the input manifests: deployment-like objects are re-emitted
    // with the stable version label; everything else passes through as its
    // original file path. (kubectl is unused but kept for signature
    // compatibility with callers.)
    const passthroughFiles = [];
    const relabelledObjects = [];
    files.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, (inputObject) => {
            if (KubernetesObjectUtility.isDeploymentEntity(inputObject.kind)) {
                relabelledObjects.push(canaryDeploymentHelper.markResourceAsStable(inputObject));
            }
            else {
                passthroughFiles.push(filePath);
            }
        });
    });
    // Relabelled objects are written to fresh files, appended at the end.
    passthroughFiles.push(...fileHelper.writeObjectsToFile(relabelledObjects));
    return passthroughFiles;
}
|
||||
// Awaits the shared stability poller; resolves once every resource
// reports a successful rollout, rejects otherwise.
function checkManifestStability(kubectl, resources) {
    return __awaiter(this, void 0, void 0, function* () {
        yield KubernetesManifestUtility.checkManifestStability(kubectl, resources);
    });
}
exports.checkManifestStability = checkManifestStability;
|
||||
// Annotates and labels all deployed resources with workflow metadata so
// the deployment can be traced back to this GitHub workflow run.
function annotateAndLabelResources(files, kubectl, resourceTypes, allPods) {
    return __awaiter(this, void 0, void 0, function* () {
        // Both of these feed into the annotation payload below.
        const workflowFilePath = yield utility_1.getWorkflowFilePath(TaskInputParameters.githubToken);
        const deploymentConfig = yield utility_1.getDeploymentConfig();
        const annotationKeyLabel = models.getWorkflowAnnotationKeyLabel(workflowFilePath);
        annotateResources(files, kubectl, resourceTypes, allPods, annotationKeyLabel, workflowFilePath, deploymentConfig);
        labelResources(files, kubectl, annotationKeyLabel);
    });
}
exports.annotateAndLabelResources = annotateAndLabelResources;
|
||||
function annotateResources(files, kubectl, resourceTypes, allPods, annotationKey, workflowFilePath, deploymentConfig) {
    // Collects every kubectl annotate result so errors surface together.
    const annotateResults = [];
    // Link this deployment to the last successful run in the annotation.
    const lastSuccessSha = utility_1.getLastSuccessfulRunSha(kubectl, TaskInputParameters.namespace, annotationKey);
    const annotationKeyValStr = `${annotationKey}=${models.getWorkflowAnnotationsJson(lastSuccessSha, workflowFilePath, deploymentConfig)}`;
    // Annotate the namespace and all manifest files…
    annotateResults.push(kubectl.annotate("namespace", TaskInputParameters.namespace, annotationKeyValStr));
    annotateResults.push(kubectl.annotateFiles(files, annotationKeyValStr));
    // …and the child pods of every non-pod workload.
    resourceTypes.forEach((resource) => {
        const isPod = resource.type.toUpperCase() ===
            models.KubernetesWorkload.POD.toUpperCase();
        if (!isPod) {
            utility_1.annotateChildPods(kubectl, resource.type, resource.name, annotationKeyValStr, allPods).forEach((execResult) => annotateResults.push(execResult));
        }
    });
    // Annotation failures only warn; they never fail the deployment.
    utility_1.checkForErrors(annotateResults, true);
}
|
||||
function labelResources(files, kubectl, label) {
    // Label everything with the (normalised) workflow name and the
    // workflow annotation key; failures warn rather than fail.
    const friendlyName = utility_1.normaliseWorkflowStrLabel(process.env.GITHUB_WORKFLOW);
    const labelPairs = [
        `workflowFriendlyName=${friendlyName}`,
        `workflow=${label}`,
    ];
    utility_1.checkForErrors([kubectl.labelFiles(files, labelPairs)], true);
}
|
||||
function isCanaryDeploymentStrategy(deploymentStrategy) {
    // Case-insensitive match against the canonical canary strategy name;
    // null/undefined strategies are never canary.
    if (deploymentStrategy == null) {
        return false;
    }
    return (deploymentStrategy.toUpperCase() ===
        canaryDeploymentHelper.CANARY_DEPLOYMENT_STRATEGY.toUpperCase());
}
exports.isCanaryDeploymentStrategy = isCanaryDeploymentStrategy;
|
||||
@ -1,160 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.updateIngressBackend = exports.getUpdatedBlueGreenIngress = exports.validateIngressesState = exports.routeBlueGreenIngress = exports.rejectBlueGreenIngress = exports.promoteBlueGreenIngress = exports.deployBlueGreenIngress = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fileHelper = require("../files-helper");
|
||||
const blue_green_helper_1 = require("./blue-green-helper");
|
||||
const blue_green_helper_2 = require("./blue-green-helper");
|
||||
const BACKEND = "BACKEND";
|
||||
function deployBlueGreenIngress(kubectl, filePaths) {
    // Parse every kubernetes object out of the manifest files.
    const manifestObjects = blue_green_helper_1.getManifestObjects(filePaths);
    // Deploy the workloads under the green label.
    const result = blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.GREEN_LABEL_VALUE);
    // Build green copies of the routed services…
    let objectsToApply = manifestObjects.serviceEntityList.map((serviceObject) => {
        const greenService = blue_green_helper_1.getNewBlueGreenObject(serviceObject, blue_green_helper_2.GREEN_LABEL_VALUE);
        core.debug("New blue-green object is: " + JSON.stringify(greenService));
        return greenService;
    });
    // …then append everything that is not blue/green managed and apply.
    objectsToApply = objectsToApply
        .concat(manifestObjects.otherObjects)
        .concat(manifestObjects.unroutedServiceEntityList);
    kubectl.apply(fileHelper.writeObjectsToFile(objectsToApply));
    // Workload results are returned so callers can check rollout stability.
    return result;
}
exports.deployBlueGreenIngress = deployBlueGreenIngress;
|
||||
function promoteBlueGreenIngress(kubectl, manifestObjects) {
    return __awaiter(this, void 0, void 0, function* () {
        // Promotion is only legal when every routed ingress already points
        // at the green services; otherwise bail out.
        if (!validateIngressesState(kubectl, manifestObjects.ingressEntityList, manifestObjects.serviceNameMap)) {
            throw "NotInPromoteStateIngress";
        }
        // Re-create the workloads with the stable (unsuffixed) configuration.
        const deployResult = blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.NONE_LABEL_VALUE);
        // Build stable versions of every routed service and apply them.
        const stableServices = [];
        manifestObjects.serviceEntityList.forEach((serviceObject) => {
            const stableService = blue_green_helper_1.getNewBlueGreenObject(serviceObject, blue_green_helper_2.NONE_LABEL_VALUE);
            core.debug("New blue-green object is: " + JSON.stringify(stableService));
            stableServices.push(stableService);
        });
        kubectl.apply(fileHelper.writeObjectsToFile(stableServices));
        // Hand the workload results back for rollout stability checking.
        return deployResult;
    });
}
exports.promoteBlueGreenIngress = promoteBlueGreenIngress;
|
||||
function rejectBlueGreenIngress(kubectl, filePaths) {
    return __awaiter(this, void 0, void 0, function* () {
        // Parse every object out of the supplied manifest files.
        const manifestObjects = blue_green_helper_1.getManifestObjects(filePaths);
        // Point the ingresses back at the stable services.
        routeBlueGreenIngress(kubectl, null, manifestObjects.serviceNameMap, manifestObjects.ingressEntityList);
        // Tear down the rejected green deployments and services.
        blue_green_helper_1.deleteWorkloadsAndServicesWithLabel(kubectl, blue_green_helper_2.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList, manifestObjects.serviceEntityList);
    });
}
exports.rejectBlueGreenIngress = rejectBlueGreenIngress;
|
||||
function routeBlueGreenIngress(kubectl, nextLabel, serviceNameMap, ingressEntityList) {
    // With no target label we route back to stable: apply the routed
    // ingresses unmodified. Otherwise rewrite each routed ingress so its
    // backends point at the green services; unrouted ingresses pass through.
    let ingressesToApply = [];
    if (!nextLabel) {
        ingressesToApply = ingressEntityList.filter((ingress) => isIngressRouted(ingress, serviceNameMap));
    }
    else {
        ingressEntityList.forEach((ingressObject) => {
            if (isIngressRouted(ingressObject, serviceNameMap)) {
                const greenIngress = getUpdatedBlueGreenIngress(ingressObject, serviceNameMap, blue_green_helper_2.GREEN_LABEL_VALUE);
                ingressesToApply.push(greenIngress);
            }
            else {
                ingressesToApply.push(ingressObject);
            }
        });
    }
    kubectl.apply(fileHelper.writeObjectsToFile(ingressesToApply));
}
exports.routeBlueGreenIngress = routeBlueGreenIngress;
|
||||
// Returns true only when every ingress that routes to a managed service
// both exists in the cluster and carries the green blue-green label —
// i.e. the rollout is in a promotable state.
function validateIngressesState(kubectl, ingressEntityList, serviceNameMap) {
    let areIngressesTargetingNewServices = true;
    ingressEntityList.forEach((inputObject) => {
        if (isIngressRouted(inputObject, serviceNameMap)) {
            //querying existing ingress
            let existingIngress = blue_green_helper_1.fetchResource(kubectl, inputObject.kind, inputObject.metadata.name);
            if (!!existingIngress) {
                let currentLabel;
                // checking its label
                try {
                    currentLabel =
                        existingIngress.metadata.labels[blue_green_helper_2.BLUE_GREEN_VERSION_LABEL];
                }
                catch (_a) {
                    // if no label exists, then not an ingress targeting green deployments
                    areIngressesTargetingNewServices = false;
                }
                // NOTE: after the catch path, currentLabel is undefined, so
                // this comparison also fires — the flag is set twice, harmlessly.
                if (currentLabel != blue_green_helper_2.GREEN_LABEL_VALUE) {
                    // if not green label, then wrong configuration
                    areIngressesTargetingNewServices = false;
                }
            }
            else {
                // no ingress at all, so nothing to promote
                areIngressesTargetingNewServices = false;
            }
        }
    });
    return areIngressesTargetingNewServices;
}
exports.validateIngressesState = validateIngressesState;
|
||||
function isIngressRouted(ingressObject, serviceNameMap) {
    // Walk every key/value pair of the ingress via the JSON.parse reviver
    // and flag when any backend's serviceName matches a managed service.
    let routed = false;
    JSON.parse(JSON.stringify(ingressObject), (key, value) => {
        if (key === "serviceName" && serviceNameMap.has(value)) {
            routed = true;
        }
        return value;
    });
    return routed;
}
|
||||
function getUpdatedBlueGreenIngress(inputObject, serviceNameMap, type) {
    // A falsy type means "leave it alone": return the original ingress.
    if (!type) {
        return inputObject;
    }
    // Deep-copy, stamp the blue-green labels/annotations, then rewrite the
    // backends to point at the mapped (green) service names.
    const updatedIngress = JSON.parse(JSON.stringify(inputObject));
    blue_green_helper_1.addBlueGreenLabelsAndAnnotations(updatedIngress, type);
    return updateIngressBackend(updatedIngress, serviceNameMap);
}
exports.getUpdatedBlueGreenIngress = getUpdatedBlueGreenIngress;
|
||||
function updateIngressBackend(inputObject, serviceNameMap) {
    // Rewrite every "backend" node via the JSON.parse reviver: service
    // names present in the map are swapped for their blue-green
    // counterparts; unknown names are left untouched.
    return JSON.parse(JSON.stringify(inputObject), (key, value) => {
        if (key.toUpperCase() === BACKEND) {
            const currentName = value.serviceName;
            if (serviceNameMap.has(currentName)) {
                value.serviceName = serviceNameMap.get(currentName);
            }
        }
        return value;
    });
}
exports.updateIngressBackend = updateIngressBackend;
|
||||
@ -1,58 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.deployPodCanary = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const fileHelper = require("../files-helper");
|
||||
const helper = require("../resource-object-utility");
|
||||
const canaryDeploymentHelper = require("./canary-deployment-helper");
|
||||
function deployPodCanary(filePaths, kubectl) {
    // Deploys the pod-based canary: for every deployment-like object a
    // canary copy (and, when a stable version already exists in the
    // cluster, a baseline copy) is created with a scaled-down replica
    // count; all other objects are applied unchanged.
    // Returns { result, newFilePaths } from the kubectl apply.
    const newObjectsList = [];
    // FIX: always pass the radix to parseInt so the percentage string is
    // parsed as base-10 regardless of formatting or engine quirks.
    const percentage = parseInt(TaskInputParameters.canaryPercentage, 10);
    filePaths.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, function (inputObject) {
            const name = inputObject.metadata.name;
            const kind = inputObject.kind;
            if (helper.isDeploymentEntity(kind)) {
                core.debug("Calculating replica count for canary");
                const canaryReplicaCount = calculateReplicaCountForCanary(inputObject, percentage);
                core.debug("Replica count is " + canaryReplicaCount);
                // Look for an already-deployed stable object in the cluster.
                core.debug("Querying stable object");
                const stableObject = canaryDeploymentHelper.fetchResource(kubectl, kind, name);
                if (!stableObject) {
                    core.debug("Stable object not found. Creating only canary object");
                    // If stable object not found, create canary deployment.
                    const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                    core.debug("New canary object is: " + JSON.stringify(newCanaryObject));
                    newObjectsList.push(newCanaryObject);
                }
                else {
                    core.debug("Stable object found. Creating canary and baseline objects");
                    // Baseline mirrors the current stable config at canary scale.
                    const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                    const newBaselineObject = canaryDeploymentHelper.getNewBaselineResource(stableObject, canaryReplicaCount);
                    core.debug("New canary object is: " + JSON.stringify(newCanaryObject));
                    core.debug("New baseline object is: " + JSON.stringify(newBaselineObject));
                    newObjectsList.push(newCanaryObject);
                    newObjectsList.push(newBaselineObject);
                }
            }
            else {
                // Updating non deployment entity as it is.
                newObjectsList.push(inputObject);
            }
        });
    });
    const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
    const result = kubectl.apply(manifestFiles, TaskInputParameters.forceDeployment);
    return { result: result, newFilePaths: manifestFiles };
}
exports.deployPodCanary = deployPodCanary;
|
||||
function calculateReplicaCountForCanary(inputObject, percentage) {
    // Scale the manifest's replica count by the canary percentage,
    // rounding to the nearest whole replica.
    const totalReplicas = helper.getReplicaCount(inputObject);
    return Math.round((totalReplicas * percentage) / 100);
}
|
||||
@ -1,102 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getServiceSpecLabel = exports.validateServicesState = exports.routeBlueGreenService = exports.rejectBlueGreenService = exports.promoteBlueGreenService = exports.deployBlueGreenService = void 0;
|
||||
const fileHelper = require("../files-helper");
|
||||
const blue_green_helper_1 = require("./blue-green-helper");
|
||||
const blue_green_helper_2 = require("./blue-green-helper");
|
||||
function deployBlueGreenService(kubectl, filePaths) {
    // Parse all kubernetes objects out of the manifest files.
    const manifestObjects = blue_green_helper_1.getManifestObjects(filePaths);
    // Create the workloads under the green label.
    const result = blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.GREEN_LABEL_VALUE);
    // Apply everything that is not a blue/green managed deployment or
    // routed service: other objects, ingresses, unrouted services.
    const passthroughObjects = manifestObjects.otherObjects
        .concat(manifestObjects.ingressEntityList)
        .concat(manifestObjects.unroutedServiceEntityList);
    kubectl.apply(fileHelper.writeObjectsToFile(passthroughObjects));
    // Returned so callers can verify rollout stability.
    return result;
}
exports.deployBlueGreenService = deployBlueGreenService;
|
||||
function promoteBlueGreenService(kubectl, manifestObjects) {
    return __awaiter(this, void 0, void 0, function* () {
        // Refuse to promote unless every service currently targets green.
        if (!validateServicesState(kubectl, manifestObjects.serviceEntityList)) {
            throw "NotInPromoteState";
        }
        // Re-create the workloads with the stable (unsuffixed) configuration
        // and hand the results back for rollout stability checking.
        return blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.NONE_LABEL_VALUE);
    });
}
exports.promoteBlueGreenService = promoteBlueGreenService;
|
||||
function rejectBlueGreenService(kubectl, filePaths) {
    return __awaiter(this, void 0, void 0, function* () {
        // Parse manifests, point the services back at stable, then delete
        // the rejected green workloads.
        const manifestObjects = blue_green_helper_1.getManifestObjects(filePaths);
        routeBlueGreenService(kubectl, blue_green_helper_2.NONE_LABEL_VALUE, manifestObjects.serviceEntityList);
        blue_green_helper_1.deleteWorkloadsWithLabel(kubectl, blue_green_helper_2.GREEN_LABEL_VALUE, manifestObjects.deploymentEntityList);
    });
}
exports.rejectBlueGreenService = rejectBlueGreenService;
|
||||
function routeBlueGreenService(kubectl, nextLabel, serviceEntityList) {
    // Rewrite every managed service to select the requested label value,
    // then apply the rewritten services in one shot.
    const updatedServices = serviceEntityList.map((serviceObject) => getUpdatedBlueGreenService(serviceObject, nextLabel));
    kubectl.apply(fileHelper.writeObjectsToFile(updatedServices));
}
exports.routeBlueGreenService = routeBlueGreenService;
|
||||
// adding green labels to configure existing service
function getUpdatedBlueGreenService(inputObject, labelValue) {
    // Deep-copy the service and stamp the blue-green label/annotation set.
    const updatedService = JSON.parse(JSON.stringify(inputObject));
    blue_green_helper_1.addBlueGreenLabelsAndAnnotations(updatedService, labelValue);
    return updatedService;
}
|
||||
function validateServicesState(kubectl, serviceEntityList) {
    // Every routed service must exist in the cluster and currently select
    // the green label; otherwise the rollout is not promotable.
    let areServicesGreen = true;
    serviceEntityList.forEach((serviceObject) => {
        // Fetch what is actually deployed for this service.
        const existingService = blue_green_helper_1.fetchResource(kubectl, serviceObject.kind, serviceObject.metadata.name);
        if (!existingService) {
            // service targeting deployment doesn't exist
            areServicesGreen = false;
        }
        else if (getServiceSpecLabel(existingService) != blue_green_helper_2.GREEN_LABEL_VALUE) {
            // service should be targeting deployments with green label
            areServicesGreen = false;
        }
    });
    return areServicesGreen;
}
exports.validateServicesState = validateServicesState;
|
||||
function getServiceSpecLabel(inputObject) {
    // Read the blue-green version label off the service's selector,
    // falling back to "" when any link in the chain is missing or falsy.
    const selector = inputObject && inputObject.spec && inputObject.spec.selector;
    if (selector && selector[blue_green_helper_2.BLUE_GREEN_VERSION_LABEL]) {
        return selector[blue_green_helper_2.BLUE_GREEN_VERSION_LABEL];
    }
    return "";
}
exports.getServiceSpecLabel = getServiceSpecLabel;
|
||||
@ -1,196 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.cleanupSMI = exports.validateTrafficSplitsState = exports.routeBlueGreenSMI = exports.getSMIServiceResource = exports.setupSMI = exports.rejectBlueGreenSMI = exports.promoteBlueGreenSMI = exports.deployBlueGreenSMI = void 0;
|
||||
const kubectlUtils = require("../kubectl-util");
|
||||
const fileHelper = require("../files-helper");
|
||||
const blue_green_helper_1 = require("./blue-green-helper");
|
||||
const blue_green_helper_2 = require("./blue-green-helper");
|
||||
let trafficSplitAPIVersion = "";
|
||||
const TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX = "-trafficsplit";
|
||||
const TRAFFIC_SPLIT_OBJECT = "TrafficSplit";
|
||||
const MIN_VAL = "0";
|
||||
const MAX_VAL = "100";
|
||||
function deployBlueGreenSMI(kubectl, filePaths) {
    // Parse all objects out of the manifest files.
    const manifestObjects = blue_green_helper_1.getManifestObjects(filePaths);
    // Apply the services and every non-managed object as they are.
    const passthroughObjects = manifestObjects.otherObjects
        .concat(manifestObjects.serviceEntityList)
        .concat(manifestObjects.ingressEntityList)
        .concat(manifestObjects.unroutedServiceEntityList);
    kubectl.apply(fileHelper.writeObjectsToFile(passthroughObjects));
    // Create the auxiliary services and the TrafficSplit objects.
    setupSMI(kubectl, manifestObjects.serviceEntityList);
    // Deploy the workloads under the green label.
    const result = blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.GREEN_LABEL_VALUE);
    // Returned so callers can check manifest stability.
    return result;
}
exports.deployBlueGreenSMI = deployBlueGreenSMI;
|
||||
/**
 * Promotes a blue-green SMI deployment: once every TrafficSplit routes all
 * traffic to the green services, the deployments are re-created with the
 * stable ("None") label configuration.
 *
 * @param kubectl kubectl wrapper used to query/apply resources
 * @param manifestObjects parsed manifests (serviceEntityList, deploymentEntityList, ...)
 * @returns the workload-creation result, for manifest-stability checking
 * @throws {Error} "NotInPromoteStateSMI" when the TrafficSplits are not fully
 *         routed to green (nothing to promote)
 */
function promoteBlueGreenSMI(kubectl, manifestObjects) {
    return __awaiter(this, void 0, void 0, function* () {
        // there is only something to promote when every TrafficSplit routes
        // 100% of its traffic to the green services
        if (!validateTrafficSplitsState(kubectl, manifestObjects.serviceEntityList)) {
            // fix: previously threw a bare string ("NotInPromoteStateSMI"),
            // which loses the stack trace and breaks `instanceof Error` checks
            throw new Error("NotInPromoteStateSMI");
        }
        // re-create the deployments with the stable (unlabeled) configuration
        return blue_green_helper_1.createWorkloadsWithLabel(kubectl, manifestObjects.deploymentEntityList, blue_green_helper_2.NONE_LABEL_VALUE);
    });
}
|
||||
exports.promoteBlueGreenSMI = promoteBlueGreenSMI;
|
||||
// Rejects a blue-green SMI deployment: points every TrafficSplit back at the
// stable services, removes the rejected green workloads, and tears down the
// SMI auxiliary objects (TrafficSplits plus the extra services).
function rejectBlueGreenSMI(kubectl, filePaths) {
    return __awaiter(this, void 0, void 0, function* () {
        const { serviceEntityList, deploymentEntityList } = blue_green_helper_1.getManifestObjects(filePaths);
        // route all traffic back to the stable deployments
        routeBlueGreenSMI(kubectl, blue_green_helper_2.NONE_LABEL_VALUE, serviceEntityList);
        // remove the rejected green deployments
        blue_green_helper_1.deleteWorkloadsWithLabel(kubectl, blue_green_helper_2.GREEN_LABEL_VALUE, deploymentEntityList);
        // delete the TrafficSplits and the auxiliary stable/green services
        cleanupSMI(kubectl, serviceEntityList);
    });
}
|
||||
exports.rejectBlueGreenSMI = rejectBlueGreenSMI;
|
||||
// Prepares the SMI layer for a blue-green deployment: for every routed
// service, creates the "-stable" and "-green" companion services, then a
// TrafficSplit that initially sends all traffic to the stable side.
function setupSMI(kubectl, serviceEntityList) {
    // build the stable/green companion services for every routed service
    const auxiliaryServices = [];
    serviceEntityList.forEach((serviceObject) => {
        auxiliaryServices.push(getSMIServiceResource(serviceObject, blue_green_helper_2.STABLE_SUFFIX));
        auxiliaryServices.push(getSMIServiceResource(serviceObject, blue_green_helper_2.GREEN_SUFFIX));
    });
    // create the companion services
    kubectl.apply(fileHelper.writeObjectsToFile(auxiliaryServices));
    // start with all traffic routed to the stable side
    for (const serviceObject of serviceEntityList) {
        createTrafficSplitObject(kubectl, serviceObject.metadata.name, blue_green_helper_2.NONE_LABEL_VALUE);
    }
}
|
||||
exports.setupSMI = setupSMI;
|
||||
// Creates/updates the TrafficSplit for service `name` and applies it.
// `nextLabel` picks the target: GREEN_LABEL_VALUE sends 100% of traffic to
// the "-green" service, any other value sends 100% to the "-stable" service.
function createTrafficSplitObject(kubectl, name, nextLabel) {
    // resolve and cache the cluster's SMI TrafficSplit API version once
    if (!trafficSplitAPIVersion) {
        trafficSplitAPIVersion = kubectlUtils.getTrafficSplitAPIVersion(kubectl);
    }
    // weights are all-or-nothing (0 / 100), decided by the target label
    let stableWeight;
    let greenWeight;
    if (nextLabel === blue_green_helper_2.GREEN_LABEL_VALUE) {
        stableWeight = parseInt(MIN_VAL);
        greenWeight = parseInt(MAX_VAL);
    }
    else {
        stableWeight = parseInt(MAX_VAL);
        greenWeight = parseInt(MIN_VAL);
    }
    // TrafficSplit manifest (JSON) with the -stable and -green backends
    const trafficSplitObject = `{
"apiVersion": "${trafficSplitAPIVersion}",
"kind": "TrafficSplit",
"metadata": {
"name": "${blue_green_helper_1.getBlueGreenResourceName(name, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX)}"
},
"spec": {
"service": "${name}",
"backends": [
{
"service": "${blue_green_helper_1.getBlueGreenResourceName(name, blue_green_helper_2.STABLE_SUFFIX)}",
"weight": ${stableWeight}
},
{
"service": "${blue_green_helper_1.getBlueGreenResourceName(name, blue_green_helper_2.GREEN_SUFFIX)}",
"weight": ${greenWeight}
}
]
}
}`;
    // write the manifest to a temp file and apply it
    const trafficSplitManifestFile = fileHelper.writeManifestToFile(trafficSplitObject, TRAFFIC_SPLIT_OBJECT, blue_green_helper_1.getBlueGreenResourceName(name, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX));
    kubectl.apply(trafficSplitManifestFile);
}
|
||||
// Builds the auxiliary SMI service for `inputObject`: the "-stable" variant
// (selecting the unlabeled pods) or the "-green" variant (selecting the
// green-labeled pods). The caller's object is never mutated.
function getSMIServiceResource(inputObject, suffix) {
    // deep-copy via JSON round-trip so the input stays untouched
    const serviceCopy = JSON.parse(JSON.stringify(inputObject));
    if (suffix === blue_green_helper_2.STABLE_SUFFIX) {
        // stable variant: rename with the stable suffix, select unlabeled pods
        serviceCopy.metadata.name = blue_green_helper_1.getBlueGreenResourceName(inputObject.metadata.name, blue_green_helper_2.STABLE_SUFFIX);
        return blue_green_helper_1.getNewBlueGreenObject(serviceCopy, blue_green_helper_2.NONE_LABEL_VALUE);
    }
    // green variant: name is left alone here; the green label is added instead
    return blue_green_helper_1.getNewBlueGreenObject(serviceCopy, blue_green_helper_2.GREEN_LABEL_VALUE);
}
|
||||
exports.getSMIServiceResource = getSMIServiceResource;
|
||||
// Points every service's TrafficSplit at the deployments carrying
// `nextLabel` (green during rollout, "None" to fall back to stable).
function routeBlueGreenSMI(kubectl, nextLabel, serviceEntityList) {
    for (const serviceObject of serviceEntityList) {
        createTrafficSplitObject(kubectl, serviceObject.metadata.name, nextLabel);
    }
}
|
||||
exports.routeBlueGreenSMI = routeBlueGreenSMI;
|
||||
/**
 * Checks whether every service's TrafficSplit exists and currently routes
 * all traffic to the green service (and none to stable) — i.e. whether the
 * blue-green deployment is ready to be promoted.
 *
 * @param kubectl kubectl wrapper used to fetch the TrafficSplit objects
 * @param serviceEntityList routed services from the manifests
 * @returns true only if every TrafficSplit is in the promotable state
 */
function validateTrafficSplitsState(kubectl, serviceEntityList) {
    let areTrafficSplitsInRightState = true;
    serviceEntityList.forEach((serviceObject) => {
        const name = serviceObject.metadata.name;
        let trafficSplitObject = blue_green_helper_1.fetchResource(kubectl, TRAFFIC_SPLIT_OBJECT, blue_green_helper_1.getBlueGreenResourceName(name, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX));
        if (!trafficSplitObject) {
            // fix: skip this service entirely — the original fell through and
            // crashed on JSON.parse(JSON.stringify(undefined)) below
            areTrafficSplitsInRightState = false;
            return;
        }
        trafficSplitObject = JSON.parse(JSON.stringify(trafficSplitObject));
        trafficSplitObject.spec.backends.forEach((backend) => {
            // compare numerically: cluster weights may be numbers while the
            // MIN/MAX constants are strings (the original relied on loose !=)
            if (backend.service === blue_green_helper_1.getBlueGreenResourceName(name, blue_green_helper_2.GREEN_SUFFIX) &&
                Number(backend.weight) !== Number(MAX_VAL)) {
                // the green service must carry all the traffic
                areTrafficSplitsInRightState = false;
            }
            if (backend.service === blue_green_helper_1.getBlueGreenResourceName(name, blue_green_helper_2.STABLE_SUFFIX) &&
                Number(backend.weight) !== Number(MIN_VAL)) {
                // the stable service must carry no traffic
                areTrafficSplitsInRightState = false;
            }
        });
    });
    return areTrafficSplitsInRightState;
}
|
||||
exports.validateTrafficSplitsState = validateTrafficSplitsState;
|
||||
// Deletes the SMI auxiliary objects created for a blue-green deployment:
// each service's TrafficSplit plus its "-green" and "-stable" companions.
function cleanupSMI(kubectl, serviceEntityList) {
    const deleteList = [];
    for (const serviceObject of serviceEntityList) {
        const baseName = serviceObject.metadata.name;
        deleteList.push({
            name: blue_green_helper_1.getBlueGreenResourceName(baseName, TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX),
            kind: TRAFFIC_SPLIT_OBJECT,
        }, {
            name: blue_green_helper_1.getBlueGreenResourceName(baseName, blue_green_helper_2.GREEN_SUFFIX),
            kind: serviceObject.kind,
        }, {
            name: blue_green_helper_1.getBlueGreenResourceName(baseName, blue_green_helper_2.STABLE_SUFFIX),
            kind: serviceObject.kind,
        });
    }
    // delete everything in one pass
    blue_green_helper_1.deleteObjects(kubectl, deleteList);
}
|
||||
exports.cleanupSMI = cleanupSMI;
|
||||
@ -1,203 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.redirectTrafficToStableDeployment = exports.redirectTrafficToCanaryDeployment = exports.deploySMICanary = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const fileHelper = require("../files-helper");
|
||||
const helper = require("../resource-object-utility");
|
||||
const utils = require("../manifest-utilities");
|
||||
const kubectlUtils = require("../kubectl-util");
|
||||
const canaryDeploymentHelper = require("./canary-deployment-helper");
|
||||
const utility_1 = require("../utility");
|
||||
// Suffix appended to a service name to derive its TrafficSplit's name.
const TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX = "-workflow-rollout";
// Kubernetes kind of the SMI traffic-splitting object.
const TRAFFIC_SPLIT_OBJECT = "TrafficSplit";
// Cached SMI TrafficSplit API version, resolved via kubectl-util on first use.
let trafficSplitAPIVersion = "";
|
||||
/**
 * Deploys the canary (and, when a stable deployment already exists, baseline)
 * workloads for an SMI canary rollout, then creates/updates the canary,
 * baseline and stable services plus the TrafficSplit.
 *
 * @param filePaths manifest files to deploy
 * @param kubectl kubectl wrapper used to query/apply resources
 * @returns {{result, newFilePaths}} the kubectl apply result and the written manifest files
 * @throws {Error} when baseline-and-canary-replicas is not a number in [0, 100]
 */
function deploySMICanary(filePaths, kubectl) {
    // fix: pass an explicit radix, and reject NaN — previously a non-numeric
    // input slipped past the range check (NaN compares false on both sides)
    const canaryReplicaCount = parseInt(core.getInput("baseline-and-canary-replicas"), 10);
    if (Number.isNaN(canaryReplicaCount) ||
        canaryReplicaCount < 0 ||
        canaryReplicaCount > 100)
        throw Error("Baseline-and-canary-replicas must be between 0 and 100");
    const newObjectsList = [];
    filePaths.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, (inputObject) => {
            const name = inputObject.metadata.name;
            const kind = inputObject.kind;
            if (helper.isDeploymentEntity(kind)) {
                const stableObject = canaryDeploymentHelper.fetchResource(kubectl, kind, name);
                if (!stableObject) {
                    // first rollout: no stable deployment yet, create canary only
                    core.debug("Stable object not found. Creating only canary object");
                    const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                    newObjectsList.push(newCanaryObject);
                }
                else {
                    if (!canaryDeploymentHelper.isResourceMarkedAsStable(stableObject)) {
                        throw Error(`StableSpecSelectorNotExist : ${name}`);
                    }
                    // stable exists: deploy the new canary plus a baseline that
                    // mirrors the currently-running stable spec
                    core.debug("Stable object found. Creating canary and baseline objects");
                    const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                    const newBaselineObject = canaryDeploymentHelper.getNewBaselineResource(stableObject, canaryReplicaCount);
                    newObjectsList.push(newCanaryObject);
                    newObjectsList.push(newBaselineObject);
                }
            }
            else {
                // non-workload objects are applied unchanged
                newObjectsList.push(inputObject);
            }
        });
    });
    const newFilePaths = fileHelper.writeObjectsToFile(newObjectsList);
    const result = kubectl.apply(newFilePaths, TaskInputParameters.forceDeployment);
    // create/refresh the canary, baseline and stable services + TrafficSplit
    createCanaryService(kubectl, filePaths);
    return { result, newFilePaths };
}
|
||||
exports.deploySMICanary = deploySMICanary;
|
||||
// Creates the canary, baseline and (if missing) stable variants of every
// service found in the given manifests, plus the TrafficSplit routing between
// them, and applies them all in a single kubectl call.
function createCanaryService(kubectl, filePaths) {
    const newObjectsList = [];
    const trafficObjectsList = [];
    filePaths.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, function (inputObject) {
            const name = inputObject.metadata.name;
            const kind = inputObject.kind;
            if (helper.isServiceEntity(kind)) {
                // canary and baseline service variants are always (re)created
                const newCanaryServiceObject = canaryDeploymentHelper.getNewCanaryResource(inputObject);
                core.debug("New canary service object is: " +
                    JSON.stringify(newCanaryServiceObject));
                newObjectsList.push(newCanaryServiceObject);
                const newBaselineServiceObject = canaryDeploymentHelper.getNewBaselineResource(inputObject);
                core.debug("New baseline object is: " + JSON.stringify(newBaselineServiceObject));
                newObjectsList.push(newBaselineServiceObject);
                core.debug("Querying for stable service object");
                const stableObject = canaryDeploymentHelper.fetchResource(kubectl, kind, canaryDeploymentHelper.getStableResourceName(name));
                if (!stableObject) {
                    // first rollout: create the stable service and a TrafficSplit
                    // with weights (stable=0, baseline=0, canary=1000)
                    const newStableServiceObject = canaryDeploymentHelper.getStableResource(inputObject);
                    core.debug("New stable service object is: " +
                        JSON.stringify(newStableServiceObject));
                    newObjectsList.push(newStableServiceObject);
                    core.debug("Creating the traffic object for service: " + name);
                    const trafficObject = createTrafficSplitManifestFile(kubectl, name, 0, 0, 1000);
                    core.debug("Creating the traffic object for service: " + trafficObject);
                    trafficObjectsList.push(trafficObject);
                }
                else {
                    // stable already exists: only rewrite the TrafficSplit when
                    // the canary backend is not already at full weight
                    let updateTrafficObject = true;
                    const trafficObject = canaryDeploymentHelper.fetchResource(kubectl, TRAFFIC_SPLIT_OBJECT, getTrafficSplitResourceName(name));
                    if (trafficObject) {
                        // JSON round-trip to get a plain mutable copy
                        const trafficJObject = JSON.parse(JSON.stringify(trafficObject));
                        if (trafficJObject &&
                            trafficJObject.spec &&
                            trafficJObject.spec.backends) {
                            trafficJObject.spec.backends.forEach((s) => {
                                // NOTE(review): compares against the string "1000m"
                                // while manifests written here emit plain "1000" —
                                // presumably the cluster normalizes weights to
                                // milli-units; confirm against live objects
                                if (s.service ===
                                    canaryDeploymentHelper.getCanaryResourceName(name) &&
                                    s.weight === "1000m") {
                                    core.debug("Update traffic objcet not required");
                                    updateTrafficObject = false;
                                }
                            });
                        }
                    }
                    if (updateTrafficObject) {
                        core.debug("Stable service object present so updating the traffic object for service: " +
                            name);
                        trafficObjectsList.push(updateTrafficSplitObject(kubectl, name));
                    }
                }
            }
        });
    });
    // apply the new service objects together with the TrafficSplit manifests
    const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
    manifestFiles.push(...trafficObjectsList);
    const result = kubectl.apply(manifestFiles, TaskInputParameters.forceDeployment);
    utility_1.checkForErrors([result]);
}
|
||||
// Shifts all SMI traffic to the canary side (stable weight 0 of 1000).
function redirectTrafficToCanaryDeployment(kubectl, manifestFilePaths) {
    const stableWeight = 0;
    const canaryWeight = 1000;
    adjustTraffic(kubectl, manifestFilePaths, stableWeight, canaryWeight);
}
|
||||
exports.redirectTrafficToCanaryDeployment = redirectTrafficToCanaryDeployment;
|
||||
// Shifts all SMI traffic back to the stable side (canary weight 0 of 1000).
function redirectTrafficToStableDeployment(kubectl, manifestFilePaths) {
    const stableWeight = 1000;
    const canaryWeight = 0;
    adjustTraffic(kubectl, manifestFilePaths, stableWeight, canaryWeight);
}
|
||||
exports.redirectTrafficToStableDeployment = redirectTrafficToStableDeployment;
|
||||
// Rewrites the TrafficSplit of every service found in the given manifests so
// that stable gets `stableWeight` and canary gets `canaryWeight` (baseline is
// always dropped to 0), then applies the new TrafficSplit manifests.
function adjustTraffic(kubectl, manifestFilePaths, stableWeight, canaryWeight) {
    const inputManifestFiles = utils.getManifestFiles(manifestFilePaths);
    // nothing to adjust without manifest files
    if (inputManifestFiles == null || inputManifestFiles.length == 0) {
        return;
    }
    const trafficSplitManifests = [];
    const serviceObjects = [];
    for (const filePath of inputManifestFiles) {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, function (inputObject) {
            if (helper.isServiceEntity(inputObject.kind)) {
                trafficSplitManifests.push(createTrafficSplitManifestFile(kubectl, inputObject.metadata.name, stableWeight, 0, canaryWeight));
                serviceObjects.push(inputObject.metadata.name);
            }
        });
    }
    // no routed services found — nothing to apply
    if (trafficSplitManifests.length <= 0) {
        return;
    }
    const result = kubectl.apply(trafficSplitManifests, TaskInputParameters.forceDeployment);
    core.debug("serviceObjects:" + serviceObjects.join(",") + " result:" + result);
    utility_1.checkForErrors([result]);
}
|
||||
// Builds a TrafficSplit manifest for `serviceName` from the configured canary
// percentage: the canary share (scaled to thousandths) is split evenly
// between the baseline and canary backends, the remainder stays on stable.
function updateTrafficSplitObject(kubectl, serviceName) {
    // canaryPercentage is 0-100; TrafficSplit weights are expressed out of 1000
    const percentage = parseInt(TaskInputParameters.canaryPercentage) * 10;
    const baselineAndCanaryWeight = percentage / 2;
    const stableDeploymentWeight = 1000 - percentage;
    core.debug(`Creating the traffic object with canary weight: ${baselineAndCanaryWeight},baseling weight: ${baselineAndCanaryWeight},stable: ${stableDeploymentWeight}`);
    return createTrafficSplitManifestFile(kubectl, serviceName, stableDeploymentWeight, baselineAndCanaryWeight, baselineAndCanaryWeight);
}
|
||||
// Serializes the TrafficSplit for `serviceName` with the given backend
// weights, writes it to a temp manifest file, and returns the file path.
// Throws when the manifest file could not be produced.
function createTrafficSplitManifestFile(kubectl, serviceName, stableWeight, baselineWeight, canaryWeight) {
    const manifestJson = getTrafficSplitObject(kubectl, serviceName, stableWeight, baselineWeight, canaryWeight);
    const manifestFile = fileHelper.writeManifestToFile(manifestJson, TRAFFIC_SPLIT_OBJECT, serviceName);
    if (manifestFile) {
        return manifestFile;
    }
    throw new Error("UnableToCreateTrafficSplitManifestFile");
}
|
||||
// Serializes a TrafficSplit manifest (JSON) for service `name` with explicit
// stable/baseline/canary backend weights (callers pass weights totalling 1000).
function getTrafficSplitObject(kubectl, name, stableWeight, baselineWeight, canaryWeight) {
    // resolve and cache the cluster's SMI TrafficSplit API version once
    if (!trafficSplitAPIVersion) {
        trafficSplitAPIVersion = kubectlUtils.getTrafficSplitAPIVersion(kubectl);
    }
    // NOTE(review): weights are emitted as quoted strings here — presumably
    // accepted by the TrafficSplit CRD; confirm before changing the format
    return `{
"apiVersion": "${trafficSplitAPIVersion}",
"kind": "TrafficSplit",
"metadata": {
"name": "${getTrafficSplitResourceName(name)}"
},
"spec": {
"backends": [
{
"service": "${canaryDeploymentHelper.getStableResourceName(name)}",
"weight": "${stableWeight}"
},
{
"service": "${canaryDeploymentHelper.getBaselineResourceName(name)}",
"weight": "${baselineWeight}"
},
{
"service": "${canaryDeploymentHelper.getCanaryResourceName(name)}",
"weight": "${canaryWeight}"
}
],
"service": "${name}"
}
}`;
}
|
||||
// Name of the TrafficSplit object that fronts the given service.
function getTrafficSplitResourceName(name) {
    return `${name}${TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX}`;
}
|
||||
@ -1,26 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isEqual = exports.StringComparer = void 0;
|
||||
// String-comparison modes (TypeScript-emitted enum with reverse mapping):
// Ordinal = case-sensitive, OrdinalIgnoreCase = case-insensitive.
var StringComparer;
(function (StringComparer) {
    StringComparer[StringComparer["Ordinal"] = 0] = "Ordinal";
    StringComparer[StringComparer["OrdinalIgnoreCase"] = 1] = "OrdinalIgnoreCase";
})(StringComparer = exports.StringComparer || (exports.StringComparer = {}));
|
||||
// Compares two strings under the given StringComparer mode. Two nullish
// values are considered equal; a nullish value never equals a real string.
function isEqual(str1, str2, stringComparer) {
    if (str1 == null || str2 == null) {
        // equal only when both are null/undefined
        return str1 == null && str2 == null;
    }
    if (stringComparer == StringComparer.OrdinalIgnoreCase) {
        // case-insensitive ordinal comparison
        return str1.toUpperCase() === str2.toUpperCase();
    }
    return str1 === str2;
}
|
||||
exports.isEqual = isEqual;
|
||||
@ -1,11 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getCurrentTime = exports.sleep = void 0;
|
||||
// Resolves after roughly `timeout` milliseconds.
function sleep(timeout) {
    return new Promise((resolve) => {
        setTimeout(resolve, timeout);
    });
}
|
||||
exports.sleep = sleep;
|
||||
// Current wall-clock time in milliseconds since the Unix epoch.
function getCurrentTime() {
    return Date.now();
}
|
||||
exports.getCurrentTime = getCurrentTime;
|
||||
@ -1,527 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ToolRunner = void 0;
|
||||
const os = require("os");
|
||||
const events = require("events");
|
||||
const child = require("child_process");
|
||||
const core = require("@actions/core");
|
||||
class ToolRunner extends events.EventEmitter {
|
||||
constructor(toolPath) {
|
||||
super();
|
||||
if (!toolPath) {
|
||||
throw new Error('Parameter \'toolPath\' cannot be null or empty.');
|
||||
}
|
||||
this.toolPath = toolPath;
|
||||
this.args = [];
|
||||
core.debug('toolRunner toolPath: ' + toolPath);
|
||||
}
|
||||
_debug(message) {
|
||||
this.emit('debug', message);
|
||||
}
|
||||
_argStringToArray(argString) {
|
||||
var args = [];
|
||||
var inQuotes = false;
|
||||
var escaped = false;
|
||||
var lastCharWasSpace = true;
|
||||
var arg = '';
|
||||
var append = function (c) {
|
||||
// we only escape double quotes.
|
||||
if (escaped && c !== '"') {
|
||||
arg += '\\';
|
||||
}
|
||||
arg += c;
|
||||
escaped = false;
|
||||
};
|
||||
for (var i = 0; i < argString.length; i++) {
|
||||
var c = argString.charAt(i);
|
||||
if (c === ' ' && !inQuotes) {
|
||||
if (!lastCharWasSpace) {
|
||||
args.push(arg);
|
||||
arg = '';
|
||||
}
|
||||
lastCharWasSpace = true;
|
||||
continue;
|
||||
}
|
||||
else {
|
||||
lastCharWasSpace = false;
|
||||
}
|
||||
if (c === '"') {
|
||||
if (!escaped) {
|
||||
inQuotes = !inQuotes;
|
||||
}
|
||||
else {
|
||||
append(c);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (c === "\\" && escaped) {
|
||||
append(c);
|
||||
continue;
|
||||
}
|
||||
if (c === "\\" && inQuotes) {
|
||||
escaped = true;
|
||||
continue;
|
||||
}
|
||||
append(c);
|
||||
lastCharWasSpace = false;
|
||||
}
|
||||
if (!lastCharWasSpace) {
|
||||
args.push(arg.trim());
|
||||
}
|
||||
return args;
|
||||
}
|
||||
_getCommandString(options, noPrefix) {
|
||||
let toolPath = this._getSpawnFileName();
|
||||
let args = this._getSpawnArgs(options);
|
||||
let cmd = noPrefix ? '' : '[command]'; // omit prefix when piped to a second tool
|
||||
if (process.platform == 'win32') {
|
||||
// Windows + cmd file
|
||||
if (this._isCmdFile()) {
|
||||
cmd += toolPath;
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${a}`;
|
||||
});
|
||||
}
|
||||
// Windows + verbatim
|
||||
else if (options.windowsVerbatimArguments) {
|
||||
cmd += `"${toolPath}"`;
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${a}`;
|
||||
});
|
||||
}
|
||||
// Windows (regular)
|
||||
else {
|
||||
cmd += this._windowsQuoteCmdArg(toolPath);
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${this._windowsQuoteCmdArg(a)}`;
|
||||
});
|
||||
}
|
||||
}
|
||||
else {
|
||||
// OSX/Linux - this can likely be improved with some form of quoting.
|
||||
// creating processes on Unix is fundamentally different than Windows.
|
||||
// on Unix, execvp() takes an arg array.
|
||||
cmd += toolPath;
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${a}`;
|
||||
});
|
||||
}
|
||||
// append second tool
|
||||
if (this.pipeOutputToTool) {
|
||||
cmd += ' | ' + this.pipeOutputToTool._getCommandString(options, /*noPrefix:*/ true);
|
||||
}
|
||||
return cmd;
|
||||
}
|
||||
_getSpawnFileName() {
|
||||
if (process.platform == 'win32') {
|
||||
if (this._isCmdFile()) {
|
||||
return process.env['COMSPEC'] || 'cmd.exe';
|
||||
}
|
||||
}
|
||||
return this.toolPath;
|
||||
}
|
||||
_getSpawnArgs(options) {
|
||||
if (process.platform == 'win32') {
|
||||
if (this._isCmdFile()) {
|
||||
let argline = `/D /S /C "${this._windowsQuoteCmdArg(this.toolPath)}`;
|
||||
for (let i = 0; i < this.args.length; i++) {
|
||||
argline += ' ';
|
||||
argline += options.windowsVerbatimArguments ? this.args[i] : this._windowsQuoteCmdArg(this.args[i]);
|
||||
}
|
||||
argline += '"';
|
||||
return [argline];
|
||||
}
|
||||
if (options.windowsVerbatimArguments) {
|
||||
// note, in Node 6.x options.argv0 can be used instead of overriding args.slice and args.unshift.
|
||||
// for more details, refer to https://github.com/nodejs/node/blob/v6.x/lib/child_process.js
|
||||
let args = this.args.slice(0); // copy the array
|
||||
// override slice to prevent Node from creating a copy of the arg array.
|
||||
// we need Node to use the "unshift" override below.
|
||||
args.slice = function () {
|
||||
if (arguments.length != 1 || arguments[0] != 0) {
|
||||
throw new Error('Unexpected arguments passed to args.slice when windowsVerbatimArguments flag is set.');
|
||||
}
|
||||
return args;
|
||||
};
|
||||
// override unshift
|
||||
//
|
||||
// when using the windowsVerbatimArguments option, Node does not quote the tool path when building
|
||||
// the cmdline parameter for the win32 function CreateProcess(). an unquoted space in the tool path
|
||||
// causes problems for tools when attempting to parse their own command line args. tools typically
|
||||
// assume their arguments begin after arg 0.
|
||||
//
|
||||
// by hijacking unshift, we can quote the tool path when it pushed onto the args array. Node builds
|
||||
// the cmdline parameter from the args array.
|
||||
//
|
||||
// note, we can't simply pass a quoted tool path to Node for multiple reasons:
|
||||
// 1) Node verifies the file exists (calls win32 function GetFileAttributesW) and the check returns
|
||||
// false if the path is quoted.
|
||||
// 2) Node passes the tool path as the application parameter to CreateProcess, which expects the
|
||||
// path to be unquoted.
|
||||
//
|
||||
// also note, in addition to the tool path being embedded within the cmdline parameter, Node also
|
||||
// passes the tool path to CreateProcess via the application parameter (optional parameter). when
|
||||
// present, Windows uses the application parameter to determine which file to run, instead of
|
||||
// interpreting the file from the cmdline parameter.
|
||||
args.unshift = function () {
|
||||
if (arguments.length != 1) {
|
||||
throw new Error('Unexpected arguments passed to args.unshift when windowsVerbatimArguments flag is set.');
|
||||
}
|
||||
return Array.prototype.unshift.call(args, `"${arguments[0]}"`); // quote the file name
|
||||
};
|
||||
return args;
|
||||
}
|
||||
}
|
||||
return this.args;
|
||||
}
|
||||
_isCmdFile() {
|
||||
let upperToolPath = this.toolPath.toUpperCase();
|
||||
return this._endsWith(upperToolPath, '.CMD') || this._endsWith(upperToolPath, '.BAT');
|
||||
}
|
||||
_endsWith(str, end) {
|
||||
return str.slice(-end.length) == end;
|
||||
}
|
||||
_windowsQuoteCmdArg(arg) {
|
||||
// for .exe, apply the normal quoting rules that libuv applies
|
||||
if (!this._isCmdFile()) {
|
||||
return this._uv_quote_cmd_arg(arg);
|
||||
}
|
||||
// otherwise apply quoting rules specific to the cmd.exe command line parser.
|
||||
// the libuv rules are generic and are not designed specifically for cmd.exe
|
||||
// command line parser.
|
||||
//
|
||||
// for a detailed description of the cmd.exe command line parser, refer to
|
||||
// http://stackoverflow.com/questions/4094699/how-does-the-windows-command-interpreter-cmd-exe-parse-scripts/7970912#7970912
|
||||
// need quotes for empty arg
|
||||
if (!arg) {
|
||||
return '""';
|
||||
}
|
||||
// determine whether the arg needs to be quoted
|
||||
const cmdSpecialChars = [' ', '\t', '&', '(', ')', '[', ']', '{', '}', '^', '=', ';', '!', '\'', '+', ',', '`', '~', '|', '<', '>', '"'];
|
||||
let needsQuotes = false;
|
||||
for (let char of arg) {
|
||||
if (cmdSpecialChars.some(x => x == char)) {
|
||||
needsQuotes = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// short-circuit if quotes not needed
|
||||
if (!needsQuotes) {
|
||||
return arg;
|
||||
}
|
||||
// the following quoting rules are very similar to the rules that by libuv applies.
|
||||
//
|
||||
// 1) wrap the string in quotes
|
||||
//
|
||||
// 2) double-up quotes - i.e. " => ""
|
||||
//
|
||||
// this is different from the libuv quoting rules. libuv replaces " with \", which unfortunately
|
||||
// doesn't work well with a cmd.exe command line.
|
||||
//
|
||||
// note, replacing " with "" also works well if the arg is passed to a downstream .NET console app.
|
||||
// for example, the command line:
|
||||
// foo.exe "myarg:""my val"""
|
||||
// is parsed by a .NET console app into an arg array:
|
||||
// [ "myarg:\"my val\"" ]
|
||||
// which is the same end result when applying libuv quoting rules. although the actual
|
||||
// command line from libuv quoting rules would look like:
|
||||
// foo.exe "myarg:\"my val\""
|
||||
//
|
||||
// 3) double-up slashes that preceed a quote,
|
||||
// e.g. hello \world => "hello \world"
|
||||
// hello\"world => "hello\\""world"
|
||||
// hello\\"world => "hello\\\\""world"
|
||||
// hello world\ => "hello world\\"
|
||||
//
|
||||
// technically this is not required for a cmd.exe command line, or the batch argument parser.
|
||||
// the reasons for including this as a .cmd quoting rule are:
|
||||
//
|
||||
// a) this is optimized for the scenario where the argument is passed from the .cmd file to an
|
||||
// external program. many programs (e.g. .NET console apps) rely on the slash-doubling rule.
|
||||
//
|
||||
// b) it's what we've been doing previously (by deferring to node default behavior) and we
|
||||
// haven't heard any complaints about that aspect.
|
||||
//
|
||||
// note, a weakness of the quoting rules chosen here, is that % is not escaped. in fact, % cannot be
|
||||
// escaped when used on the command line directly - even though within a .cmd file % can be escaped
|
||||
// by using %%.
|
||||
//
|
||||
// the saving grace is, on the command line, %var% is left as-is if var is not defined. this contrasts
|
||||
// the line parsing rules within a .cmd file, where if var is not defined it is replaced with nothing.
|
||||
//
|
||||
// one option that was explored was replacing % with ^% - i.e. %var% => ^%var^%. this hack would
|
||||
// often work, since it is unlikely that var^ would exist, and the ^ character is removed when the
|
||||
// variable is used. the problem, however, is that ^ is not removed when %* is used to pass the args
|
||||
// to an external program.
|
||||
//
|
||||
// an unexplored potential solution for the % escaping problem, is to create a wrapper .cmd file.
|
||||
// % can be escaped within a .cmd file.
|
||||
let reverse = '"';
|
||||
let quote_hit = true;
|
||||
for (let i = arg.length; i > 0; i--) { // walk the string in reverse
|
||||
reverse += arg[i - 1];
|
||||
if (quote_hit && arg[i - 1] == '\\') {
|
||||
reverse += '\\'; // double the slash
|
||||
}
|
||||
else if (arg[i - 1] == '"') {
|
||||
quote_hit = true;
|
||||
reverse += '"'; // double the quote
|
||||
}
|
||||
else {
|
||||
quote_hit = false;
|
||||
}
|
||||
}
|
||||
reverse += '"';
|
||||
return reverse.split('').reverse().join('');
|
||||
}
|
||||
// Quote one argument for Windows CreateProcess semantics. Walks the string in
// REVERSE so that a run of backslashes immediately preceding a double quote
// (or the closing quote) gets doubled, matching libuv's quote_cmd_arg.
_uv_quote_cmd_arg(arg) {
    // Tool runner wraps child_process.spawn() and needs to apply the same quoting as
    // Node in certain cases where the undocumented spawn option windowsVerbatimArguments
    // is used.
    //
    // Since this function is a port of quote_cmd_arg from Node 4.x (technically, lib UV,
    // see https://github.com/nodejs/node/blob/v4.x/deps/uv/src/win/process.c for details),
    // pasting copyright notice from Node within this function:
    //
    //      Copyright Joyent, Inc. and other Node contributors. All rights reserved.
    //
    //      Permission is hereby granted, free of charge, to any person obtaining a copy
    //      of this software and associated documentation files (the "Software"), to
    //      deal in the Software without restriction, including without limitation the
    //      rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    //      sell copies of the Software, and to permit persons to whom the Software is
    //      furnished to do so, subject to the following conditions:
    //
    //      The above copyright notice and this permission notice shall be included in
    //      all copies or substantial portions of the Software.
    //
    //      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    //      IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    //      FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    //      AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    //      LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    //      FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
    //      IN THE SOFTWARE.
    if (!arg) {
        // Need double quotation for empty argument
        return '""';
    }
    if (arg.indexOf(' ') < 0 && arg.indexOf('\t') < 0 && arg.indexOf('"') < 0) {
        // No quotation needed
        return arg;
    }
    if (arg.indexOf('"') < 0 && arg.indexOf('\\') < 0) {
        // No embedded double quotes or backslashes, so I can just wrap
        // quote marks around the whole thing.
        return `"${arg}"`;
    }
    // Expected input/output:
    //   input : hello"world
    //   output: "hello\"world"
    //   input : hello""world
    //   output: "hello\"\"world"
    //   input : hello\world
    //   output: hello\world
    //   input : hello\\world
    //   output: hello\\world
    //   input : hello\"world
    //   output: "hello\\\"world"
    //   input : hello\\"world
    //   output: "hello\\\\\"world"
    //   input : hello world\
    //   output: "hello world\\" - note the comment in libuv actually reads "hello world\"
    //                             but it appears the comment is wrong, it should be "hello world\\"
    let reverse = '"';
    // quote_hit starts true so a trailing backslash (just before the closing
    // quote) is doubled on the very first iteration.
    let quote_hit = true;
    for (let i = arg.length; i > 0; i--) { // walk the string in reverse
        reverse += arg[i - 1];
        if (quote_hit && arg[i - 1] == '\\') {
            // backslash that precedes a quote (in original order) -> double it
            reverse += '\\';
        }
        else if (arg[i - 1] == '"') {
            quote_hit = true;
            reverse += '\\'; // escape the embedded quote itself
        }
        else {
            quote_hit = false;
        }
    }
    reverse += '"';
    // Built in reverse; flip back to original order.
    return reverse.split('').reverse().join('');
}
_cloneExecOptions(options) {
|
||||
options = options || {};
|
||||
let result = {
|
||||
cwd: options.cwd || process.cwd(),
|
||||
env: options.env || process.env,
|
||||
silent: options.silent || false,
|
||||
failOnStdErr: options.failOnStdErr || false,
|
||||
ignoreReturnCode: options.ignoreReturnCode || false,
|
||||
windowsVerbatimArguments: options.windowsVerbatimArguments || false
|
||||
};
|
||||
result.outStream = options.outStream || process.stdout;
|
||||
result.errStream = options.errStream || process.stderr;
|
||||
return result;
|
||||
}
|
||||
_getSpawnSyncOptions(options) {
|
||||
let result = {};
|
||||
result.cwd = options.cwd;
|
||||
result.env = options.env;
|
||||
result['windowsVerbatimArguments'] = options.windowsVerbatimArguments || this._isCmdFile();
|
||||
return result;
|
||||
}
|
||||
/**
|
||||
* Add argument
|
||||
* Append an argument or an array of arguments
|
||||
* returns ToolRunner for chaining
|
||||
*
|
||||
* @param val string cmdline or array of strings
|
||||
* @returns ToolRunner
|
||||
*/
|
||||
arg(val) {
|
||||
if (!val) {
|
||||
return this;
|
||||
}
|
||||
if (val instanceof Array) {
|
||||
core.debug(this.toolPath + ' arg: ' + JSON.stringify(val));
|
||||
this.args = this.args.concat(val);
|
||||
}
|
||||
else if (typeof (val) === 'string') {
|
||||
core.debug(this.toolPath + ' arg: ' + val);
|
||||
this.args = this.args.concat(val.trim());
|
||||
}
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Parses an argument line into one or more arguments
|
||||
* e.g. .line('"arg one" two -z') is equivalent to .arg(['arg one', 'two', '-z'])
|
||||
* returns ToolRunner for chaining
|
||||
*
|
||||
* @param val string argument line
|
||||
* @returns ToolRunner
|
||||
*/
|
||||
line(val) {
|
||||
if (!val) {
|
||||
return this;
|
||||
}
|
||||
core.debug(this.toolPath + ' arg: ' + val);
|
||||
this.args = this.args.concat(this._argStringToArray(val));
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Add argument(s) if a condition is met
|
||||
* Wraps arg(). See arg for details
|
||||
* returns ToolRunner for chaining
|
||||
*
|
||||
* @param condition boolean condition
|
||||
* @param val string cmdline or array of strings
|
||||
* @returns ToolRunner
|
||||
*/
|
||||
argIf(condition, val) {
|
||||
if (condition) {
|
||||
this.arg(val);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
/**
 * Pipe output of exec() to another tool.
 * @param tool ToolRunner that should receive this tool's output
 * @param file optional filename to additionally stream the output to.
 *        NOTE(review): `file` is accepted but never stored in this method —
 *        presumably consumed elsewhere or dropped by mistake; confirm before
 *        relying on the file-streaming behavior.
 * @returns {ToolRunner}
 */
pipeExecOutputToTool(tool, file) {
    this.pipeOutputToTool = tool;
    return this;
}
/**
|
||||
* Exec a tool synchronously.
|
||||
* Output will be *not* be streamed to the live console. It will be returned after execution is complete.
|
||||
* Appropriate for short running tools
|
||||
* Returns IExecSyncResult with output and return code
|
||||
*
|
||||
* @param tool path to tool to exec
|
||||
* @param options optional exec options. See IExecSyncOptions
|
||||
* @returns IExecSyncResult
|
||||
*/
|
||||
execSync(options) {
|
||||
core.debug('exec tool: ' + this.toolPath);
|
||||
core.debug('arguments:');
|
||||
this.args.forEach((arg) => {
|
||||
core.debug(' ' + arg);
|
||||
});
|
||||
options = this._cloneExecOptions(options);
|
||||
if (!options.silent) {
|
||||
options.outStream.write(this._getCommandString(options) + os.EOL);
|
||||
}
|
||||
var r = child.spawnSync(this._getSpawnFileName(), this._getSpawnArgs(options), this._getSpawnSyncOptions(options));
|
||||
var res = { code: r.status, error: r.error };
|
||||
if (!options.silent && r.stdout && r.stdout.length > 0) {
|
||||
options.outStream.write(r.stdout);
|
||||
}
|
||||
if (!options.silent && r.stderr && r.stderr.length > 0) {
|
||||
options.errStream.write(r.stderr);
|
||||
}
|
||||
res.stdout = (r.stdout) ? r.stdout.toString() : '';
|
||||
res.stderr = (r.stderr) ? r.stderr.toString() : '';
|
||||
return res;
|
||||
}
|
||||
}
|
||||
exports.ToolRunner = ToolRunner;
|
||||
// Tracks the lifecycle of a spawned process: waits for both 'exit' and
// 'close' (stdio drained), applies a grace period between the two, and emits
// a single 'done' event carrying any error and the process exit code.
class ExecState extends events.EventEmitter {
    /**
     * @param options exec options; ignoreReturnCode / failOnStdErr are read in _setResult
     * @param toolPath path of the tool being executed (used in error messages)
     * @throws Error when toolPath is empty
     */
    constructor(options, toolPath) {
        super();
        this.delay = 10000; // 10 seconds
        this.timeout = null;
        // Initialize state flags explicitly (event handlers set them later);
        // previously they were left undefined, which is falsy but less clear.
        this.done = false;
        this.processClosed = false;
        this.processExited = false;
        if (!toolPath) {
            throw new Error('toolPath must not be empty');
        }
        this.options = options;
        this.toolPath = toolPath;
        // Test hook: override the exit-delay grace period (milliseconds).
        let delay = process.env['TASKLIB_TEST_TOOLRUNNER_EXITDELAY'];
        if (delay) {
            // Fix: parse base-10 explicitly and ignore non-numeric overrides.
            // The old bare parseInt(delay) could yield NaN, producing a
            // zero-length setTimeout grace period.
            const parsed = parseInt(delay, 10);
            if (!Number.isNaN(parsed)) {
                this.delay = parsed;
            }
        }
    }
    // Called after each process event; decides whether the run is finished.
    CheckComplete() {
        if (this.done) {
            return;
        }
        if (this.processClosed) {
            this._setResult();
        }
        else if (this.processExited) {
            // Exited but stdio not yet closed: give it `delay` ms to drain.
            this.timeout = setTimeout(ExecState.HandleTimeout, this.delay, this);
        }
    }
    // Derives the final error (if any) and emits the single 'done' event.
    _setResult() {
        // determine whether there is an error
        let error;
        if (this.processExited) {
            if (this.processError) {
                error = new Error(`LIB_ProcessError: \n tool: ${this.toolPath} \n error: ${this.processError}`);
            }
            else if (this.processExitCode != 0 && !this.options.ignoreReturnCode) {
                error = new Error(`LIB_ProcessExitCode\n tool: ${this.toolPath} \n Exit Code: ${this.processExitCode}`);
            }
            else if (this.processStderr && this.options.failOnStdErr) {
                error = new Error(`LIB_ProcessStderr', ${this.toolPath}`);
            }
        }
        // clear the pending grace-period timer, if any
        if (this.timeout) {
            clearTimeout(this.timeout);
            this.timeout = null;
        }
        this.done = true;
        this.emit('done', error, this.processExitCode);
    }
    // Grace period elapsed without a 'close' event; finalize anyway.
    static HandleTimeout(state) {
        if (state.done) {
            return;
        }
        if (!state.processClosed && state.processExited) {
            core.debug(`LIB_StdioNotClosed`);
        }
        state._setResult();
    }
}
@ -1,26 +0,0 @@
|
||||
"use strict";
|
||||
// TypeScript __awaiter helper (emitted by tsc for downlevel async/await):
// drives the generator produced from an async function, resuming it on each
// settled promise and adopting plain values into the promise type P.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
// CommonJS module bootstrap emitted by tsc.
Object.defineProperty(exports, "__esModule", { value: true });
exports.getTrafficSplitAPIVersion = void 0;
// SMI TrafficSplit API group prefix; matched against `kubectl api-versions` output.
const trafficSplitAPIVersionPrefix = "split.smi-spec.io";
/**
 * Discover which SMI TrafficSplit API version the cluster serves by scanning
 * `kubectl api-versions` for the "split.smi-spec.io" group.
 *
 * Fixes over the previous version:
 *  - lines are trimmed before matching, so CRLF output (Windows kubectl) no
 *    longer yields a version string with a trailing '\r';
 *  - missing stdout is tolerated instead of throwing a TypeError.
 *
 * @param kubectl client exposing executeCommand(cmd) -> Promise<{ stdout }>
 * @returns Promise resolving to the matched api-version line (e.g. "split.smi-spec.io/v1alpha2")
 * @throws Error (rejected promise) when no TrafficSplit api version is found
 */
async function getTrafficSplitAPIVersion(kubectl) {
    const trafficSplitPrefix = "split.smi-spec.io";
    const result = await kubectl.executeCommand("api-versions");
    const trafficSplitAPIVersion = (result.stdout || "")
        .split("\n")
        .map((version) => version.trim())
        .find((version) => version.startsWith(trafficSplitPrefix));
    if (!trafficSplitAPIVersion) {
        throw new Error("Unable to find traffic split api version");
    }
    return trafficSplitAPIVersion;
}
exports.getTrafficSplitAPIVersion = getTrafficSplitAPIVersion;
|
||||
@ -1,235 +0,0 @@
|
||||
"use strict";
|
||||
// TypeScript __awaiter helper (emitted by tsc for downlevel async/await):
// drives the generator produced from an async function, resuming it on each
// settled promise and adopting plain values into the promise type P.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
// CommonJS module bootstrap emitted by tsc; named exports are declared here
// and assigned next to each definition below.
Object.defineProperty(exports, "__esModule", { value: true });
exports.getNormalizedPath = exports.isHttpUrl = exports.getCurrentTime = exports.getRandomInt = exports.sleep = exports.normaliseWorkflowStrLabel = exports.getDeploymentConfig = exports.annotateChildPods = exports.getWorkflowFilePath = exports.getLastSuccessfulRunSha = exports.checkForErrors = exports.isEqual = exports.getExecutableExtension = void 0;
const os = require("os");
|
||||
const core = require("@actions/core");
|
||||
const githubClient_1 = require("../githubClient");
|
||||
const httpClient_1 = require("./httpClient");
|
||||
const inputParams = require("../input-parameters");
|
||||
const docker_1 = require("../types/docker");
|
||||
const io = require("@actions/io");
|
||||
/**
 * Platform-specific executable extension.
 * os.type() begins with "Win" on Windows (e.g. "Windows_NT").
 * @returns ".exe" on Windows, "" elsewhere
 */
function getExecutableExtension() {
    return os.type().match(/^Win/) ? ".exe" : "";
}
exports.getExecutableExtension = getExecutableExtension;
|
||||
/**
 * String equality with optional case-insensitivity.
 * Two nullish values are considered equal; a nullish and a non-nullish are not.
 *
 * @param str1 first string (may be null/undefined)
 * @param str2 second string (may be null/undefined)
 * @param ignoreCase when truthy, compare case-insensitively
 * @returns boolean
 */
function isEqual(str1, str2, ignoreCase) {
    if (str1 == null || str2 == null) {
        // equal only when BOTH are nullish
        return str1 == null && str2 == null;
    }
    return ignoreCase
        ? str1.toUpperCase() === str2.toUpperCase()
        : str1 === str2;
}
exports.isEqual = isEqual;
|
||||
/**
 * Inspect a batch of command execution results.
 * stderr from FAILED commands (code !== 0) is accumulated and either thrown
 * or warned (per warnIfError); stderr from successful commands is only warned.
 *
 * @param execResults array of { code, stderr } results
 * @param warnIfError when truthy, downgrade the accumulated failure to a warning
 * @throws Error with the accumulated stderr when any command failed and warnIfError is falsy
 */
function checkForErrors(execResults, warnIfError) {
    if (execResults.length === 0) {
        return;
    }
    let stderr = "";
    for (const result of execResults) {
        if (!result || !result.stderr) {
            continue;
        }
        if (result.code !== 0) {
            stderr += result.stderr + "\n";
        }
        else {
            // command succeeded but still wrote to stderr: surface as a warning
            core.warning(result.stderr);
        }
    }
    if (stderr.length > 0) {
        if (warnIfError) {
            core.warning(stderr.trim());
        }
        else {
            throw new Error(stderr.trim());
        }
    }
}
exports.checkForErrors = checkForErrors;
|
||||
/**
 * Read the previous successful deployment's commit SHA back out of the
 * namespace annotation written by an earlier run.
 *
 * @param kubectl client exposing getResource(type, name) -> { stdout, stderr }
 * @param namespaceName namespace holding the annotation
 * @param annotationKey annotation key whose value is a quasi-JSON blob with a `commit` field
 * @returns the recorded commit SHA; GITHUB_SHA when the namespace read reports
 *          stderr; "NA" when the annotation is absent; "" on an exception
 */
function getLastSuccessfulRunSha(kubectl, namespaceName, annotationKey) {
    try {
        const result = kubectl.getResource("namespace", namespaceName);
        if (!result) {
            return undefined;
        }
        if (result.stderr) {
            // Could not read the namespace; fall back to the current commit.
            core.warning(`${result.stderr}`);
            return process.env.GITHUB_SHA;
        }
        if (result.stdout) {
            const annotationsSet = JSON.parse(result.stdout).metadata.annotations;
            if (annotationsSet && annotationsSet[annotationKey]) {
                // Annotation values are stored with single quotes; normalize to JSON.
                return JSON.parse(annotationsSet[annotationKey].replace(/'/g, '"')).commit;
            }
            return "NA";
        }
        return undefined;
    }
    catch (ex) {
        core.warning(`Failed to get commits from cluster. ${JSON.stringify(ex)}`);
        return "";
    }
}
exports.getLastSuccessfulRunSha = getLastSuccessfulRunSha;
|
||||
/**
 * Resolve the workflow file path (".github/workflows/<file>") for the current run.
 * GITHUB_WORKFLOW holds the workflow's display *name* when the workflow declares
 * `name:`; in that case the repo's workflow list is queried to map the name back
 * to its on-disk path. Falls back to the raw GITHUB_WORKFLOW value when no
 * match is found or the API call fails.
 *
 * @param githubToken token used to call the GitHub list-workflows API
 * @returns Promise resolving to the workflow file path (or the raw name as fallback)
 */
function getWorkflowFilePath(githubToken) {
    return __awaiter(this, void 0, void 0, function* () {
        let workflowFilePath = process.env.GITHUB_WORKFLOW;
        if (!workflowFilePath.startsWith(".github/workflows/")) {
            const githubClient = new githubClient_1.GitHubClient(process.env.GITHUB_REPOSITORY, githubToken);
            const response = yield githubClient.getWorkflows();
            if (response) {
                if (response.statusCode == httpClient_1.StatusCodes.OK &&
                    response.body &&
                    response.body.total_count) {
                    if (response.body.total_count > 0) {
                        for (let workflow of response.body.workflows) {
                            // Match by display name to recover the on-disk path.
                            if (process.env.GITHUB_WORKFLOW === workflow.name) {
                                workflowFilePath = workflow.path;
                                break;
                            }
                        }
                    }
                }
                else if (response.statusCode != httpClient_1.StatusCodes.OK) {
                    core.debug(`An error occured while getting list of workflows on the repo. Statuscode: ${response.statusCode}, StatusMessage: ${response.statusMessage}`);
                }
            }
            else {
                core.warning(`Failed to get response from workflow list API`);
            }
        }
        return Promise.resolve(workflowFilePath);
    });
}
exports.getWorkflowFilePath = getWorkflowFilePath;
/**
 * Annotate every pod owned by the given resource.
 * Deployments own pods indirectly through a ReplicaSet, so the new ReplicaSet
 * name is resolved first and used as the owner to match against.
 *
 * @param kubectl client exposing getNewReplicaSet(name) and annotate(type, name, kv)
 * @param resourceType resource kind (e.g. "deployment", "pod")
 * @param resourceName name of the owning resource
 * @param annotationKeyValStr "key=value" annotation to apply
 * @param allPods pod list object ({ items: [...] }) to scan for owned pods
 * @returns array of the results returned by kubectl.annotate for each matched pod
 */
function annotateChildPods(kubectl, resourceType, resourceName, annotationKeyValStr, allPods) {
    let owner = resourceName;
    if (resourceType.toLowerCase().includes("deployment")) {
        owner = kubectl.getNewReplicaSet(resourceName);
    }
    const commandExecutionResults = [];
    const pods = (allPods && allPods.items) || [];
    for (const pod of pods) {
        const ownerRefs = pod.metadata.ownerReferences;
        if (!ownerRefs) {
            continue;
        }
        // annotate each pod at most once, regardless of duplicate owner refs
        if (ownerRefs.some((ref) => ref.name === owner)) {
            commandExecutionResults.push(kubectl.annotate("pod", pod.metadata.name, annotationKeyValStr));
        }
    }
    return commandExecutionResults;
}
exports.annotateChildPods = annotateChildPods;
|
||||
/**
 * Collect deployment traceability info from the action's inputs/environment:
 * manifest file paths, helm chart paths (HELM_CHART_PATHS env var,
 * ';'-separated), and a map of image -> dockerfile path (read from each
 * image's "dockerfile-path" label when available).
 * Paths that are not already URLs are normalized to repo permalinks.
 *
 * @returns Promise resolving to { manifestFilePaths, helmChartFilePaths, dockerfilePaths }
 */
function getDeploymentConfig() {
    return __awaiter(this, void 0, void 0, function* () {
        let helmChartPaths = (process.env.HELM_CHART_PATHS &&
            process.env.HELM_CHART_PATHS.split(";").filter((path) => path != "")) ||
            [];
        helmChartPaths = helmChartPaths.map((helmchart) => getNormalizedPath(helmchart.trim()));
        let inputManifestFiles = inputParams.manifests || [];
        // Manifest paths are normalized only when no helm charts are in play.
        if (!helmChartPaths || helmChartPaths.length == 0) {
            inputManifestFiles = inputManifestFiles.map((manifestFile) => getNormalizedPath(manifestFile));
        }
        const imageNames = inputParams.containers || [];
        let imageDockerfilePathMap = {};
        //Fetching from image label if available
        for (const image of imageNames) {
            try {
                imageDockerfilePathMap[image] = yield getDockerfilePath(image);
            }
            catch (ex) {
                // Best-effort: a missing dockerfile path must not fail the deployment.
                core.warning(`Failed to get dockerfile path for image ${image.toString()} | ` + ex);
            }
        }
        const deploymentConfig = {
            manifestFilePaths: inputManifestFiles,
            helmChartFilePaths: helmChartPaths,
            dockerfilePaths: imageDockerfilePathMap,
        };
        return Promise.resolve(deploymentConfig);
    });
}
exports.getDeploymentConfig = getDeploymentConfig;
/**
 * Make a workflow name usable as a label: strip the well-known workflow
 * directory prefix and replace spaces with underscores.
 *
 * @param workflowName workflow name or ".github/workflows/..." path
 * @returns normalized, space-free name
 */
function normaliseWorkflowStrLabel(workflowName) {
    const prefix = ".github/workflows/";
    const name = workflowName.startsWith(prefix)
        ? workflowName.slice(prefix.length)
        : workflowName;
    return name.split(" ").join("_");
}
exports.normaliseWorkflowStrLabel = normaliseWorkflowStrLabel;
|
||||
/**
 * Promise-based delay; resolves (with undefined) after `timeout` milliseconds.
 * @param timeout delay in milliseconds
 * @returns Promise<void>
 */
function sleep(timeout) {
    return new Promise((resolve) => {
        setTimeout(resolve, timeout);
    });
}
exports.sleep = sleep;
|
||||
/**
 * Uniform random integer in [0, floor(max)); NOT cryptographically secure.
 * @param max exclusive upper bound (floored)
 * @returns integer in [0, floor(max))
 */
function getRandomInt(max) {
    const upper = Math.floor(max);
    return Math.floor(Math.random() * upper);
}
exports.getRandomInt = getRandomInt;
|
||||
/**
 * Current time as milliseconds since the Unix epoch.
 * @returns number of milliseconds
 */
function getCurrentTime() {
    // Date.now() is equivalent to new Date().getTime().
    return Date.now();
}
exports.getCurrentTime = getCurrentTime;
|
||||
/**
 * Verify the docker CLI is available on PATH.
 * @returns Promise resolving to undefined when docker is found
 * @throws Error (rejected promise) when `docker` cannot be located
 */
function checkDockerPath() {
    return __awaiter(this, void 0, void 0, function* () {
        // second arg `false`: io.which returns "" instead of throwing when missing
        let dockerPath = yield io.which("docker", false);
        if (!dockerPath) {
            throw new Error("Docker is not installed.");
        }
    });
}
/**
 * Pull and inspect `image`, then read its "dockerfile-path" label, normalized
 * to a repo permalink when it is not already a URL. Returns "" when the label
 * is absent.
 * NOTE(review): the pull/inspect results are consumed synchronously here —
 * presumably DockerExec runs them synchronously when the third argument is
 * true; confirm against the DockerExec implementation.
 *
 * @param image image reference to inspect
 * @returns Promise resolving to the dockerfile path or ""
 */
function getDockerfilePath(image) {
    return __awaiter(this, void 0, void 0, function* () {
        let imageConfig, imageInspectResult;
        var dockerExec = new docker_1.DockerExec("docker");
        yield checkDockerPath();
        dockerExec.pull(image, [], true);
        imageInspectResult = dockerExec.inspect(image, [], true);
        // `docker inspect` returns an array; the single inspected image is element 0.
        imageConfig = JSON.parse(imageInspectResult)[0];
        const DOCKERFILE_PATH_LABEL_KEY = "dockerfile-path";
        let pathValue = "";
        if (imageConfig) {
            if (imageConfig.Config &&
                imageConfig.Config.Labels &&
                imageConfig.Config.Labels[DOCKERFILE_PATH_LABEL_KEY]) {
                const pathLabel = imageConfig.Config.Labels[DOCKERFILE_PATH_LABEL_KEY];
                pathValue = getNormalizedPath(pathLabel);
            }
        }
        return Promise.resolve(pathValue);
    });
}
/**
 * Whether a string is an http:// or https:// URL (single-line strings only —
 * `.` in the pattern does not match newlines).
 * @param url string to test
 * @returns boolean
 */
function isHttpUrl(url) {
    return /^https?:\/\/.*$/.test(url);
}
exports.isHttpUrl = isHttpUrl;
|
||||
/**
 * Normalize a path for annotation storage: values that are already http(s)
 * URLs pass through; anything else is rewritten to a permalink in the current
 * repo at the current commit (GITHUB_REPOSITORY / GITHUB_SHA).
 * @param pathValue file path or URL
 * @returns URL string
 */
function getNormalizedPath(pathValue) {
    if (isHttpUrl(pathValue)) {
        return pathValue;
    }
    return `https://github.com/${process.env.GITHUB_REPOSITORY}/blob/${process.env.GITHUB_SHA}/${pathValue}`;
}
exports.getNormalizedPath = getNormalizedPath;
|
||||
@ -1,32 +0,0 @@
|
||||
"use strict";
|
||||
// CommonJS module bootstrap emitted by tsc.
Object.defineProperty(exports, "__esModule", { value: true });
exports.getWorkflowAnnotationKeyLabel = exports.getWorkflowAnnotations = void 0;
/**
 * Serialize workflow-run metadata (from the GitHub Actions environment) into
 * the JSON string stored as the deployment annotation value.
 *
 * @param lastSuccessRunSha commit SHA of the last successful run
 * @param workflowFilePath workflow path; the ".github/workflows/" prefix is stripped for workflowFileName
 * @param deploymentConfig { dockerfilePaths, manifestFilePaths, helmChartFilePaths }
 * @returns JSON string of the annotation object
 */
function getWorkflowAnnotations(lastSuccessRunSha, workflowFilePath, deploymentConfig) {
    const repository = process.env.GITHUB_REPOSITORY;
    const runId = process.env.GITHUB_RUN_ID;
    const annotationObject = {
        run: runId,
        repository: repository,
        workflow: process.env.GITHUB_WORKFLOW,
        workflowFileName: workflowFilePath.replace(".github/workflows/", ""),
        jobName: process.env.GITHUB_JOB,
        createdBy: process.env.GITHUB_ACTOR,
        runUri: `https://github.com/${repository}/actions/runs/${runId}`,
        commit: process.env.GITHUB_SHA,
        lastSuccessRunCommit: lastSuccessRunSha,
        branch: process.env.GITHUB_REF,
        deployTimestamp: Date.now(),
        dockerfilePaths: deploymentConfig.dockerfilePaths,
        manifestsPaths: deploymentConfig.manifestFilePaths,
        helmChartPaths: deploymentConfig.helmChartFilePaths,
        provider: "GitHub"
    };
    return JSON.stringify(annotationObject);
}
exports.getWorkflowAnnotations = getWorkflowAnnotations;
|
||||
/**
 * Build a stable annotation key for a (repository, workflow file) pair.
 * MD5 is used only as a cheap stable digest here, not for security.
 *
 * @param workflowFilePath workflow file path used in the hashed identity
 * @returns "githubWorkflow_<md5hex>"
 */
function getWorkflowAnnotationKeyLabel(workflowFilePath) {
    const crypto = require("crypto");
    const digest = crypto
        .createHash("MD5")
        .update(`${process.env.GITHUB_REPOSITORY}/${workflowFilePath}`)
        .digest("hex");
    return `githubWorkflow_${digest}`;
}
exports.getWorkflowAnnotationKeyLabel = getWorkflowAnnotationKeyLabel;
|
||||
Loading…
x
Reference in New Issue
Block a user